diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml
index 4d17582..8f9bf74 100644
--- a/dev-support/findbugs-exclude.xml
+++ b/dev-support/findbugs-exclude.xml
@@ -257,7 +257,7 @@
-
+
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestFavoredNodes.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestFavoredNodes.java
new file mode 100644
index 0000000..63272b5
--- /dev/null
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestFavoredNodes.java
@@ -0,0 +1,420 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.Set;
+
+import com.google.common.net.HostAndPort;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
+import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
+import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
+import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
+import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.LoadTestTool;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Sets;
+
+@Category(IntegrationTests.class)
+public class IntegrationTestFavoredNodes extends IntegrationTestBase {
+ protected static final Log LOG = LogFactory
+ .getLog(IntegrationTestFavoredNodes.class);
+ private Configuration conf;
+ private static final int SERVER_COUNT = 5; // number of slaves for the smallest cluster
+ private Random random = new Random();
+ private List<HRegionInfo> regionsWithData;
+ private LoadTestTool loadTool;
+ private HBaseAdmin admin;
+ private RSGroupAdmin groupAdmin;
+
+ /** A soft limit on how long we should run */
+ private static final String NAMESPACE = "integration_test";
+ private static final String GROUP_NAME = "testgroup";
+ private static final String TABLE_NAME = NAMESPACE + TableName.NAMESPACE_DELIM + "balance_with_fav_nodes";
+ private static final String FAIL_ON_ERROR = "hbase.testfavnodes.failonerror";
+ private static final String MAX_ERRORS = "hbase.testfavnodes.max.errors";
+ private static final long DEFAULT_RUN_TIME = 5 * 60 * 1000;
+
+ @Override
+ public void setUpCluster() throws Exception {
+ conf = HBaseConfiguration.create();
+ conf.setClass(RSGroupBasedLoadBalancer.HBASE_GROUP_LOADBALANCER_CLASS,
+ FavoredStochasticBalancer.class, LoadBalancer.class);
+ conf.set("hbase.coprocessor.master.classes", RSGroupAdminEndpoint.class.getName());
+ regionsWithData = new ArrayList<HRegionInfo>();
+ util = getTestingUtil(conf);
+ LOG.debug("Initializing/checking cluster has " + SERVER_COUNT + " servers");
+ util.initializeCluster(SERVER_COUNT);
+ LOG.debug("Done initializing/checking cluster");
+ admin = util.getHBaseAdmin();
+ groupAdmin = RSGroupAdmin.newClient(admin.getConnection());
+ deleteTableIfNecessary();
+ loadTool = new LoadTestTool();
+ loadTool.setConf(conf);
+ initTable();
+ }
+
+ @Override
+ public void cleanUpCluster() throws Exception {
+ deleteTableIfNecessary();
+ admin.deleteNamespace(NAMESPACE);
+ RSGroupInfo groupInfo = groupAdmin.getRSGroupInfo(GROUP_NAME);
+ groupAdmin.moveTables(groupInfo.getTables(), RSGroupInfo.DEFAULT_GROUP);
+ groupAdmin.moveServers(groupInfo.getServers(), RSGroupInfo.DEFAULT_GROUP);
+ groupAdmin.removeRSGroup(GROUP_NAME);
+ super.cleanUpCluster();
+ }
+
+ protected void initTable() throws IOException, InterruptedException {
+ addGroup(groupAdmin, GROUP_NAME, 4);
+ NamespaceDescriptor nspDesc = NamespaceDescriptor.create(NAMESPACE)
+ .addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, GROUP_NAME).build();
+ admin.createNamespace(nspDesc);
+ int ret = loadTool.run(new String[] { "-tn", TABLE_NAME, "-init_only" });
+ Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);
+ }
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
+ private void deleteTableIfNecessary() throws IOException {
+ TableName tableName = TableName.valueOf(TABLE_NAME);
+ if (util.getHBaseAdmin().tableExists(tableName)) {
+ util.deleteTable(tableName);
+ }
+ }
+
+ @Override
+ public int runTestFromCommandLine() throws Exception {
+ generateDataAndVerifyFavNodes(DEFAULT_RUN_TIME);
+ return 1;
+ }
+
+ @Test
+ public void testFavNodes() throws Exception {
+ generateDataAndVerifyFavNodes(DEFAULT_RUN_TIME);
+ }
+
+ @Override
+ public TableName getTablename() {
+ return TableName.valueOf(TABLE_NAME);
+ }
+
+ @Override
+ protected Set<String> getColumnFamilies() {
+ return Sets.newHashSet(Bytes.toString(LoadTestTool.DEFAULT_COLUMN_FAMILY));
+ }
+
+ public static void main(String[] args) throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ IntegrationTestingUtility.setUseDistributedCluster(conf);
+ int ret = ToolRunner.run(conf, new IntegrationTestFavoredNodes(), args);
+ System.exit(ret);
+ }
+
+ private void generateDataAndVerifyFavNodes(long runTime) throws Exception {
+ LOG.info("Running IntegrationTestFavoredStochasticBalancer");
+ generateData(runTime);
+ Map<String, Map<String, Float>> regionLocalityMap = FSUtils
+ .getRegionDegreeLocalityMappingFromFS(getConf(), getTablename().getNameAsString(), 2);
+ assertLocality100Percent(regionLocalityMap);
+ buildRegionsWithData(regionLocalityMap.keySet());
+
+ //Verify META has all the favored nodes information.
+ SnapshotOfRegionAssignmentFromMeta metaSnapshot = new SnapshotOfRegionAssignmentFromMeta(
+ admin.getConnection());
+ metaSnapshot.initialize();
+ Map<HRegionInfo, List<ServerName>> favoredNodesInfo = metaSnapshot.getExistingAssignmentPlan()
+ .getAssignmentMap();
+ List<HRegionInfo> regionsOfTable = util.getHBaseAdmin().getTableRegions(
+ getTablename());
+ assertTrue(favoredNodesInfo.size() >= regionsOfTable.size());
+ for (HRegionInfo rInfo : regionsOfTable) {
+ List<ServerName> favNodes = favoredNodesInfo.get(rInfo);
+ assertNotNull(favNodes);
+ assertTrue("3 favored nodes not found.", favNodes.size() == 3);
+ }
+
+ //Test locality with move to favored nodes server.
+ HRegionInfo regionToBeMoved = getRandomRegion();
+ assertNotNull(regionToBeMoved);
+ ServerName currentRS = util.getHBaseClusterInterface().getServerHoldingRegion(
+ getTablename(), regionToBeMoved.getRegionName());
+ verifyMoveToFavoredNodes(regionToBeMoved, favoredNodesInfo.get(regionToBeMoved), currentRS);
+
+ LOG.info("Killing region server:" + currentRS);
+ util.getHBaseClusterInterface().killRegionServer(currentRS);
+ LOG.info("Sleeping for 2 minutes");
+ Threads.sleep(120000);
+
+ Map<ServerName, Float> localityMap = new HashMap<ServerName, Float>();
+ float localitySum = 0;
+ for (ServerName sn : util.getHBaseAdmin().getClusterStatus().getServers()) {
+ float locality = getPercentFilesLocal(sn);
+ localitySum += locality;
+ localityMap.put(sn, locality);
+ }
+ float avgLocalityBeforeBalance = (float)localitySum/localityMap.size();
+
+ LOG.info("Balancing cluster");
+ util.getHBaseAdmin().balancer();
+ LOG.info("Sleeping for 3 minutes");
+ Threads.sleep(180000);
+ localitySum = 0;
+ Map<ServerName, Float> localityMapAfterBalance = new HashMap<ServerName, Float>();
+ for (ServerName sn : util.getHBaseAdmin().getClusterStatus().getServers()) {
+ float locality = getPercentFilesLocal(sn);
+ localitySum += locality;
+ localityMapAfterBalance.put(sn, locality);
+ }
+ float avgLocalityAfterBalance = (float) localitySum/localityMapAfterBalance.size();
+
+ Joiner.MapJoiner mapJoiner = Joiner.on(',').withKeyValueSeparator("=");
+ LOG.info("Servers before and after balance different: Before " + mapJoiner.join(localityMap)
+ + " After " + mapJoiner.join(localityMapAfterBalance));
+ assertTrue(localityMap.size() == localityMapAfterBalance.size());
+ LOG.info("Verifying locality improvement after balance");
+ for (ServerName sn : util.getHBaseAdmin().getClusterStatus().getServers()) {
+ assertTrue("Server not found in before balance locality map " + sn,
+ localityMap.containsKey(sn));
+ assertTrue("Server not found in after balance locality map " + sn,
+ localityMapAfterBalance.containsKey(sn));
+ LOG.info("Server = " + sn + " Before balance locality " + localityMap.get(sn)
+ + " After balance locality " + localityMapAfterBalance.get(sn));
+ }
+
+ assertTrue("Locality before " + avgLocalityBeforeBalance + " after balance locality "
+ + avgLocalityAfterBalance, avgLocalityAfterBalance >= avgLocalityBeforeBalance);
+
+ }
+
+ private void generateData(long runTime) throws IOException, InterruptedException {
+ LOG.info("Cluster size:" + util.getHBaseClusterInterface().getClusterStatus().getServersSize());
+ long start = System.currentTimeMillis();
+ boolean failOnError = util.getConfiguration().getBoolean(FAIL_ON_ERROR, true);
+ long startKey = 0;
+ long numKeys = getNumKeys(2500);
+ while (System.currentTimeMillis() - start < 0.9 * runTime) {
+ LOG.info("Intended run time: " + (runTime/60000) + " min, left:" +
+ ((runTime - (System.currentTimeMillis() - start))/60000) + " min");
+ int ret = -1;
+ List<String> args = new ArrayList<String>();
+ args.add("-tn");
+ args.add(getTablename().getNameAsString());
+ args.add("-write");
+ args.add(String.format("%d:%d:%d", 10, 1024, 10));
+ args.add("-start_key");
+ args.add(String.valueOf(startKey));
+ args.add("-num_keys");
+ args.add(String.valueOf(numKeys));
+ args.add("-skip_init");
+ args.add("-max_read_errors");
+ int maxErrors = Integer.parseInt(this.util.getConfiguration().get(MAX_ERRORS,
+ Integer.MAX_VALUE + ""));
+ args.add(String.valueOf(maxErrors));
+ ret = loadTool.run(args.toArray(new String[args.size()]));
+ if (0 != ret) {
+ String errorMsg = "Load failed with error code " + ret;
+ LOG.error(errorMsg);
+ if (failOnError) {
+ Assert.fail(errorMsg);
+ }
+ }
+ }
+ util.getHBaseAdmin().flush(getTablename());
+ }
+
+ private void moveRegionToDestination(final HRegionInfo regionToBeMoved, final ServerName destination)
+ throws Exception {
+ LOG.debug("Region to be moved : " + regionToBeMoved + " to destination " + destination);
+ util.getHBaseAdmin().move(regionToBeMoved.getEncodedNameAsBytes(),
+ Bytes.toBytes(destination.getServerName()));
+ util.waitFor(60000, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return (util.getHBaseAdmin().getOnlineRegions(destination).contains(regionToBeMoved));
+ }
+ });
+ /* assertTrue("Region not moved to expected server : " + destination, util.getHBaseAdmin()
+ .getOnlineRegions(destination).contains(regionToBeMoved));*/
+ }
+
+ private void assertLocality100Percent(Map<String, Map<String, Float>> regionLocalityMap)
+ throws IOException {
+ LOG.debug("Found " + regionLocalityMap.size() + " entries in regionLocalityMap.");
+ for (Entry<String, Map<String, Float>> entry : regionLocalityMap.entrySet()) {
+ Map<String, Float> localityIndex = entry.getValue();
+ assertTrue("Size of region servers holding region not 3 but " + localityIndex.size()
+ + ". Region encoded name : " + entry.getKey(), localityIndex.size() == 3);
+ for (String sName : localityIndex.keySet()) {
+ assertTrue("Locality not 100 % ", localityIndex.get(sName) == 1);
+ }
+ }
+ }
+
+ private ServerName getUnrelatedServer(List<ServerName> servers) throws IOException {
+ Set<ServerName> onlineServers = new HashSet<ServerName>();
+ onlineServers.addAll(util.getHBaseClusterInterface().getClusterStatus().getServers());
+ LOG.debug("Online servers are : ");
+ for (ServerName sn : onlineServers) {
+ LOG.debug(sn);
+ }
+ LOG.debug(" Favored region servers are : ");
+ for (ServerName sn : servers) {
+ LOG.debug(sn);
+ }
+ ArrayList<ServerName> finalList = new ArrayList<ServerName>();
+ finalList.addAll(onlineServers);
+ for (ServerName server : servers) {
+ for (ServerName curr : onlineServers) {
+ if (ServerName.isSameHostnameAndPort(curr, server)) {
+ finalList.remove(curr);
+ }
+ }
+ }
+ return finalList.get(random.nextInt(finalList.size()));
+ }
+
+ private void verifyMoveToFavoredNodes(HRegionInfo region, List<ServerName> favNodes,
+ ServerName currentRS) throws Exception {
+ FavoredNodesPlan.Position favoredNodePosition = FavoredNodesPlan.getFavoredServerPosition(
+ favNodes, currentRS);
+ assertTrue("Region not on favored node : " + region.getEncodedName(),
+ favoredNodePosition != null);
+ // Move to the other remaining positions and verify locality is 100 %
+ for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
+ ServerName sn = favNodes.get(p.ordinal());
+ if ((p.compareTo(favoredNodePosition) != 0)
+ && (!ServerName.isSameHostnameAndPort(sn,
+ RSGroupBasedLoadBalancer.BOGUS_SERVER_NAME))) {
+ ServerName favoredNode = getServerNameWithCorrectStartCode(sn);
+ assertNotNull("Favored node not among online region servers", favoredNode);
+ Float originalLocality = getPercentFilesLocal(favoredNode);
+ moveRegionToDestination(region, favoredNode);
+ Float locality = getPercentFilesLocal(favoredNode);
+ assertTrue("Locality on favored node did not improve, ServerName : " + favoredNode
+ + " Region : " + region, locality >= originalLocality);
+ }
+ }
+ }
+
+ private HRegionInfo getRandomRegion() {
+ if (regionsWithData.size() > 0) {
+ return regionsWithData.get(random.nextInt(regionsWithData.size()));
+ } else {
+ LOG.warn("No regions with data found in table.");
+ return null;
+ }
+ }
+
+ /**
+ * Builds the list of regions with storefiles. This is needed
+ * to check locality after region move.
+ *
+ * @param regions List of region encoded names.
+ * @throws IOException Signals that an I/O exception has occurred.
+ */
+ private void buildRegionsWithData(Set<String> regions) throws IOException {
+ List<HRegionInfo> regionsOfTable = util.getHBaseAdmin().getTableRegions(
+ TableName.valueOf(TABLE_NAME));
+ for (HRegionInfo info : regionsOfTable) {
+ if (regions.contains(info.getEncodedName())) {
+ regionsWithData.add(info);
+ }
+ }
+ }
+
+ private ServerName getServerNameWithCorrectStartCode(ServerName sn) throws IOException {
+ Collection<ServerName> servers = util.getHBaseAdmin().getClusterStatus().getServers();
+ for (ServerName sName : servers) {
+ if (ServerName.isSameHostnameAndPort(sName, sn)) {
+ return sName;
+ }
+ }
+ return null;
+ }
+
+ private float getPercentFilesLocal(ServerName sn) throws IOException {
+ List<HRegionInfo> regionOfServer = util.getHBaseAdmin().getOnlineRegions(sn);
+ HDFSBlocksDistribution blockDist = new HDFSBlocksDistribution();
+ HTableDescriptor desc = util.getHBaseAdmin().getTableDescriptor(TableName.valueOf(TABLE_NAME));
+ for (HRegionInfo regionInfo : regionOfServer) {
+ blockDist.add(HRegion.computeHDFSBlocksDistribution(getConf(), desc, regionInfo));
+ }
+ return blockDist.getBlockLocalityIndex(sn.getHostname());
+ }
+
+ /** Estimates a data size based on the cluster size */
+ private long getNumKeys(int keysPerServer)
+ throws IOException {
+ int numRegionServers = util.getHBaseAdmin().getClusterStatus().getServersSize();
+ return keysPerServer * numRegionServers;
+ }
+
+ private RSGroupInfo addGroup(RSGroupAdmin gAdmin, String groupName, int serverCount)
+ throws IOException, InterruptedException {
+ RSGroupInfo defaultInfo = gAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
+ assertTrue(defaultInfo != null);
+ assertTrue(defaultInfo.getServers().size() >= serverCount);
+ gAdmin.addRSGroup(groupName);
+
+ Set<HostAndPort> set = new HashSet<HostAndPort>();
+ for (HostAndPort server : defaultInfo.getServers()) {
+ if (set.size() == serverCount) {
+ break;
+ }
+ set.add(server);
+ }
+ gAdmin.moveServers(set, groupName);
+ RSGroupInfo result = gAdmin.getRSGroupInfo(groupName);
+ assertTrue(result.getServers().size() >= serverCount);
+ return result;
+ }
+
+}
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
index 3d1f4bd..3edbce9 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -10754,6 +10754,3252 @@ public final class RSGroupAdminProtos {
// @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfServerResponse)
}
+ public interface RedistributeGroupRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string group_name = 1;
+ /**
+ * required string group_name = 1;
+ */
+ boolean hasGroupName();
+ /**
+ * required string group_name = 1;
+ */
+ java.lang.String getGroupName();
+ /**
+ * required string group_name = 1;
+ */
+ com.google.protobuf.ByteString
+ getGroupNameBytes();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RedistributeGroupRequest}
+ */
+ public static final class RedistributeGroupRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements RedistributeGroupRequestOrBuilder {
+ // Use RedistributeGroupRequest.newBuilder() to construct.
+ private RedistributeGroupRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private RedistributeGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final RedistributeGroupRequest defaultInstance;
+ public static RedistributeGroupRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public RedistributeGroupRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RedistributeGroupRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ groupName_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<RedistributeGroupRequest> PARSER =
+ new com.google.protobuf.AbstractParser<RedistributeGroupRequest>() {
+ public RedistributeGroupRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new RedistributeGroupRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<RedistributeGroupRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string group_name = 1;
+ public static final int GROUP_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object groupName_;
+ /**
+ * required string group_name = 1;
+ */
+ public boolean hasGroupName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public java.lang.String getGroupName() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ groupName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getGroupNameBytes() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ groupName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ groupName_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasGroupName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getGroupNameBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getGroupNameBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest) obj;
+
+ boolean result = true;
+ result = result && (hasGroupName() == other.hasGroupName());
+ if (hasGroupName()) {
+ result = result && getGroupName()
+ .equals(other.getGroupName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasGroupName()) {
+ hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getGroupName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RedistributeGroupRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ groupName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.groupName_ = groupName_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ // Dispatches to the type-specific merge when possible; otherwise falls back
+ // to the reflective GeneratedMessage merge.
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ // Merges set fields from another RedistributeGroupRequest; group_name is
+ // shared by reference (String/ByteString are immutable), unknown fields merged.
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.getDefaultInstance()) return this;
+ if (other.hasGroupName()) {
+ bitField0_ |= 0x00000001;
+ groupName_ = other.groupName_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ // True only when the required group_name field has been set.
+ public final boolean isInitialized() {
+ if (!hasGroupName()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ // Parses from a stream and merges into this builder. On a protocol error the
+ // partially-parsed message is still merged (in the finally block) before the
+ // exception is rethrown, matching the protobuf builder contract.
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string group_name = 1;
+ // Stored as Object: lazily holds either a String or a ByteString and is
+ // converted (and cached) on demand by the accessors below.
+ private java.lang.Object groupName_ = "";
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public boolean hasGroupName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public java.lang.String getGroupName() {
+ java.lang.Object ref = groupName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ groupName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getGroupNameBytes() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ groupName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public Builder setGroupName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ groupName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public Builder clearGroupName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ groupName_ = getDefaultInstance().getGroupName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public Builder setGroupNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ groupName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.RedistributeGroupRequest)
+ }
+
+ // Eagerly creates the shared default instance returned by getDefaultInstance().
+ static {
+ defaultInstance = new RedistributeGroupRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.RedistributeGroupRequest)
+ }
+
+ // Read-only view shared by RedistributeGroupResponse and its Builder.
+ public interface RedistributeGroupResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bool result = 1;
+ /**
+ * <code>required bool result = 1;</code>
+ */
+ boolean hasResult();
+ /**
+ * <code>required bool result = 1;</code>
+ */
+ boolean getResult();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RedistributeGroupResponse}
+ *
+ * NOTE(review): protoc-generated; generic type parameters (stripped during
+ * extraction) restored to match protoc 2.x output. No behavioral change.
+ */
+ public static final class RedistributeGroupResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements RedistributeGroupResponseOrBuilder {
+ // Use RedistributeGroupResponse.newBuilder() to construct.
+ private RedistributeGroupResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private RedistributeGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final RedistributeGroupResponse defaultInstance;
+ public static RedistributeGroupResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public RedistributeGroupResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RedistributeGroupResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ result_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<RedistributeGroupResponse> PARSER =
+ new com.google.protobuf.AbstractParser<RedistributeGroupResponse>() {
+ public RedistributeGroupResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new RedistributeGroupResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<RedistributeGroupResponse> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bool result = 1;
+ public static final int RESULT_FIELD_NUMBER = 1;
+ private boolean result_;
+ /**
+ * <code>required bool result = 1;</code>
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool result = 1;</code>
+ */
+ public boolean getResult() {
+ return result_;
+ }
+
+ private void initFields() {
+ result_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasResult()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, result_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, result_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse) obj;
+
+ boolean result = true;
+ result = result && (hasResult() == other.hasResult());
+ if (hasResult()) {
+ result = result && (getResult()
+ == other.getResult());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasResult()) {
+ hash = (37 * hash) + RESULT_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getResult());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RedistributeGroupResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ result_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RedistributeGroupResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.result_ = result_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.getDefaultInstance()) return this;
+ if (other.hasResult()) {
+ setResult(other.getResult());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasResult()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required bool result = 1;
+ private boolean result_ ;
+ /**
+ * <code>required bool result = 1;</code>
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bool result = 1;</code>
+ */
+ public boolean getResult() {
+ return result_;
+ }
+ /**
+ * <code>required bool result = 1;</code>
+ */
+ public Builder setResult(boolean value) {
+ bitField0_ |= 0x00000001;
+ result_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bool result = 1;</code>
+ */
+ public Builder clearResult() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ result_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.RedistributeGroupResponse)
+ }
+
+ static {
+ defaultInstance = new RedistributeGroupResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.RedistributeGroupResponse)
+ }
+
+ // Read-only view shared by CompleteRedistributeGroupRequest and its Builder.
+ public interface CompleteRedistributeGroupRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string group_name = 1;
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ boolean hasGroupName();
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ java.lang.String getGroupName();
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getGroupNameBytes();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CompleteRedistributeGroupRequest}
+ */
+ public static final class CompleteRedistributeGroupRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements CompleteRedistributeGroupRequestOrBuilder {
+ // Use CompleteRedistributeGroupRequest.newBuilder() to construct.
+ // NOTE(review): restored the Builder<?> type argument stripped during extraction.
+ private CompleteRedistributeGroupRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ // Used only for the shared default instance; leaves fields at their defaults.
+ private CompleteRedistributeGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ // Shared immutable default instance, created in the static initializer.
+ private static final CompleteRedistributeGroupRequest defaultInstance;
+ public static CompleteRedistributeGroupRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CompleteRedistributeGroupRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ // Unknown fields preserved from parsing so round-tripping is lossless.
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ // Wire-format parsing constructor. Tag 10 (field 1, wire type 2) is the
+ // group_name bytes; unrecognized tags go to the unknown-field set. The
+ // default-before-case ordering is standard protoc output and is correct:
+ // an exact case match always wins over default in a Java switch.
+ private CompleteRedistributeGroupRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ groupName_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ // Descriptor / reflection plumbing generated by protoc.
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.Builder.class);
+ }
+
+ // NOTE(review): restored the <CompleteRedistributeGroupRequest> type arguments
+ // stripped during extraction; matches standard protoc 2.x output.
+ public static com.google.protobuf.Parser<CompleteRedistributeGroupRequest> PARSER =
+ new com.google.protobuf.AbstractParser<CompleteRedistributeGroupRequest>() {
+ public CompleteRedistributeGroupRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CompleteRedistributeGroupRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<CompleteRedistributeGroupRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string group_name = 1;
+ public static final int GROUP_NAME_FIELD_NUMBER = 1;
+ // Lazily holds either a String or a ByteString; accessors convert and cache.
+ private java.lang.Object groupName_;
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public boolean hasGroupName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public java.lang.String getGroupName() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ // Only cache the decoded String when the bytes were valid UTF-8.
+ if (bs.isValidUtf8()) {
+ groupName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * <code>required string group_name = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getGroupNameBytes() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ groupName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ groupName_ = "";
+ }
+ // Memoized required-field check: -1 unknown, 0 missing field, 1 initialized.
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasGroupName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ // Serializes group_name (field 1) when set, then any preserved unknown fields.
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getGroupNameBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ // Memoized serialized size; safe because the message is immutable once built.
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getGroupNameBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ // Value equality over the group_name field (presence and content) plus
+ // unknown fields, per the generated-message equals contract.
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest) obj;
+
+ boolean result = true;
+ result = result && (hasGroupName() == other.hasGroupName());
+ if (hasGroupName()) {
+ result = result && getGroupName()
+ .equals(other.getGroupName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ // Memoized hash consistent with equals (descriptor, set fields, unknowns).
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasGroupName()) {
+ hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getGroupName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ // Standard generated parse entry points; all delegate to PARSER.
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ // Builder factory methods generated by protoc.
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CompleteRedistributeGroupRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequestOrBuilder {
+ // Descriptor / reflection plumbing for the Builder, generated by protoc.
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ // No message/group fields here, so there are no nested builders to force.
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ // Resets group_name to its default and clears its has-bit.
+ public Builder clear() {
+ super.clear();
+ groupName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.getDefaultInstance();
+ }
+
+ // Builds the message; throws if the required group_name field is unset.
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ // Builds without the required-field check.
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.groupName_ = groupName_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
// Merges another message into this builder: type-specific fast path when
// the argument is the same generated type, reflective merge otherwise.
public Builder mergeFrom(com.google.protobuf.Message other) {
  if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest) {
    return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest)other);
  } else {
    super.mergeFrom(other);
    return this;
  }
}
+
// Type-specific merge: copies group_name (if set in other) and any
// unknown fields; merging the default instance is a no-op.
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest other) {
  if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.getDefaultInstance()) return this;
  if (other.hasGroupName()) {
    bitField0_ |= 0x00000001;
    groupName_ = other.groupName_;
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
+
// The message is initialized only when the required group_name is set.
public final boolean isInitialized() {
  if (!hasGroupName()) {

    return false;
  }
  return true;
}
+
// Parses from a stream and merges the result; on a parse error the
// partially-read message is still merged (finally block) before rethrow.
public Builder mergeFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest parsedMessage = null;
  try {
    parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest) e.getUnfinishedMessage();
    throw e;
  } finally {
    if (parsedMessage != null) {
      mergeFrom(parsedMessage);
    }
  }
  return this;
}
+ private int bitField0_;
+
+ // required string group_name = 1;
+ private java.lang.Object groupName_ = "";
/**
 * <code>required string group_name = 1;</code>
 */
public boolean hasGroupName() {
  return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
 * <code>required string group_name = 1;</code>
 */
public java.lang.String getGroupName() {
  java.lang.Object ref = groupName_;
  if (!(ref instanceof java.lang.String)) {
    // Stored as a ByteString until first String access; decode once and
    // cache the decoded form.
    java.lang.String s = ((com.google.protobuf.ByteString) ref)
        .toStringUtf8();
    groupName_ = s;
    return s;
  } else {
    return (java.lang.String) ref;
  }
}
/**
 * <code>required string group_name = 1;</code>
 */
public com.google.protobuf.ByteString
    getGroupNameBytes() {
  java.lang.Object ref = groupName_;
  if (ref instanceof String) {
    // Cache the UTF-8 encoding so repeated serialization is cheap.
    com.google.protobuf.ByteString b =
        com.google.protobuf.ByteString.copyFromUtf8(
            (java.lang.String) ref);
    groupName_ = b;
    return b;
  } else {
    return (com.google.protobuf.ByteString) ref;
  }
}
/**
 * <code>required string group_name = 1;</code>
 */
public Builder setGroupName(
    java.lang.String value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
  groupName_ = value;
  onChanged();
  return this;
}
/**
 * <code>required string group_name = 1;</code>
 */
public Builder clearGroupName() {
  bitField0_ = (bitField0_ & ~0x00000001);
  groupName_ = getDefaultInstance().getGroupName();
  onChanged();
  return this;
}
/**
 * <code>required string group_name = 1;</code>
 */
public Builder setGroupNameBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000001;
  groupName_ = value;
  onChanged();
  return this;
}
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.CompleteRedistributeGroupRequest)
+ }
+
// Eagerly build the singleton default instance at class-load time.
static {
  defaultInstance = new CompleteRedistributeGroupRequest(true);
  defaultInstance.initFields();
}
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.CompleteRedistributeGroupRequest)
+ }
+
public interface CompleteRedistributeGroupResponseOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // required bool result = 1;
  /**
   * <code>required bool result = 1;</code>
   */
  boolean hasResult();
  /**
   * <code>required bool result = 1;</code>
   */
  boolean getResult();
}
+ /**
+ * Protobuf type {@code hbase.pb.CompleteRedistributeGroupResponse}
+ */
+ public static final class CompleteRedistributeGroupResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements CompleteRedistributeGroupResponseOrBuilder {
+ // Use CompleteRedistributeGroupResponse.newBuilder() to construct.
+ private CompleteRedistributeGroupResponse(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CompleteRedistributeGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CompleteRedistributeGroupResponse defaultInstance;
+ public static CompleteRedistributeGroupResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CompleteRedistributeGroupResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CompleteRedistributeGroupResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ result_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public CompleteRedistributeGroupResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CompleteRedistributeGroupResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required bool result = 1;
+ public static final int RESULT_FIELD_NUMBER = 1;
+ private boolean result_;
+ /**
+ * required bool result = 1;
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required bool result = 1;
+ */
+ public boolean getResult() {
+ return result_;
+ }
+
+ private void initFields() {
+ result_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasResult()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBool(1, result_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(1, result_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse) obj;
+
+ boolean result = true;
+ result = result && (hasResult() == other.hasResult());
+ if (hasResult()) {
+ result = result && (getResult()
+ == other.getResult());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasResult()) {
+ hash = (37 * hash) + RESULT_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getResult());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CompleteRedistributeGroupResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ result_ = false;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CompleteRedistributeGroupResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.result_ = result_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.getDefaultInstance()) return this;
+ if (other.hasResult()) {
+ setResult(other.getResult());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasResult()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required bool result = 1;
+ private boolean result_ ;
+ /**
+ * required bool result = 1;
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required bool result = 1;
+ */
+ public boolean getResult() {
+ return result_;
+ }
+ /**
+ * required bool result = 1;
+ */
+ public Builder setResult(boolean value) {
+ bitField0_ |= 0x00000001;
+ result_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool result = 1;
+ */
+ public Builder clearResult() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ result_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.CompleteRedistributeGroupResponse)
+ }
+
+ static {
+ defaultInstance = new CompleteRedistributeGroupResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.CompleteRedistributeGroupResponse)
+ }
+
public interface CheckGroupFavoredNodesRequestOrBuilder
    extends com.google.protobuf.MessageOrBuilder {

  // required string group_name = 1;
  /**
   * <code>required string group_name = 1;</code>
   */
  boolean hasGroupName();
  /**
   * <code>required string group_name = 1;</code>
   */
  java.lang.String getGroupName();
  /**
   * <code>required string group_name = 1;</code>
   */
  com.google.protobuf.ByteString
      getGroupNameBytes();

  // optional bool detailed = 2;
  /**
   * <code>optional bool detailed = 2;</code>
   */
  boolean hasDetailed();
  /**
   * <code>optional bool detailed = 2;</code>
   */
  boolean getDetailed();
}
+ /**
+ * Protobuf type {@code hbase.pb.CheckGroupFavoredNodesRequest}
+ */
+ public static final class CheckGroupFavoredNodesRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements CheckGroupFavoredNodesRequestOrBuilder {
// Use CheckGroupFavoredNodesRequest.newBuilder() to construct.
// Restored the wildcard type parameter on the builder argument; the patch
// text had "Builder>" after its angle brackets were stripped in extraction.
private CheckGroupFavoredNodesRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
  super(builder);
  this.unknownFields = builder.getUnknownFields();
}
+ private CheckGroupFavoredNodesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CheckGroupFavoredNodesRequest defaultInstance;
+ public static CheckGroupFavoredNodesRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CheckGroupFavoredNodesRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
// Wire-format parsing constructor used by PARSER: tag 10 is the required
// group_name (kept as raw bytes until first String access), tag 16 the
// optional detailed flag; unrecognized tags are preserved in unknownFields.
private CheckGroupFavoredNodesRequest(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  initFields();
  int mutable_bitField0_ = 0;
  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
      com.google.protobuf.UnknownFieldSet.newBuilder();
  try {
    boolean done = false;
    while (!done) {
      int tag = input.readTag();
      switch (tag) {
        case 0:
          done = true;
          break;
        default: {
          if (!parseUnknownField(input, unknownFields,
                                 extensionRegistry, tag)) {
            done = true;
          }
          break;
        }
        case 10: {
          bitField0_ |= 0x00000001;
          groupName_ = input.readBytes();
          break;
        }
        case 16: {
          bitField0_ |= 0x00000002;
          detailed_ = input.readBool();
          break;
        }
      }
    }
  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
    throw e.setUnfinishedMessage(this);
  } catch (java.io.IOException e) {
    throw new com.google.protobuf.InvalidProtocolBufferException(
        e.getMessage()).setUnfinishedMessage(this);
  } finally {
    // Always freeze whatever was read before the error (if any) so the
    // unfinished message attached to the exception is consistent.
    this.unknownFields = unknownFields.build();
    makeExtensionsImmutable();
  }
}
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public CheckGroupFavoredNodesRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CheckGroupFavoredNodesRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string group_name = 1;
+ public static final int GROUP_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object groupName_;
+ /**
+ * required string group_name = 1;
+ */
+ public boolean hasGroupName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public java.lang.String getGroupName() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ groupName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getGroupNameBytes() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ groupName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional bool detailed = 2;
+ public static final int DETAILED_FIELD_NUMBER = 2;
+ private boolean detailed_;
+ /**
+ * optional bool detailed = 2;
+ */
+ public boolean hasDetailed() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional bool detailed = 2;
+ */
+ public boolean getDetailed() {
+ return detailed_;
+ }
+
+ private void initFields() {
+ groupName_ = "";
+ detailed_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasGroupName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getGroupNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBool(2, detailed_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getGroupNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(2, detailed_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest) obj;
+
+ boolean result = true;
+ result = result && (hasGroupName() == other.hasGroupName());
+ if (hasGroupName()) {
+ result = result && getGroupName()
+ .equals(other.getGroupName());
+ }
+ result = result && (hasDetailed() == other.hasDetailed());
+ if (hasDetailed()) {
+ result = result && (getDetailed()
+ == other.getDetailed());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasGroupName()) {
+ hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getGroupName().hashCode();
+ }
+ if (hasDetailed()) {
+ hash = (37 * hash) + DETAILED_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getDetailed());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CheckGroupFavoredNodesRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ groupName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ detailed_ = false;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.groupName_ = groupName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.detailed_ = detailed_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.getDefaultInstance()) return this;
+ if (other.hasGroupName()) {
+ bitField0_ |= 0x00000001;
+ groupName_ = other.groupName_;
+ onChanged();
+ }
+ if (other.hasDetailed()) {
+ setDetailed(other.getDetailed());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasGroupName()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string group_name = 1;
+ private java.lang.Object groupName_ = "";
+ /**
+ * required string group_name = 1;
+ */
+ public boolean hasGroupName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public java.lang.String getGroupName() {
+ java.lang.Object ref = groupName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ groupName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getGroupNameBytes() {
+ java.lang.Object ref = groupName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ groupName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public Builder setGroupName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ groupName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public Builder clearGroupName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ groupName_ = getDefaultInstance().getGroupName();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string group_name = 1;
+ */
+ public Builder setGroupNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ groupName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional bool detailed = 2;
+ private boolean detailed_ ;
+ /**
+ * optional bool detailed = 2;
+ */
+ public boolean hasDetailed() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional bool detailed = 2;
+ */
+ public boolean getDetailed() {
+ return detailed_;
+ }
+ /**
+ * optional bool detailed = 2;
+ */
+ public Builder setDetailed(boolean value) {
+ bitField0_ |= 0x00000002;
+ detailed_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional bool detailed = 2;
+ */
+ public Builder clearDetailed() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ detailed_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.CheckGroupFavoredNodesRequest)
+ }
+
+ static {
+ defaultInstance = new CheckGroupFavoredNodesRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.CheckGroupFavoredNodesRequest)
+ }
+
+ public interface CheckGroupFavoredNodesResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .hbase.pb.ServerName servers = 1;
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
+ getServersList();
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index);
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ int getServersCount();
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList();
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CheckGroupFavoredNodesResponse}
+ */
+ public static final class CheckGroupFavoredNodesResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements CheckGroupFavoredNodesResponseOrBuilder {
+ // Use CheckGroupFavoredNodesResponse.newBuilder() to construct.
+ private CheckGroupFavoredNodesResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CheckGroupFavoredNodesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CheckGroupFavoredNodesResponse defaultInstance;
+ public static CheckGroupFavoredNodesResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CheckGroupFavoredNodesResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CheckGroupFavoredNodesResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ servers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ servers_ = java.util.Collections.unmodifiableList(servers_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<CheckGroupFavoredNodesResponse> PARSER =
+ new com.google.protobuf.AbstractParser<CheckGroupFavoredNodesResponse>() {
+ public CheckGroupFavoredNodesResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CheckGroupFavoredNodesResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<CheckGroupFavoredNodesResponse> getParserForType() {
+ return PARSER;
+ }
+
+ // repeated .hbase.pb.ServerName servers = 1;
+ public static final int SERVERS_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> servers_;
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServersList() {
+ return servers_;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList() {
+ return servers_;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public int getServersCount() {
+ return servers_.size();
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) {
+ return servers_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index) {
+ return servers_.get(index);
+ }
+
+ private void initFields() {
+ servers_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getServersCount(); i++) {
+ if (!getServers(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < servers_.size(); i++) {
+ output.writeMessage(1, servers_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < servers_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, servers_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse) obj;
+
+ boolean result = true;
+ result = result && getServersList()
+ .equals(other.getServersList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getServersCount() > 0) {
+ hash = (37 * hash) + SERVERS_FIELD_NUMBER;
+ hash = (53 * hash) + getServersList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.CheckGroupFavoredNodesResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getServersFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (serversBuilder_ == null) {
+ servers_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ serversBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_CheckGroupFavoredNodesResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse(this);
+ int from_bitField0_ = bitField0_;
+ if (serversBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ servers_ = java.util.Collections.unmodifiableList(servers_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.servers_ = servers_;
+ } else {
+ result.servers_ = serversBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.getDefaultInstance()) return this;
+ if (serversBuilder_ == null) {
+ if (!other.servers_.isEmpty()) {
+ if (servers_.isEmpty()) {
+ servers_ = other.servers_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureServersIsMutable();
+ servers_.addAll(other.servers_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.servers_.isEmpty()) {
+ if (serversBuilder_.isEmpty()) {
+ serversBuilder_.dispose();
+ serversBuilder_ = null;
+ servers_ = other.servers_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ serversBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getServersFieldBuilder() : null;
+ } else {
+ serversBuilder_.addAllMessages(other.servers_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getServersCount(); i++) {
+ if (!getServers(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated .hbase.pb.ServerName servers = 1;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> servers_ =
+ java.util.Collections.emptyList();
+ private void ensureServersIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ servers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(servers_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serversBuilder_;
+
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServersList() {
+ if (serversBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(servers_);
+ } else {
+ return serversBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public int getServersCount() {
+ if (serversBuilder_ == null) {
+ return servers_.size();
+ } else {
+ return serversBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) {
+ if (serversBuilder_ == null) {
+ return servers_.get(index);
+ } else {
+ return serversBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder setServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.set(index, value);
+ onChanged();
+ } else {
+ serversBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder setServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.add(value);
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.add(index, value);
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addServers(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.add(builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addAllServers(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ super.addAll(values, servers_);
+ onChanged();
+ } else {
+ serversBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder clearServers() {
+ if (serversBuilder_ == null) {
+ servers_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ serversBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder removeServers(int index) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.remove(index);
+ onChanged();
+ } else {
+ serversBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServersBuilder(
+ int index) {
+ return getServersFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index) {
+ if (serversBuilder_ == null) {
+ return servers_.get(index); } else {
+ return serversBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList() {
+ if (serversBuilder_ != null) {
+ return serversBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(servers_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder() {
+ return getServersFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder(
+ int index) {
+ return getServersFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
+ getServersBuilderList() {
+ return getServersFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersFieldBuilder() {
+ if (serversBuilder_ == null) {
+ serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+ servers_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ servers_ = null;
+ }
+ return serversBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.CheckGroupFavoredNodesResponse)
+ }
+
+ static {
+ defaultInstance = new CheckGroupFavoredNodesResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.CheckGroupFavoredNodesResponse)
+ }
+
/**
* Protobuf service {@code hbase.pb.RSGroupAdminService}
*/
@@ -10834,6 +14080,30 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse> done);
+ /**
+ * rpc RedistributeGroup(.hbase.pb.RedistributeGroupRequest) returns (.hbase.pb.RedistributeGroupResponse);
+ */
+ public abstract void redistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse> done);
+
+ /**
+ * rpc CompleteRedistributeGroup(.hbase.pb.CompleteRedistributeGroupRequest) returns (.hbase.pb.CompleteRedistributeGroupResponse);
+ */
+ public abstract void completeRedistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse> done);
+
+ /**
+ * rpc checkGroupFavoredNodes(.hbase.pb.CheckGroupFavoredNodesRequest) returns (.hbase.pb.CheckGroupFavoredNodesResponse);
+ */
+ public abstract void checkGroupFavoredNodes(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -10911,6 +14181,30 @@ public final class RSGroupAdminProtos {
impl.listRSGroupInfos(controller, request, done);
}
+ @java.lang.Override
+ public void redistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest request,
+ com.google.protobuf.RpcCallback done) {
+ impl.redistributeGroup(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void completeRedistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest request,
+ com.google.protobuf.RpcCallback done) {
+ impl.completeRedistributeGroup(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void checkGroupFavoredNodes(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest request,
+ com.google.protobuf.RpcCallback done) {
+ impl.checkGroupFavoredNodes(controller, request, done);
+ }
+
};
}
@@ -10951,6 +14245,12 @@ public final class RSGroupAdminProtos {
return impl.balanceRSGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest)request);
case 8:
return impl.listRSGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest)request);
+ case 9:
+ return impl.redistributeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest)request);
+ case 10:
+ return impl.completeRedistributeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest)request);
+ case 11:
+ return impl.checkGroupFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -10983,6 +14283,12 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.getDefaultInstance();
case 8:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance();
+ case 9:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.getDefaultInstance();
+ case 10:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.getDefaultInstance();
+ case 11:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11015,6 +14321,12 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance();
case 8:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance();
+ case 9:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.getDefaultInstance();
+ case 10:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.getDefaultInstance();
+ case 11:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11095,6 +14407,30 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request,
com.google.protobuf.RpcCallback done);
+ /**
+ * rpc RedistributeGroup(.hbase.pb.RedistributeGroupRequest) returns (.hbase.pb.RedistributeGroupResponse);
+ */
+ public abstract void redistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest request,
+ com.google.protobuf.RpcCallback done);
+
+ /**
+ * rpc CompleteRedistributeGroup(.hbase.pb.CompleteRedistributeGroupRequest) returns (.hbase.pb.CompleteRedistributeGroupResponse);
+ */
+ public abstract void completeRedistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest request,
+ com.google.protobuf.RpcCallback done);
+
+ /**
+ * rpc checkGroupFavoredNodes(.hbase.pb.CheckGroupFavoredNodesRequest) returns (.hbase.pb.CheckGroupFavoredNodesResponse);
+ */
+ public abstract void checkGroupFavoredNodes(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest request,
+ com.google.protobuf.RpcCallback done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -11162,6 +14498,21 @@ public final class RSGroupAdminProtos {
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
+ case 9:
+ this.redistributeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ case 10:
+ this.completeRedistributeGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ case 11:
+ this.checkGroupFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11194,6 +14545,12 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.getDefaultInstance();
case 8:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance();
+ case 9:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest.getDefaultInstance();
+ case 10:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest.getDefaultInstance();
+ case 11:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11226,6 +14583,12 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance();
case 8:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance();
+ case 9:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.getDefaultInstance();
+ case 10:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.getDefaultInstance();
+ case 11:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -11381,6 +14744,51 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance()));
}
+
+ public void redistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest request,
+ com.google.protobuf.RpcCallback done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(9),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.getDefaultInstance()));
+ }
+
+ public void completeRedistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest request,
+ com.google.protobuf.RpcCallback done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(10),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.getDefaultInstance()));
+ }
+
+ public void checkGroupFavoredNodes(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest request,
+ com.google.protobuf.RpcCallback done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(11),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -11433,6 +14841,21 @@ public final class RSGroupAdminProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse redistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse completeRedistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse checkGroupFavoredNodes(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -11549,6 +14972,42 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse redistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(9),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse completeRedistributeGroup(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(10),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse checkGroupFavoredNodes(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(11),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:hbase.pb.RSGroupAdminService)
@@ -11654,6 +15113,36 @@ public final class RSGroupAdminProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_RedistributeGroupRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_RedistributeGroupRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_RedistributeGroupResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_RedistributeGroupResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_CompleteRedistributeGroupRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_CompleteRedistributeGroupRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_CompleteRedistributeGroupResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_CompleteRedistributeGroupResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_CheckGroupFavoredNodesRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_CheckGroupFavoredNodesRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_CheckGroupFavoredNodesResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_CheckGroupFavoredNodesResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -11692,28 +15181,45 @@ public final class RSGroupAdminProtos {
"t\022$\n\006server\030\002 \002(\0132\024.hbase.pb.ServerName\"" +
"O\n\036GetRSGroupInfoOfServerResponse\022-\n\016r_s" +
"_group_info\030\001 \001(\0132\025.hbase.pb.RSGroupInfo" +
- "2\241\006\n\023RSGroupAdminService\022S\n\016GetRSGroupIn",
- "fo\022\037.hbase.pb.GetRSGroupInfoRequest\032 .hb" +
- "ase.pb.GetRSGroupInfoResponse\022h\n\025GetRSGr" +
- "oupInfoOfTable\022&.hbase.pb.GetRSGroupInfo" +
- "OfTableRequest\032\'.hbase.pb.GetRSGroupInfo" +
- "OfTableResponse\022k\n\026GetRSGroupInfoOfServe" +
- "r\022\'.hbase.pb.GetRSGroupInfoOfServerReque" +
- "st\032(.hbase.pb.GetRSGroupInfoOfServerResp" +
- "onse\022J\n\013MoveServers\022\034.hbase.pb.MoveServe" +
- "rsRequest\032\035.hbase.pb.MoveServersResponse" +
- "\022G\n\nMoveTables\022\033.hbase.pb.MoveTablesRequ",
- "est\032\034.hbase.pb.MoveTablesResponse\022G\n\nAdd" +
- "RSGroup\022\033.hbase.pb.AddRSGroupRequest\032\034.h" +
- "base.pb.AddRSGroupResponse\022P\n\rRemoveRSGr" +
- "oup\022\036.hbase.pb.RemoveRSGroupRequest\032\037.hb" +
- "ase.pb.RemoveRSGroupResponse\022S\n\016BalanceR" +
- "SGroup\022\037.hbase.pb.BalanceRSGroupRequest\032" +
- " .hbase.pb.BalanceRSGroupResponse\022Y\n\020Lis" +
- "tRSGroupInfos\022!.hbase.pb.ListRSGroupInfo" +
- "sRequest\032\".hbase.pb.ListRSGroupInfosResp" +
- "onseBH\n*org.apache.hadoop.hbase.protobuf",
- ".generatedB\022RSGroupAdminProtosH\001\210\001\001\240\001\001"
+ "\".\n\030RedistributeGroupRequest\022\022\n\ngroup_na",
+ "me\030\001 \002(\t\"+\n\031RedistributeGroupResponse\022\016\n" +
+ "\006result\030\001 \002(\010\"6\n CompleteRedistributeGro" +
+ "upRequest\022\022\n\ngroup_name\030\001 \002(\t\"3\n!Complet" +
+ "eRedistributeGroupResponse\022\016\n\006result\030\001 \002" +
+ "(\010\"E\n\035CheckGroupFavoredNodesRequest\022\022\n\ng" +
+ "roup_name\030\001 \002(\t\022\020\n\010detailed\030\002 \001(\010\"G\n\036Che" +
+ "ckGroupFavoredNodesResponse\022%\n\007servers\030\001" +
+ " \003(\0132\024.hbase.pb.ServerName2\342\010\n\023RSGroupAd" +
+ "minService\022S\n\016GetRSGroupInfo\022\037.hbase.pb." +
+ "GetRSGroupInfoRequest\032 .hbase.pb.GetRSGr",
+ "oupInfoResponse\022h\n\025GetRSGroupInfoOfTable" +
+ "\022&.hbase.pb.GetRSGroupInfoOfTableRequest" +
+ "\032\'.hbase.pb.GetRSGroupInfoOfTableRespons" +
+ "e\022k\n\026GetRSGroupInfoOfServer\022\'.hbase.pb.G" +
+ "etRSGroupInfoOfServerRequest\032(.hbase.pb." +
+ "GetRSGroupInfoOfServerResponse\022J\n\013MoveSe" +
+ "rvers\022\034.hbase.pb.MoveServersRequest\032\035.hb" +
+ "ase.pb.MoveServersResponse\022G\n\nMoveTables" +
+ "\022\033.hbase.pb.MoveTablesRequest\032\034.hbase.pb" +
+ ".MoveTablesResponse\022G\n\nAddRSGroup\022\033.hbas",
+ "e.pb.AddRSGroupRequest\032\034.hbase.pb.AddRSG" +
+ "roupResponse\022P\n\rRemoveRSGroup\022\036.hbase.pb" +
+ ".RemoveRSGroupRequest\032\037.hbase.pb.RemoveR" +
+ "SGroupResponse\022S\n\016BalanceRSGroup\022\037.hbase" +
+ ".pb.BalanceRSGroupRequest\032 .hbase.pb.Bal" +
+ "anceRSGroupResponse\022Y\n\020ListRSGroupInfos\022" +
+ "!.hbase.pb.ListRSGroupInfosRequest\032\".hba" +
+ "se.pb.ListRSGroupInfosResponse\022\\\n\021Redist" +
+ "ributeGroup\022\".hbase.pb.RedistributeGroup" +
+ "Request\032#.hbase.pb.RedistributeGroupResp",
+ "onse\022t\n\031CompleteRedistributeGroup\022*.hbas" +
+ "e.pb.CompleteRedistributeGroupRequest\032+." +
+ "hbase.pb.CompleteRedistributeGroupRespon" +
+ "se\022k\n\026checkGroupFavoredNodes\022\'.hbase.pb." +
+ "CheckGroupFavoredNodesRequest\032(.hbase.pb" +
+ ".CheckGroupFavoredNodesResponseBH\n*org.a" +
+ "pache.hadoop.hbase.protobuf.generatedB\022R" +
+ "SGroupAdminProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11840,6 +15346,42 @@ public final class RSGroupAdminProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_descriptor,
new java.lang.String[] { "RSGroupInfo", });
+ internal_static_hbase_pb_RedistributeGroupRequest_descriptor =
+ getDescriptor().getMessageTypes().get(20);
+ internal_static_hbase_pb_RedistributeGroupRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_RedistributeGroupRequest_descriptor,
+ new java.lang.String[] { "GroupName", });
+ internal_static_hbase_pb_RedistributeGroupResponse_descriptor =
+ getDescriptor().getMessageTypes().get(21);
+ internal_static_hbase_pb_RedistributeGroupResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_RedistributeGroupResponse_descriptor,
+ new java.lang.String[] { "Result", });
+ internal_static_hbase_pb_CompleteRedistributeGroupRequest_descriptor =
+ getDescriptor().getMessageTypes().get(22);
+ internal_static_hbase_pb_CompleteRedistributeGroupRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_CompleteRedistributeGroupRequest_descriptor,
+ new java.lang.String[] { "GroupName", });
+ internal_static_hbase_pb_CompleteRedistributeGroupResponse_descriptor =
+ getDescriptor().getMessageTypes().get(23);
+ internal_static_hbase_pb_CompleteRedistributeGroupResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_CompleteRedistributeGroupResponse_descriptor,
+ new java.lang.String[] { "Result", });
+ internal_static_hbase_pb_CheckGroupFavoredNodesRequest_descriptor =
+ getDescriptor().getMessageTypes().get(24);
+ internal_static_hbase_pb_CheckGroupFavoredNodesRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_CheckGroupFavoredNodesRequest_descriptor,
+ new java.lang.String[] { "GroupName", "Detailed", });
+ internal_static_hbase_pb_CheckGroupFavoredNodesResponse_descriptor =
+ getDescriptor().getMessageTypes().get(25);
+ internal_static_hbase_pb_CheckGroupFavoredNodesResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_CheckGroupFavoredNodesResponse_descriptor,
+ new java.lang.String[] { "Servers", });
return null;
}
};
diff --git a/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto
index fda9b09..b584810 100644
--- a/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto
+++ b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto
@@ -106,6 +106,32 @@ message GetRSGroupInfoOfServerResponse {
optional RSGroupInfo r_s_group_info = 1;
}
+message RedistributeGroupRequest {
+ required string group_name = 1;
+}
+
+message RedistributeGroupResponse {
+ required bool result = 1;
+}
+
+message CompleteRedistributeGroupRequest {
+ required string group_name = 1;
+}
+
+message CompleteRedistributeGroupResponse {
+ required bool result = 1;
+}
+
+message CheckGroupFavoredNodesRequest {
+ required string group_name = 1;
+ optional bool detailed = 2;
+}
+
+message CheckGroupFavoredNodesResponse {
+ repeated ServerName servers = 1;
+}
+
+
service RSGroupAdminService {
rpc GetRSGroupInfo(GetRSGroupInfoRequest)
returns (GetRSGroupInfoResponse);
@@ -133,4 +159,13 @@ service RSGroupAdminService {
rpc ListRSGroupInfos(ListRSGroupInfosRequest)
returns (ListRSGroupInfosResponse);
+
+ rpc RedistributeGroup(RedistributeGroupRequest)
+ returns (RedistributeGroupResponse);
+
+ rpc CompleteRedistributeGroup(CompleteRedistributeGroupRequest)
+ returns (CompleteRedistributeGroupResponse);
+
+ rpc checkGroupFavoredNodes(CheckGroupFavoredNodesRequest)
+ returns (CheckGroupFavoredNodesResponse);
}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/AssignmentVerificationReport.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/AssignmentVerificationReport.java
new file mode 100644
index 0000000..858394c
--- /dev/null
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/AssignmentVerificationReport.java
@@ -0,0 +1,641 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import java.text.DecimalFormat;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
+/**
+ * Helper class that is used by {@link RegionPlacementMaintainer} to print
+ * information for favored nodes
+ *
+ */
+@InterfaceAudience.Private
+public class AssignmentVerificationReport {
+ private static final Log LOG = LogFactory.getLog(
+ AssignmentVerificationReport.class.getName());
+
+ private TableName tableName = null;
+ private boolean enforceLocality = false;
+ private boolean isFilledUp = false;
+
+ private int totalRegions = 0;
+ private int totalRegionServers = 0;
+ // for unassigned regions
+ private List unAssignedRegionsList =
+ new ArrayList();
+
+ // For regions without valid favored nodes
+ private List regionsWithoutValidFavoredNodes =
+ new ArrayList();
+
+ // For regions not running on the favored nodes
+ private List nonFavoredAssignedRegionList =
+ new ArrayList();
+
+ // For regions running on the favored nodes
+ private int totalFavoredAssignments = 0;
+ private int[] favoredNodes = new int[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM];
+ private float[] favoredNodesLocalitySummary =
+ new float[FavoredNodeAssignmentHelper.FAVORED_NODES_NUM];
+ private float actualLocalitySummary = 0;
+
+ // For region balancing information
+ private float avgRegionsOnRS = 0;
+ private int maxRegionsOnRS = 0;
+ private int minRegionsOnRS = Integer.MAX_VALUE;
+ private Set mostLoadedRSSet =
+ new HashSet();
+ private Set leastLoadedRSSet =
+ new HashSet();
+
+ private float avgDispersionScore = 0;
+ private float maxDispersionScore = 0;
+ private Set maxDispersionScoreServerSet =
+ new HashSet();
+ private float minDispersionScore = Float.MAX_VALUE;
+ private Set minDispersionScoreServerSet =
+ new HashSet();
+
+ private float avgDispersionNum = 0;
+ private float maxDispersionNum = 0;
+ private Set maxDispersionNumServerSet =
+ new HashSet();
+ private float minDispersionNum = Float.MAX_VALUE;
+ private Set minDispersionNumServerSet =
+ new HashSet();
+
+ public void fillUp(TableName tableName, SnapshotOfRegionAssignmentFromMeta snapshot,
+ Map> regionLocalityMap) {
+ // Set the table name
+ this.tableName = tableName;
+
+ // Get all the regions for this table
+ List regionInfoList =
+ snapshot.getTableToRegionMap().get(tableName);
+ // Get the total region num for the current table
+ this.totalRegions = regionInfoList.size();
+
+ // Get the existing assignment plan
+ FavoredNodesPlan favoredNodesAssignment = snapshot.getExistingAssignmentPlan();
+ // Get the region to region server mapping
+ Map currentAssignment =
+ snapshot.getRegionToRegionServerMap();
+ // Initialize the server to its hosting region counter map
+ Map serverToHostingRegionCounterMap =
+ new HashMap();
+
+ Map primaryRSToRegionCounterMap =
+ new HashMap();
+ Map> primaryToSecTerRSMap =
+ new HashMap>();
+
+ // Check the favored nodes and its locality information
+ // Also keep track of the most loaded and least loaded region servers
+ for (HRegionInfo region : regionInfoList) {
+ try {
+ ServerName currentRS = currentAssignment.get(region);
+ // Handle unassigned regions
+ if (currentRS == null) {
+ unAssignedRegionsList.add(region);
+ continue;
+ }
+
+ // Keep updating the server to its hosting region counter map
+ Integer hostRegionCounter = serverToHostingRegionCounterMap.get(currentRS);
+ if (hostRegionCounter == null) {
+ hostRegionCounter = Integer.valueOf(0);
+ }
+ hostRegionCounter = hostRegionCounter.intValue() + 1;
+ serverToHostingRegionCounterMap.put(currentRS, hostRegionCounter);
+
+ // Get the favored nodes from the assignment plan and verify it.
+ List favoredNodes = favoredNodesAssignment.getFavoredNodes(region);
+ if (favoredNodes == null ||
+ favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
+ regionsWithoutValidFavoredNodes.add(region);
+ continue;
+ }
+ // Get the primary, secondary and tertiary region server
+ ServerName primaryRS =
+ favoredNodes.get(FavoredNodesPlan.Position.PRIMARY.ordinal());
+ ServerName secondaryRS =
+ favoredNodes.get(FavoredNodesPlan.Position.SECONDARY.ordinal());
+ ServerName tertiaryRS =
+ favoredNodes.get(FavoredNodesPlan.Position.TERTIARY.ordinal());
+
+ // Update the primary rs to its region set map
+ Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS);
+ if (regionCounter == null) {
+ regionCounter = Integer.valueOf(0);
+ }
+ regionCounter = regionCounter.intValue() + 1;
+ primaryRSToRegionCounterMap.put(primaryRS, regionCounter);
+
+ // Update the primary rs to secondary and tertiary rs map
+ Set secAndTerSet = primaryToSecTerRSMap.get(primaryRS);
+ if (secAndTerSet == null) {
+ secAndTerSet = new HashSet();
+ }
+ secAndTerSet.add(secondaryRS);
+ secAndTerSet.add(tertiaryRS);
+ primaryToSecTerRSMap.put(primaryRS, secAndTerSet);
+
+ // Get the position of the current region server in the favored nodes list
+ FavoredNodesPlan.Position favoredNodePosition =
+ FavoredNodesPlan.getFavoredServerPosition(favoredNodes, currentRS);
+
+ // Handle the non favored assignment.
+ if (favoredNodePosition == null) {
+ nonFavoredAssignedRegionList.add(region);
+ continue;
+ }
+ // Increase the favored nodes assignment.
+ this.favoredNodes[favoredNodePosition.ordinal()]++;
+ totalFavoredAssignments++;
+
+ // Summarize the locality information for each favored node
+ if (regionLocalityMap != null) {
+ // Set the enforce locality as true;
+ this.enforceLocality = true;
+
+ // Get the region degree locality map
+ Map regionDegreeLocalityMap =
+ regionLocalityMap.get(region.getEncodedName());
+ if (regionDegreeLocalityMap == null) {
+ continue; // ignore the region which doesn't have any store files.
+ }
+
+ // Get the locality summary for each favored node
+ for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
+ ServerName favoredNode = favoredNodes.get(p.ordinal());
+ // Get the locality for the current favored nodes
+ Float locality =
+ regionDegreeLocalityMap.get(favoredNode.getHostname());
+ if (locality != null) {
+ this.favoredNodesLocalitySummary[p.ordinal()] += locality;
+ }
+ }
+
+ // Get the locality summary for the current region server
+ Float actualLocality =
+ regionDegreeLocalityMap.get(currentRS.getHostname());
+ if (actualLocality != null) {
+ this.actualLocalitySummary += actualLocality;
+ }
+ }
+ } catch (Exception e) {
+ LOG.error("Cannot verify the region assignment for region " +
+ ((region == null) ? " null " : region.getRegionNameAsString()) +
+ "because of " + e);
+ }
+ }
+
+ float dispersionScoreSummary = 0;
+ float dispersionNumSummary = 0;
+ // Calculate the secondary score for each primary region server
+ for (Map.Entry entry :
+ primaryRSToRegionCounterMap.entrySet()) {
+ ServerName primaryRS = entry.getKey();
+ Integer regionsOnPrimary = entry.getValue();
+
+ // Process the dispersion number and score
+ float dispersionScore = 0;
+ int dispersionNum = 0;
+ if (primaryToSecTerRSMap.get(primaryRS) != null
+ && regionsOnPrimary.intValue() != 0) {
+ dispersionNum = primaryToSecTerRSMap.get(primaryRS).size();
+ dispersionScore = dispersionNum /
+ ((float) regionsOnPrimary.intValue() * 2);
+ }
+ // Update the max dispersion score
+ if (dispersionScore > this.maxDispersionScore) {
+ this.maxDispersionScoreServerSet.clear();
+ this.maxDispersionScoreServerSet.add(primaryRS);
+ this.maxDispersionScore = dispersionScore;
+ } else if (dispersionScore == this.maxDispersionScore) {
+ this.maxDispersionScoreServerSet.add(primaryRS);
+ }
+
+ // Update the max dispersion num
+ if (dispersionNum > this.maxDispersionNum) {
+ this.maxDispersionNumServerSet.clear();
+ this.maxDispersionNumServerSet.add(primaryRS);
+ this.maxDispersionNum = dispersionNum;
+ } else if (dispersionNum == this.maxDispersionNum) {
+ this.maxDispersionNumServerSet.add(primaryRS);
+ }
+
+ // Update the min dispersion score
+ if (dispersionScore < this.minDispersionScore) {
+ this.minDispersionScoreServerSet.clear();
+ this.minDispersionScoreServerSet.add(primaryRS);
+ this.minDispersionScore = dispersionScore;
+ } else if (dispersionScore == this.minDispersionScore) {
+ this.minDispersionScoreServerSet.add(primaryRS);
+ }
+
+ // Update the min dispersion num
+ if (dispersionNum < this.minDispersionNum) {
+ this.minDispersionNumServerSet.clear();
+ this.minDispersionNumServerSet.add(primaryRS);
+ this.minDispersionNum = dispersionNum;
+ } else if (dispersionNum == this.minDispersionNum) {
+ this.minDispersionNumServerSet.add(primaryRS);
+ }
+
+ dispersionScoreSummary += dispersionScore;
+ dispersionNumSummary += dispersionNum;
+ }
+
+ // Update the avg dispersion score
+ if (primaryRSToRegionCounterMap.keySet().size() != 0) {
+ this.avgDispersionScore = dispersionScoreSummary /
+ (float) primaryRSToRegionCounterMap.keySet().size();
+ this.avgDispersionNum = dispersionNumSummary /
+ (float) primaryRSToRegionCounterMap.keySet().size();
+ }
+
+ // Fill up the most loaded and least loaded region server information
+ for (Map.Entry entry :
+ serverToHostingRegionCounterMap.entrySet()) {
+ ServerName currentRS = entry.getKey();
+ int hostRegionCounter = entry.getValue().intValue();
+
+ // Update the most loaded region server list and maxRegionsOnRS
+ if (hostRegionCounter > this.maxRegionsOnRS) {
+ maxRegionsOnRS = hostRegionCounter;
+ this.mostLoadedRSSet.clear();
+ this.mostLoadedRSSet.add(currentRS);
+ } else if (hostRegionCounter == this.maxRegionsOnRS) {
+ this.mostLoadedRSSet.add(currentRS);
+ }
+
+ // Update the least loaded region server list and minRegionsOnRS
+ if (hostRegionCounter < this.minRegionsOnRS) {
+ this.minRegionsOnRS = hostRegionCounter;
+ this.leastLoadedRSSet.clear();
+ this.leastLoadedRSSet.add(currentRS);
+ } else if (hostRegionCounter == this.minRegionsOnRS) {
+ this.leastLoadedRSSet.add(currentRS);
+ }
+ }
+
+ // and total region servers
+ this.totalRegionServers = serverToHostingRegionCounterMap.keySet().size();
+ this.avgRegionsOnRS = (totalRegionServers == 0) ? 0 :
+ (totalRegions / (float) totalRegionServers);
+ // Set the isFilledUp as true
+ isFilledUp = true;
+ }
+
+ /**
+ * Projects the dispersion statistics (dispersion num and score) for the
+ * given table under the supplied assignment plan. When {@code newPlan} is
+ * null the snapshot's existing plan is used. Only the dispersion counters
+ * are updated; locality and load statistics are untouched (use fillUp for a
+ * complete report).
+ * @param tableName table whose regions are examined
+ * @param snapshot snapshot of the current region assignment from META
+ * @param newPlan candidate favored-nodes plan, or null for the current plan
+ */
+ public void fillUpDispersion(TableName tableName,
+ SnapshotOfRegionAssignmentFromMeta snapshot, FavoredNodesPlan newPlan) {
+ // Set the table name
+ this.tableName = tableName;
+ // Get all the regions for this table
+ List regionInfoList = snapshot.getTableToRegionMap().get(
+ tableName);
+ // Get the total region num for the current table
+ this.totalRegions = regionInfoList.size();
+ FavoredNodesPlan plan = null;
+ if (newPlan == null) {
+ plan = snapshot.getExistingAssignmentPlan();
+ } else {
+ plan = newPlan;
+ }
+ // Get the region to region server mapping
+ Map primaryRSToRegionCounterMap =
+ new HashMap();
+ Map> primaryToSecTerRSMap =
+ new HashMap>();
+
+ // Check the favored nodes and its locality information
+ // Also keep tracker of the most loaded and least loaded region servers
+ for (HRegionInfo region : regionInfoList) {
+ try {
+ // Get the favored nodes from the assignment plan and verify it.
+ List favoredNodes = plan.getFavoredNodes(region);
+ if (favoredNodes == null
+ || favoredNodes.size() != FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
+ regionsWithoutValidFavoredNodes.add(region);
+ continue;
+ }
+ // Get the primary, secondary and tertiary region server
+ ServerName primaryRS = favoredNodes
+ .get(FavoredNodesPlan.Position.PRIMARY.ordinal());
+ ServerName secondaryRS = favoredNodes
+ .get(FavoredNodesPlan.Position.SECONDARY.ordinal());
+ ServerName tertiaryRS = favoredNodes
+ .get(FavoredNodesPlan.Position.TERTIARY.ordinal());
+
+ // Update the primary rs to its region set map
+ Integer regionCounter = primaryRSToRegionCounterMap.get(primaryRS);
+ if (regionCounter == null) {
+ regionCounter = Integer.valueOf(0);
+ }
+ regionCounter = regionCounter.intValue() + 1;
+ primaryRSToRegionCounterMap.put(primaryRS, regionCounter);
+
+ // Update the primary rs to secondary and tertiary rs map
+ Set secAndTerSet = primaryToSecTerRSMap.get(primaryRS);
+ if (secAndTerSet == null) {
+ secAndTerSet = new HashSet();
+ }
+ secAndTerSet.add(secondaryRS);
+ secAndTerSet.add(tertiaryRS);
+ primaryToSecTerRSMap.put(primaryRS, secAndTerSet);
+ } catch (Exception e) {
+ LOG.error("Cannot verify the region assignment for region "
+ + ((region == null) ? " null " : region.getRegionNameAsString())
+ + "because of " + e);
+ }
+ }
+ float dispersionScoreSummary = 0;
+ float dispersionNumSummary = 0;
+ // Calculate the secondary score for each primary region server
+ for (Map.Entry entry :
+ primaryRSToRegionCounterMap.entrySet()) {
+ ServerName primaryRS = entry.getKey();
+ Integer regionsOnPrimary = entry.getValue();
+
+ // Process the dispersion number and score
+ float dispersionScore = 0;
+ int dispersionNum = 0;
+ if (primaryToSecTerRSMap.get(primaryRS) != null
+ && regionsOnPrimary.intValue() != 0) {
+ dispersionNum = primaryToSecTerRSMap.get(primaryRS).size();
+ dispersionScore = dispersionNum /
+ ((float) regionsOnPrimary.intValue() * 2);
+ }
+
+ // Update the max dispersion score. BUGFIX: this block was missing here
+ // (it exists in fillUp()), so getDispersionInformation() reported a
+ // stale maxDispersionScore after a dispersion-only projection.
+ if (dispersionScore > this.maxDispersionScore) {
+ this.maxDispersionScoreServerSet.clear();
+ this.maxDispersionScoreServerSet.add(primaryRS);
+ this.maxDispersionScore = dispersionScore;
+ } else if (dispersionScore == this.maxDispersionScore) {
+ this.maxDispersionScoreServerSet.add(primaryRS);
+ }
+
+ // Update the max dispersion num
+ if (dispersionNum > this.maxDispersionNum) {
+ this.maxDispersionNumServerSet.clear();
+ this.maxDispersionNumServerSet.add(primaryRS);
+ this.maxDispersionNum = dispersionNum;
+ } else if (dispersionNum == this.maxDispersionNum) {
+ this.maxDispersionNumServerSet.add(primaryRS);
+ }
+
+ // Update the min dispersion score
+ if (dispersionScore < this.minDispersionScore) {
+ this.minDispersionScoreServerSet.clear();
+ this.minDispersionScoreServerSet.add(primaryRS);
+ this.minDispersionScore = dispersionScore;
+ } else if (dispersionScore == this.minDispersionScore) {
+ this.minDispersionScoreServerSet.add(primaryRS);
+ }
+
+ // Update the min dispersion num
+ if (dispersionNum < this.minDispersionNum) {
+ this.minDispersionNumServerSet.clear();
+ this.minDispersionNumServerSet.add(primaryRS);
+ this.minDispersionNum = dispersionNum;
+ } else if (dispersionNum == this.minDispersionNum) {
+ this.minDispersionNumServerSet.add(primaryRS);
+ }
+
+ dispersionScoreSummary += dispersionScore;
+ dispersionNumSummary += dispersionNum;
+ }
+
+ // Update the avg dispersion score
+ if (primaryRSToRegionCounterMap.keySet().size() != 0) {
+ this.avgDispersionScore = dispersionScoreSummary /
+ (float) primaryRSToRegionCounterMap.keySet().size();
+ this.avgDispersionNum = dispersionNumSummary /
+ (float) primaryRSToRegionCounterMap.keySet().size();
+ }
+ }
+
+ /**
+ * Returns the dispersion summary as a three-element list holding the
+ * average, maximum and minimum dispersion scores, in that order.
+ * @return [avgDispersionScore, maxDispersionScore, minDispersionScore]
+ */
+ public List getDispersionInformation() {
+ List scores = new ArrayList();
+ scores.add(this.avgDispersionScore);
+ scores.add(this.maxDispersionScore);
+ scores.add(this.minDispersionScore);
+ return scores;
+ }
+
+ /**
+ * Prints the assignment verification report to stdout.
+ * @param isDetailMode when true, also lists the individual regions and
+ * region servers behind each summary counter
+ */
+ public void print(boolean isDetailMode) {
+ if (!isFilledUp) {
+ // BUGFIX: fixed "verfication" typo and the missing space between the
+ // two concatenated literals (previously printed "reporthasn't").
+ System.err.println("[Error] Region assignment verification report " +
+ "hasn't been filled up");
+ }
+ DecimalFormat df = new java.text.DecimalFormat("#.##");
+
+ // Print some basic information
+ System.out.println("Region Assignment Verification for Table: " + tableName +
+ "\n\tTotal regions : " + totalRegions);
+
+ // Print the number of regions on each kinds of the favored nodes
+ System.out.println("\tTotal regions on favored nodes " +
+ totalFavoredAssignments);
+ for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
+ System.out.println("\t\tTotal regions on "+ p.toString() +
+ " region servers: " + favoredNodes[p.ordinal()]);
+ }
+ // Print the number of regions in each kinds of invalid assignment
+ System.out.println("\tTotal unassigned regions: " +
+ unAssignedRegionsList.size());
+ if (isDetailMode) {
+ for (HRegionInfo region : unAssignedRegionsList) {
+ System.out.println("\t\t" + region.getRegionNameAsString());
+ }
+ }
+
+ System.out.println("\tTotal regions NOT on favored nodes: " +
+ nonFavoredAssignedRegionList.size());
+ if (isDetailMode) {
+ for (HRegionInfo region : nonFavoredAssignedRegionList) {
+ System.out.println("\t\t" + region.getRegionNameAsString());
+ }
+ }
+
+ System.out.println("\tTotal regions without favored nodes: " +
+ regionsWithoutValidFavoredNodes.size());
+ if (isDetailMode) {
+ for (HRegionInfo region : regionsWithoutValidFavoredNodes) {
+ System.out.println("\t\t" + region.getRegionNameAsString());
+ }
+ }
+
+ // Print the locality information if enabled
+ if (this.enforceLocality && totalRegions != 0) {
+ // Print the actual locality for this table
+ float actualLocality = 100 *
+ this.actualLocalitySummary / (float) totalRegions;
+ System.out.println("\n\tThe actual avg locality is " +
+ df.format(actualLocality) + " %");
+
+ // Print the expected locality if regions are placed on the each kinds of
+ // favored nodes
+ for (FavoredNodesPlan.Position p : FavoredNodesPlan.Position.values()) {
+ float avgLocality = 100 *
+ (favoredNodesLocalitySummary[p.ordinal()] / (float) totalRegions);
+ System.out.println("\t\tThe expected avg locality if all regions" +
+ " on the " + p.toString() + " region servers: "
+ + df.format(avgLocality) + " %");
+ }
+ }
+
+ // Print the region balancing information
+ System.out.println("\n\tTotal hosting region servers: " +
+ totalRegionServers);
+ // Print the region balance information
+ if (totalRegionServers != 0) {
+ System.out.println(
+ "\tAvg dispersion num: " +df.format(avgDispersionNum) +
+ " hosts;\tMax dispersion num: " + df.format(maxDispersionNum) +
+ " hosts;\tMin dispersion num: " + df.format(minDispersionNum) +
+ " hosts;");
+
+ System.out.println("\t\tThe number of the region servers with the max" +
+ " dispersion num: " + this.maxDispersionNumServerSet.size());
+ if (isDetailMode) {
+ printHServerAddressSet(maxDispersionNumServerSet);
+ }
+
+ System.out.println("\t\tThe number of the region servers with the min" +
+ " dispersion num: " + this.minDispersionNumServerSet.size());
+ if (isDetailMode) {
+ // BUGFIX: previously printed maxDispersionNumServerSet under this
+ // "min dispersion num" heading (copy-paste error).
+ printHServerAddressSet(minDispersionNumServerSet);
+ }
+
+ System.out.println(
+ "\tAvg dispersion score: " + df.format(avgDispersionScore) +
+ ";\tMax dispersion score: " + df.format(maxDispersionScore) +
+ ";\tMin dispersion score: " + df.format(minDispersionScore) + ";");
+
+ System.out.println("\t\tThe number of the region servers with the max" +
+ " dispersion score: " + this.maxDispersionScoreServerSet.size());
+ if (isDetailMode) {
+ printHServerAddressSet(maxDispersionScoreServerSet);
+ }
+
+ System.out.println("\t\tThe number of the region servers with the min" +
+ " dispersion score: " + this.minDispersionScoreServerSet.size());
+ if (isDetailMode) {
+ printHServerAddressSet(minDispersionScoreServerSet);
+ }
+
+ System.out.println(
+ "\tAvg regions/region server: " + df.format(avgRegionsOnRS) +
+ ";\tMax regions/region server: " + maxRegionsOnRS +
+ ";\tMin regions/region server: " + minRegionsOnRS + ";");
+
+ // Print the details about the most loaded region servers
+ System.out.println("\t\tThe number of the most loaded region servers: "
+ + mostLoadedRSSet.size());
+ if (isDetailMode) {
+ printHServerAddressSet(mostLoadedRSSet);
+ }
+
+ // Print the details about the least loaded region servers
+ System.out.println("\t\tThe number of the least loaded region servers: "
+ + leastLoadedRSSet.size());
+ if (isDetailMode) {
+ printHServerAddressSet(leastLoadedRSSet);
+ }
+ }
+ System.out.println("==============================");
+ }
+
+ /**
+ * @return the regions that are currently not assigned to any server
+ */
+ List getUnassignedRegions() {
+ return this.unAssignedRegionsList;
+ }
+
+ /**
+ * @return the regions whose favored-nodes entry is missing or malformed
+ */
+ List getRegionsWithoutValidFavoredNodes() {
+ return this.regionsWithoutValidFavoredNodes;
+ }
+
+ /**
+ * @return the regions hosted somewhere other than one of their favored nodes
+ */
+ List getNonFavoredAssignedRegions() {
+ return this.nonFavoredAssignedRegionList;
+ }
+
+ /**
+ * @return how many regions are hosted on one of their favored nodes
+ */
+ int getTotalFavoredAssignments() {
+ return this.totalFavoredAssignments;
+ }
+
+ /**
+ * Counts the regions hosted on their favored node at the given slot.
+ * @param position the primary/secondary/tertiary slot to look up
+ * @return the number of regions hosted on that favored-node position
+ */
+ int getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position position) {
+ return this.favoredNodes[position.ordinal()];
+ }
+
+ /**
+ * Prints the given servers to stdout, three per indented line, as part of
+ * the detail-mode report. Silently returns when the set is null.
+ */
+ private void printHServerAddressSet(Set serverSet) {
+ if (serverSet == null) {
+ return;
+ }
+ int printed = 0;
+ for (ServerName server : serverSet) {
+ // Start a fresh indented line before every third entry (incl. the first).
+ if (printed % 3 == 0) {
+ System.out.print("\n\t\t\t");
+ }
+ printed++;
+ System.out.print(server.getHostAndPort() + " ; ");
+ }
+ System.out.println("\n");
+ }
+}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/FavoredGroupLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/FavoredGroupLoadBalancer.java
new file mode 100644
index 0000000..dc0788c
--- /dev/null
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/FavoredGroupLoadBalancer.java
@@ -0,0 +1,369 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Random;
+import java.util.Set;
+
+import com.google.common.net.HostAndPort;
+import com.google.common.collect.Sets;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.StartcodeAgnosticServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.master.RackManager;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodesPromoter;
+import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Region-server-group aware balancer with favored-nodes support. It extends
+ * RSGroupBasedLoadBalancer, scopes every favored-nodes operation
+ * (redistribute, FN checks, daughter/merge FN generation) to a single group,
+ * and delegates the actual placement logic to an internal
+ * FavoredStochasticBalancer.
+ */
+public class FavoredGroupLoadBalancer extends RSGroupBasedLoadBalancer implements
+ FavoredNodesPromoter {
+ private static final Log LOG = LogFactory.getLog(FavoredGroupLoadBalancer.class);
+
+ // Internal balancer viewed through its promoter interface; all favored-nodes
+ // operations are delegated to it, one group slice at a time.
+ private FavoredNodesPromoter favoredNodesPromoter;
+ private RackManager rackManager;
+
+ @Override
+ public void initialize() throws HBaseIOException {
+
+ // Wire up the favored-nodes-aware internal balancer before anything else.
+ internalBalancer = new FavoredStochasticBalancer();
+ internalBalancer.setConf(config);
+ internalBalancer.setMasterServices(masterServices);
+ internalBalancer.setClusterStatus(clusterStatus);
+ internalBalancer.initialize();
+ favoredNodesPromoter = (FavoredNodesPromoter) internalBalancer;
+ if (RSGroupInfoManager == null) {
+ initializeRSGroupManager();
+ }
+ rackManager = new RackManager(config);
+ }
+
+ /**
+ * Redistributes favored nodes group by group: slices the cluster state into
+ * per-group maps (matching servers by hostname and port, start code
+ * ignored) and delegates each non-empty slice to the internal promoter.
+ */
+ @Override
+ public Map> redistribute(
+ Map> clusterState) throws IOException {
+ if (!isOnline()) {
+ throw new IllegalStateException(RSGroupInfoManager.RSGROUP_TABLE_NAME
+ + " is not online, unable to perform balance");
+ }
+ Map> result = new HashMap>();
+ for (RSGroupInfo info : RSGroupInfoManager.listRSGroups()) {
+ Map> groupClusterState = new HashMap>();
+ for (HostAndPort hostAndPort : info.getServers()) {
+ for (ServerName curr : clusterState.keySet()) {
+ if (ServerName.isSameHostnameAndPort(curr, ServerName.valueOf(hostAndPort, ServerName.NON_STARTCODE))) {
+ groupClusterState.put(curr, clusterState.get(curr));
+ }
+ }
+ }
+ if (groupClusterState.size() > 0) {
+ Map> groupFavoredNodes = favoredNodesPromoter
+ .redistribute(groupClusterState);
+ if (groupFavoredNodes != null) {
+ result.putAll(groupFavoredNodes);
+ }
+ }
+ }
+ return result;
+ }
+
+ // TODO - Check if implementation and plans are correct
+ /**
+ * Performs a complete per-group redistribute of all replicas; on any
+ * IOException the partial result is discarded and an empty list returned.
+ */
+ @Override
+ public List completeRedistribute(
+ Map> clusterState) throws IOException {
+ if (!isOnline()) {
+ throw new IllegalStateException(RSGroupInfoManager.RSGROUP_TABLE_NAME
+ + " is not online, unable to perform balance");
+ }
+ List result = Lists.newArrayList();
+ try {
+ for (RSGroupInfo info : RSGroupInfoManager.listRSGroups()) {
+ Map> groupClusterState = new HashMap>();
+ for (HostAndPort sName : info.getServers()) {
+ for (ServerName curr : clusterState.keySet()) {
+ if (ServerName.isSameHostnameAndPort(curr, ServerName.valueOf(sName, ServerName.NON_STARTCODE))) {
+ groupClusterState.put(curr, clusterState.get(curr));
+ }
+ }
+ }
+ // NOTE(review): regionsToRR is built here but never used afterwards —
+ // confirm whether it should be passed to the promoter or removed.
+ List regionsToRR = Lists.newArrayList();
+ for (List hris : groupClusterState.values()) {
+ regionsToRR.addAll(hris);
+ }
+ if (groupClusterState.size() > 0) {
+ LOG.debug("Doing complete redistribute for group : " + info.getName());
+ List partialResult = favoredNodesPromoter
+ .completeRedistribute(groupClusterState);
+ if (partialResult != null) {
+ result.addAll(partialResult);
+ }
+ }
+ }
+ } catch (IOException exp) {
+ // Best effort: log and return an empty result rather than propagate.
+ LOG.warn("Exception while redistributing cluster.", exp);
+ result.clear();
+ }
+ return result;
+ }
+
+ /**
+ * Runs the favored-nodes check across every group.
+ * NOTE(review): the {@code regions} parameter is unused — each group's
+ * regions are re-derived inside checkGroupFavoredNodes; confirm intent.
+ */
+ @Override
+ public Map> checkFavoredNodes(List servers,
+ List regions) {
+ Map> result = new HashMap>();
+ try {
+ for (RSGroupInfo info : RSGroupInfoManager.listRSGroups()) {
+ result.putAll(checkGroupFavoredNodes(info.getName(), servers));
+ }
+ } catch (IOException exp) {
+ LOG.warn(StringUtils.stringifyException(exp));
+ }
+ return result;
+ }
+
+ /**
+ * Runs the favored-nodes check for a single group: collects the group's
+ * live servers and the regions of the group's tables and delegates to the
+ * internal promoter. When every server of the group is offline, each group
+ * server is mapped to an empty list and a warning is logged.
+ */
+ public Map> checkGroupFavoredNodes(String groupName,
+ List servers) throws IOException {
+ RSGroupInfo info = RSGroupInfoManager.getRSGroup(groupName);
+ Map> result = new HashMap>();
+ List groupServers = filterOfflineServers(info, servers);
+ List groupRegions = Lists.newArrayList();
+ for (TableName tName : info.getTables()) {
+ groupRegions.addAll(this.masterServices.getAssignmentManager().getRegionStates()
+ .getRegionsOfTable(tName));
+ }
+ if (groupServers.size() > 0) {
+ LOG.debug("checking favored nodes for group : " + info.getName());
+ Map> partialResult = favoredNodesPromoter.checkFavoredNodes(
+ groupServers, groupRegions);
+ if (partialResult != null) {
+ result.putAll(partialResult);
+ }
+ } else {
+ for (HostAndPort hostAndPort : info.getServers()) {
+ List empty = Lists.newArrayList();
+ result.put(ServerName.valueOf(hostAndPort, ServerName.NON_STARTCODE), empty);
+ }
+ LOG.warn("The entire region server group " + info.getName() + " is dead.");
+ }
+ return result;
+ }
+
+ /**
+ * Generates favored nodes for both daughters of a split, keeping them
+ * inside the parent's group. If the parent had no favored nodes, fresh
+ * ones are generated; otherwise both daughters share one node inherited
+ * from the parent and the remaining slots are filled from the parent's
+ * nodes (or regenerated) before being persisted via the FN manager.
+ */
+ @Override
+ public Map> generateFavoredNodesForDaughter(
+ List servers, HRegionInfo parent, HRegionInfo hriA, HRegionInfo hriB)
+ throws IOException {
+
+ Set regionAFavNodes = Sets.newHashSet();
+ Set regionBFavNodes = Sets.newHashSet();
+ Map> result = new HashMap>();
+ TableName table = parent.getTable();
+
+ String group = RSGroupInfoManager.getRSGroupOfTable(table);
+ List groupServers =
+ getGroupServerList(RSGroupInfoManager.getRSGroup(group));
+
+ // Use all servers in the group instead of only the online servers; this
+ // avoids failures when just one or two servers of the group are online.
+ FavoredNodeAssignmentHelper helper =
+ new FavoredNodeAssignmentHelper(groupServers, rackManager);
+ helper.initialize();
+
+ FavoredNodesManager fnm = masterServices.getFavoredNodesManager();
+ List parentFavoredNodes = fnm.getFavoredNodes(parent);
+ if (parentFavoredNodes == null) {
+ LOG.debug("Unable to find favored nodes for parent, " + parent
+ + " generating new favored nodes for daughter");
+ regionAFavNodes = Sets.newHashSet(helper.generateFavoredNodes(hriA));
+ regionBFavNodes = Sets.newHashSet(helper.generateFavoredNodes(hriB));
+
+ } else {
+
+ LOG.debug("FN of parent " + parent.getRegionNameAsString() + " " + parentFavoredNodes);
+ List groupFavoredNodes = getServersInGroup(groupServers, parentFavoredNodes);
+ List nonGroupFavoredNodes = getServersNotInGroup(groupServers, parentFavoredNodes);
+
+ // Let both daughters have the same primary from parent nodes in the same group whether table is moved/not
+ ServerName primary = getRNServerWithExclusion(parentFavoredNodes, Sets.newHashSet(nonGroupFavoredNodes));
+ if (primary != null) {
+ regionAFavNodes.add(primary);
+ regionBFavNodes.add(primary);
+ }
+
+ if (nonGroupFavoredNodes.size() == 0) {
+
+ // NOTE(review): the second call excludes regionAFavNodes (not
+ // regionBFavNodes) — presumably so the daughters pick distinct
+ // nodes from the parent's set; confirm this asymmetry is intended.
+ addRNFavNodeWithExclusion(regionAFavNodes, parentFavoredNodes, regionAFavNodes);
+ addRNFavNodeWithExclusion(regionBFavNodes, parentFavoredNodes, regionAFavNodes);
+
+ } else {
+ LOG.debug("Nodes:" + nonGroupFavoredNodes + " does not belong to server list, Table:"
+ + parent.getTable().getNameAsString() + " likely being moved");
+ LOG.debug("Nodes:" + groupFavoredNodes + " belong to the same group for table: "
+ + parent.getTable().getNameAsString());
+
+ addRNFavNodeWithExclusion(regionAFavNodes, nonGroupFavoredNodes, regionAFavNodes);
+ addRNFavNodeWithExclusion(regionBFavNodes, nonGroupFavoredNodes, regionBFavNodes);
+ }
+
+ assignMissingFavoredNodes(regionAFavNodes, helper, parentFavoredNodes);
+ assignMissingFavoredNodes(regionBFavNodes, helper, parentFavoredNodes);
+ }
+
+ LOG.debug("FN of daughter-1- " + hriA.getRegionNameAsString() + " " + regionAFavNodes);
+ LOG.debug("FN of daughter-2- " + hriB.getRegionNameAsString() + " " + regionBFavNodes);
+
+ result.put(hriA, Lists.newArrayList(regionAFavNodes));
+ result.put(hriB, Lists.newArrayList(regionBFavNodes));
+ fnm.updateFavoredNodes(hriA, result.get(hriA));
+ fnm.updateFavoredNodes(hriB, result.get(hriB));
+ return result;
+ }
+
+ // Tops up regionFN with a generated node, preferring to exclude the parent's
+ // nodes; falls back to generation without exclusions when that fails.
+ private void assignMissingFavoredNodes(Set regionFN,
+ FavoredNodeAssignmentHelper helper, List parentFavoredNodes) throws IOException {
+
+ try {
+ regionFN.add(helper.generateMissingFavoredNode(Lists.newArrayList(regionFN), parentFavoredNodes));
+ } catch (IOException e) {
+ LOG.warn("Could not generate FN for " + regionFN + " trying without excludenodes:" + parentFavoredNodes);
+ regionFN.add(helper.generateMissingFavoredNode(Lists.newArrayList(regionFN)));
+ }
+ }
+
+ // Adds one random node from selectionNodes (minus excludeNodes) to
+ // regionNodes, if any candidate remains.
+ private void addRNFavNodeWithExclusion(Set regionNodes, List selectionNodes,
+ Set excludeNodes) throws IOException {
+ ServerName sn = getRNServerWithExclusion(selectionNodes, excludeNodes);
+ if (sn != null) {
+ regionNodes.add(sn);
+ }
+ }
+
+ // Picks a random server from parentFavoredNodes after removing excludeNodes;
+ // returns null when no candidate remains.
+ private ServerName getRNServerWithExclusion(List parentFavoredNodes,
+ Set excludeNodes) throws IOException {
+
+ List nodesToConsider = Lists.newArrayList(parentFavoredNodes);
+ nodesToConsider.removeAll(excludeNodes);
+ if (nodesToConsider.size() > 0) {
+ return getRandomServerFromList(nodesToConsider);
+ }
+ return null;
+ }
+
+ // Uniformly random pick. NOTE(review): allocates a new Random per call;
+ // a shared instance would suffice.
+ private ServerName getRandomServerFromList(List nodesToConsider) {
+ Random random = new Random();
+ return nodesToConsider.get(random.nextInt(nodesToConsider.size()));
+ }
+
+ // Converts a group's HostAndPort members to start-code-agnostic ServerNames.
+ private List getGroupServerList(RSGroupInfo group) {
+ List groupServerList = Lists.newArrayList();
+ for (HostAndPort hostAndPort : group.getServers()) {
+ groupServerList.add(ServerName.valueOf(hostAndPort, ServerName.NON_STARTCODE));
+ }
+ return groupServerList;
+ }
+
+ // Normalizes ServerNames by dropping their start codes, so membership tests
+ // compare on host:port only.
+ private List getServerNamesWithoutStartCode(List groupServers) {
+ List groupServerList = Lists.newArrayList();
+ for (ServerName sn : groupServers) {
+ groupServerList.add(ServerName.valueOf(sn.getHostAndPort(), ServerName.NON_STARTCODE));
+ }
+ return groupServerList;
+ }
+
+ // Returns the parent's favored nodes that belong to the group (host:port match).
+ private List getServersInGroup(List groupServers,
+ List parentFavoredNodes) {
+
+ List serversInGroup = Lists.newArrayList(parentFavoredNodes);
+ List groupServerList = getServerNamesWithoutStartCode(groupServers);
+ serversInGroup.retainAll(groupServerList);
+ return serversInGroup;
+ }
+
+ // Returns the parent's favored nodes that fall outside the group.
+ private List getServersNotInGroup(List groupServers,
+ List parentFavoredNodes) {
+
+ List serversNotInList = Lists.newArrayList(parentFavoredNodes);
+ List groupServerList = getServerNamesWithoutStartCode(groupServers);
+ serversNotInList.removeAll(groupServerList);
+ return serversNotInList;
+ }
+
+ // Merge FN generation is delegated verbatim to the internal promoter.
+ @Override
+ public void generateFavoredNodesForMergedRegion(HRegionInfo merged, HRegionInfo hriA,
+ HRegionInfo hriB) throws IOException {
+ favoredNodesPromoter.generateFavoredNodesForMergedRegion(merged, hriA, hriB);
+ }
+
+ /**
+ * Removes the given server from favored-nodes bookkeeping, replacing it with
+ * servers of the destination group that are currently online.
+ * @throws IOException when the destination group does not exist
+ */
+ public void removeFavoredFromGroup(HostAndPort hostAndPort, String destinationGroup,
+ List onlineServers) throws IOException {
+ RSGroupInfo group = RSGroupInfoManager.getRSGroup(destinationGroup);
+ if (group != null) {
+ List newServers = filterServers(group.getServers(), onlineServers);
+ masterServices.getFavoredNodesManager().removeFavoredNode(
+ ServerName.valueOf(hostAndPort, ServerName.NON_STARTCODE), newServers);
+ } else {
+ throw new IOException(destinationGroup + " does not exist.");
+ }
+ }
+
+ @Override
+ public void setClusterStatus(ClusterStatus st) {
+ // Keep the internal balancer's view of the cluster in sync with ours.
+ super.setClusterStatus(st);
+ if (this.internalBalancer != null) {
+ this.internalBalancer.setClusterStatus(st);
+ }
+ }
+
+ /**
+ * Returns true only when every non-system region of the table has at least
+ * one favored node among the given online servers of the target group.
+ * Logs and returns false on the first region with no matching FN.
+ */
+ public boolean checkForFavoredNodesInTargetGroup(TableName table, String destinationGroup,
+ List onlineGroupServers) throws IOException {
+ boolean matched = true;
+ List regions = this.masterServices.getAssignmentManager().getRegionStates()
+ .getRegionsOfTable(table);
+ for (HRegionInfo hri : regions) {
+ if (hri.getTable().isSystemTable()) continue;
+ List fns = masterServices.getFavoredNodesManager().getFavoredNodes(hri);
+ boolean regionMatchedFN = false;
+ for (ServerName sn : fns) {
+ if (onlineGroupServers.contains(StartcodeAgnosticServerName.valueOf(sn))) {
+ regionMatchedFN = true;
+ break;
+ }
+ }
+ matched = matched && regionMatchedFN;
+ if (!matched) {
+ LOG.debug("Found region with no FNs in target group " + hri.getRegionNameAsString());
+ return false;
+ }
+ }
+ return matched;
+ }
+}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
index f94d0f6..be9c84c 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
@@ -26,6 +26,7 @@ import java.io.IOException;
import java.util.List;
import java.util.Set;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
@@ -118,4 +119,30 @@ public abstract class RSGroupAdmin implements Closeable {
* @throws java.io.IOException on unexpected failure to retrieve GroupInfo
*/
public abstract RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException;
+
+ /**
+ * Redistributes the replicas of regions belonging to a group.
+ * This API does not change assignment of regions, but might
+ * do an update to the favored nodes in META.
+ * The conditions for considering a replica for a move are:
+ * 1. The replica is not the current assignment.
+ * 2. The other two replicas have data locality greater than
+ * specified by FavoredNodesPromoter.MIN_LOCALITY_FOR_REDISTRIBUTE
+ * @param name the name of the group to balance
+ * @return true, if successful
+ * @throws IOException Signals that an I/O exception has occurred.
+ */
+ public abstract boolean redistribute(String name) throws IOException;
+
+ /**
+ * Completely redistributes region replicas in a group.
+ * This API does a load balance of all the 3*region count
+ * replicas.
+ * @param name the name of the group to balance
+ * @return true, if successful
+ * @throws IOException Signals that an I/O exception has occurred.
+ */
+ public abstract boolean completeRedistribute(String name) throws IOException;
+
+ /**
+ * Runs the favored-nodes check for the given group and returns the servers
+ * reported by that check.
+ * NOTE(review): exact semantics of the returned server list are defined by
+ * the server-side implementation — confirm against RSGroupAdminServer.
+ * @param groupName the name of the group to check
+ * @throws IOException on unexpected failure
+ */
+ public abstract List checkGroupFavoredNodes(String groupName) throws IOException;
}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
index a7f14f7..ca7eab3 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
@@ -19,6 +19,7 @@
*/
package org.apache.hadoop.hbase.rsgroup;
+import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import com.google.protobuf.ServiceException;
@@ -30,6 +31,7 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -39,6 +41,10 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RedistributeGroupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CheckGroupFavoredNodesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.CompleteRedistributeGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
@@ -199,6 +205,46 @@ class RSGroupAdminClient extends RSGroupAdmin {
}
@Override
+ public boolean redistribute(String name) throws IOException {
+ RedistributeGroupRequest request = RedistributeGroupRequest
+ .newBuilder().setGroupName(name).build();
+ try {
+ return proxy.redistributeGroup(null, request).getResult();
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public boolean completeRedistribute(String name) throws IOException {
+ CompleteRedistributeGroupRequest request = CompleteRedistributeGroupRequest
+ .newBuilder().setGroupName(name).build();
+ try {
+ return proxy.completeRedistributeGroup(null, request).getResult();
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ }
+
+ @Override
+ public List<ServerName> checkGroupFavoredNodes(String groupName) throws IOException {
+ CheckGroupFavoredNodesRequest request = CheckGroupFavoredNodesRequest
+ .newBuilder().setGroupName(groupName).build();
+ List<ServerName> result = Lists.newArrayList();
+ try {
+ CheckGroupFavoredNodesResponse response = proxy.checkGroupFavoredNodes(
+ null, request);
+ for (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn : response
+ .getServersList()) {
+ result.add(ProtobufUtil.toServerName(sn));
+ }
+ } catch (ServiceException e) {
+ throw ProtobufUtil.getRemoteException(e);
+ }
+ return result;
+ }
+
+ @Override
public void close() throws IOException {
}
}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 22bad72..13cc1f2 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -78,7 +78,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesR
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
-
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
public class RSGroupAdminEndpoint extends RSGroupAdminService
implements CoprocessorService, Coprocessor, MasterObserver {
@@ -259,6 +259,61 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
done.run(response);
}
+ @Override public void redistributeGroup(RpcController controller,
+ RSGroupAdminProtos.RedistributeGroupRequest request,
+ RpcCallback<RSGroupAdminProtos.RedistributeGroupResponse> done) {
+ RSGroupAdminProtos.RedistributeGroupResponse response = null;
+ RSGroupAdminProtos.RedistributeGroupResponse.Builder builder =
+ RSGroupAdminProtos.RedistributeGroupResponse.newBuilder();
+ boolean result = false;
+ try {
+ result = groupAdminServer.redistribute(request.getGroupName());
+ } catch (IOException e) {
+ LOG.debug(
+ "Caught Exception while doing redistributeGroup for group " + request.getGroupName(), e);
+ ResponseConverter.setControllerException(controller, e);
+ }
+ builder.setResult(result);
+ response = builder.build();
+ done.run(response);
+ }
+
+ @Override public void completeRedistributeGroup(RpcController controller,
+ RSGroupAdminProtos.CompleteRedistributeGroupRequest request,
+ RpcCallback<RSGroupAdminProtos.CompleteRedistributeGroupResponse> done) {
+ RSGroupAdminProtos.CompleteRedistributeGroupResponse response = null;
+ RSGroupAdminProtos.CompleteRedistributeGroupResponse.Builder builder =
+ RSGroupAdminProtos.CompleteRedistributeGroupResponse.newBuilder();
+ boolean result = false;
+ try {
+ result = groupAdminServer.completeRedistribute(request.getGroupName());
+ } catch (IOException e) {
+ LOG.debug(
+ "Caught Exception while doing completeRedistributeGroup for group "
+ + request.getGroupName(), e);
+ ResponseConverter.setControllerException(controller, e);
+ }
+ builder.setResult(result);
+ response = builder.build();
+ done.run(response);
+ }
+
+ @Override public void checkGroupFavoredNodes(RpcController controller,
+ RSGroupAdminProtos.CheckGroupFavoredNodesRequest request,
+ RpcCallback<RSGroupAdminProtos.CheckGroupFavoredNodesResponse> done) {
+ RSGroupAdminProtos.CheckGroupFavoredNodesResponse.Builder response =
+ RSGroupAdminProtos.CheckGroupFavoredNodesResponse.newBuilder();
+ try {
+ List<ServerName> deadServers = groupAdminServer.checkGroupFavoredNodes(request.getGroupName());
+ for (ServerName sn : deadServers) {
+ response.addServers(ProtobufUtil.toServerName(sn));
+ }
+ } catch (IOException e) {
+ ResponseConverter.setControllerException(controller, e);
+ }
+ done.run(response.build());
+ }
+
@Override
public void getRSGroupInfoOfServer(RpcController controller,
GetRSGroupInfoOfServerRequest request,
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 8725781..23c496d 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.StartcodeAgnosticServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.constraint.ConstraintException;
@@ -54,6 +55,9 @@ import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodesPromoter;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
* Service to support Region Server Grouping (HBase-6721)
@@ -231,6 +235,14 @@ public class RSGroupAdminServer extends RSGroupAdmin {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveServers(servers, targetGroupName);
}
+ if (this.master.getFavoredNodesManager() != null) {
+ FavoredGroupLoadBalancer loadBalancer = ((FavoredGroupLoadBalancer) this.master
+ .getLoadBalancer());
+ for (HostAndPort host : servers) {
+ loadBalancer.removeFavoredFromGroup(host, srcGrp.getName(), master.getServerManager()
+ .getOnlineServersList());
+ }
+ }
LOG.info("Move server done: "+sourceGroupName+"->"+targetGroupName);
}
}
@@ -246,6 +258,10 @@ public class RSGroupAdminServer extends RSGroupAdmin {
return;
}
RSGroupInfoManager manager = getRSGroupInfoManager();
+ FavoredGroupLoadBalancer fnm = null;
+ if (master.getLoadBalancer() instanceof FavoredGroupLoadBalancer) {
+ fnm = ((FavoredGroupLoadBalancer) this.master.getLoadBalancer());
+ }
synchronized (manager) {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveTables(tables, targetGroup);
@@ -256,8 +272,20 @@ public class RSGroupAdminServer extends RSGroupAdmin {
if(destGroup == null) {
throw new ConstraintException("Target group does not exist: "+targetGroup);
}
- if(destGroup.getServers().size() < 1) {
- throw new ConstraintException("Target group must have at least one server.");
+ if(destGroup.getServers().size() < FavoredNodeAssignmentHelper.FAVORED_NODES_NUM) {
+ throw new ConstraintException("Target group must have at least "
+ + FavoredNodeAssignmentHelper.FAVORED_NODES_NUM + " servers.");
+ }
+ if (fnm != null) {
+ List<ServerName> groupServers = FavoredNodeAssignmentHelper
+ .filterServers(destGroup.getServers(), master.getServerManager().getOnlineServersList());
+
+ for(TableName table : tables) {
+ if (!fnm.checkForFavoredNodesInTargetGroup(table, targetGroup, groupServers)) {
+ throw new ConstraintException(
+ "Atleast one favored node has to be migrated to the new group for table :" + table);
+ }
+ }
}
}
@@ -373,16 +401,20 @@ public class RSGroupAdminServer extends RSGroupAdmin {
plans.addAll(partialPlans);
}
}
- long startTime = System.currentTimeMillis();
+ long startTime = EnvironmentEdgeManager.currentTime();
balancerRan = plans != null;
if (plans != null && !plans.isEmpty()) {
LOG.info("Group balance "+groupName+" starting with plan count: "+plans.size());
for (RegionPlan plan: plans) {
- LOG.info("balance " + plan);
- assignmentManager.balance(plan);
+ LOG.trace("balance " + plan);
+ if (LoadBalancer.BOGUS_SERVER_NAME.equals(plan.getDestination())) {
+ assignmentManager.unassign(plan.getRegionInfo());
+ } else {
+ assignmentManager.balance(plan);
+ }
}
- LOG.info("Group balance "+groupName+" completed after "+
- (System.currentTimeMillis()-startTime)+" seconds");
+ LOG.info("Group balance " + groupName + " completed after "
+ + (EnvironmentEdgeManager.currentTime() - startTime) + " seconds");
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postBalanceRSGroup(groupName, balancerRan);
@@ -401,6 +433,110 @@ public class RSGroupAdminServer extends RSGroupAdmin {
return getRSGroupInfoManager().getRSGroupOfServer(hostPort);
}
+ @Override
+ public boolean redistribute(String name) throws IOException {
+ if (!master.isInitialized()) {
+ throw new IOException(
+ "Master has not been initialized, cannot run redistribute.");
+ }
+ ServerManager serverManager = master.getServerManager();
+ FavoredNodesPromoter balancer = (FavoredNodesPromoter) master.getLoadBalancer();
+ if (balancer == null) {
+ LOG.debug("Balancer not an instance of FavoredNodesPromoter.");
+ return false;
+ }
+ long startTime = System.currentTimeMillis();
+ LOG.info("Starting to redistribute");
+ synchronized (balancer) {
+ // Only allow one balance run at a time.
+ Map<String, RegionState> groupRIT = rsGroupGetRegionsInTransition(name);
+ if (groupRIT.size() > 0) {
+ LOG.debug("Not running redistribute because "
+ + groupRIT.size()
+ + " region(s) in transition : "
+ + StringUtils.abbreviate(master.getAssignmentManager().getRegionStates()
+ .getRegionsInTransition().toString(), 256));
+ return false;
+ }
+ if (serverManager.areDeadServersInProgress()) {
+ LOG.debug("Not running redistribute because processing dead regionserver(s): "
+ + serverManager.getDeadServers());
+ return false;
+ }
+ Map<ServerName, List<HRegionInfo>> assignment = getRSGroupAssignments(name);
+ Map<HRegionInfo, List<ServerName>> updatedFavoredNodes = balancer
+ .redistribute(assignment);
+ LOG.info(" Redistribute resulted in update of favored nodes for regions : "
+ + updatedFavoredNodes.size());
+ for (HRegionInfo hri : updatedFavoredNodes.keySet()) {
+ master.getFavoredNodesManager().updateFavoredNodes(hri, updatedFavoredNodes.get(hri));
+ }
+ master.getFavoredNodesManager().updateFavoredNodesInRegionServer(updatedFavoredNodes);
+ }
+ LOG.info("Group redistribute " + name + " completed after "
+ + (System.currentTimeMillis() - startTime) + " seconds");
+ return true;
+ }
+
+ @Override
+ public boolean completeRedistribute(String name) throws IOException {
+ if (!master.isInitialized()) {
+ throw new IOException(
+ "Master has not been initialized, cannot run completeRedistribute.");
+ }
+ ServerManager serverManager = master.getServerManager();
+ FavoredNodesPromoter balancer = (FavoredNodesPromoter) master.getLoadBalancer();
+ if (balancer == null) {
+ LOG.debug("Balancer not an instance of FavoredNodesPromoter.");
+ return false;
+ }
+ long startTime = EnvironmentEdgeManager.currentTime();
+ LOG.info("Starting to complete redistribute");
+ synchronized (balancer) {
+ // Only allow one balance run at a time.
+ Map<String, RegionState> groupRIT = rsGroupGetRegionsInTransition(name);
+ if (groupRIT.size() > 0) {
+ LOG.debug("Not running complete redistribute because "
+ + groupRIT.size()
+ + " region(s) in transition : "
+ + StringUtils.abbreviate(master.getAssignmentManager().getRegionStates()
+ .getRegionsInTransition().toString(), 256));
+ return false;
+ }
+ if (serverManager.areDeadServersInProgress()) {
+ LOG.debug("Not running complete redistribute because processing dead regionserver(s): "
+ + serverManager.getDeadServers());
+ return false;
+ }
+ Map<ServerName, List<HRegionInfo>> assignment = getRSGroupAssignments(name);
+ List<RegionPlan> regionPlans = balancer.completeRedistribute(assignment);
+ if (regionPlans != null && !regionPlans.isEmpty()) {
+ for (RegionPlan plan : regionPlans) {
+ master.getAssignmentManager().balance(plan);
+ }
+ }
+ LOG.info("Group complete redistribute " + name + " completed after "
+ + (EnvironmentEdgeManager.currentTime() - startTime) + " seconds");
+ return true;
+ }
+ }
+
+ @Override
+ public List<ServerName> checkGroupFavoredNodes(String groupName) throws IOException {
+ List<ServerName> result = Lists.newArrayList();
+ if (this.master.getFavoredNodesManager() != null) {
+ Map<ServerName, List<HRegionInfo>> deadServers = ((FavoredGroupLoadBalancer) this.master
+ .getLoadBalancer()).checkGroupFavoredNodes(groupName, master.getServerManager()
+ .getOnlineServersList());
+ for (Map.Entry<ServerName, List<HRegionInfo>> entry : deadServers.entrySet()) {
+ LOG.info(entry.getKey() + " is dead and referenced by the regions = "
+ + StringUtils.join(entry.getValue().iterator(), ","));
+ result.add(entry.getKey());
+ }
+ }
+ return result;
+ }
+
@InterfaceAudience.Private
public RSGroupInfoManager getRSGroupInfoManager() throws IOException {
return RSGroupInfoManager;
@@ -423,6 +559,26 @@ public class RSGroupAdminServer extends RSGroupAdmin {
return rit;
}
+ private Map<ServerName, List<HRegionInfo>> getRSGroupAssignments(String groupName)
+ throws IOException {
+ Map<ServerName, List<HRegionInfo>> result = Maps.newHashMap();
+ RSGroupInfo RSGroupInfo = getRSGroupInfo(groupName);
+ for (ServerName serverName : master.getServerManager().getOnlineServers().keySet()) {
+ if (RSGroupInfo.getServers().contains(serverName.getHostPort())) {
+ result.put(serverName, new ArrayList<HRegionInfo>());
+ }
+ }
+ for (Map.Entry<HRegionInfo, ServerName> entry : master.getAssignmentManager()
+ .getRegionStates().getRegionAssignments().entrySet()) {
+ ServerName currServer = entry.getValue();
+ HRegionInfo currRegion = entry.getKey();
+ if (result.containsKey(currServer)) {
+ result.get(currServer).add(currRegion);
+ }
+ }
+ return result;
+ }
+
private Map>>
getRSGroupAssignmentsByTable(String groupName) throws IOException {
Map>> result = Maps.newHashMap();
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index c4b4bf3..0c8c814 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -77,11 +77,11 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
private static final Log LOG = LogFactory.getLog(RSGroupBasedLoadBalancer.class);
- private Configuration config;
- private ClusterStatus clusterStatus;
- private MasterServices masterServices;
- private RSGroupInfoManager RSGroupInfoManager;
- private LoadBalancer internalBalancer;
+ protected Configuration config;
+ protected ClusterStatus clusterStatus;
+ protected MasterServices masterServices;
+ protected RSGroupInfoManager RSGroupInfoManager;
+ protected LoadBalancer internalBalancer;
//used during reflection by LoadBalancerFactory
@InterfaceAudience.Private
@@ -269,7 +269,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
}
}
- private List<ServerName> filterOfflineServers(RSGroupInfo RSGroupInfo,
+ protected List<ServerName> filterOfflineServers(RSGroupInfo RSGroupInfo,
List<ServerName> onlineServers) {
if (RSGroupInfo != null) {
return filterServers(RSGroupInfo.getServers(), onlineServers);
@@ -288,7 +288,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
* List of servers which are online.
* @return the list
*/
- private List<ServerName> filterServers(Collection<HostAndPort> servers,
+ protected List<ServerName> filterServers(Collection<HostAndPort> servers,
Collection<ServerName> onlineServers) {
ArrayList<ServerName> finalList = new ArrayList<ServerName>();
for (HostAndPort server : servers) {
@@ -370,14 +370,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
public void initialize() throws HBaseIOException {
try {
if (RSGroupInfoManager == null) {
- List<RSGroupAdminEndpoint> cps =
- masterServices.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class);
- if (cps.size() != 1) {
- String msg = "Expected one implementation of GroupAdminEndpoint but found " + cps.size();
- LOG.error(msg);
- throw new HBaseIOException(msg);
- }
- RSGroupInfoManager = cps.get(0).getGroupInfoManager();
+ initializeRSGroupManager();
}
} catch (IOException e) {
throw new HBaseIOException("Failed to initialize GroupInfoManagerImpl", e);
@@ -394,6 +387,17 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
internalBalancer.initialize();
}
+ protected void initializeRSGroupManager() throws HBaseIOException {
+ List<RSGroupAdminEndpoint> cps =
+ masterServices.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class);
+ if (cps.size() != 1) {
+ String msg = "Expected one implementation of GroupAdminEndpoint but found " + cps.size();
+ LOG.error(msg);
+ throw new HBaseIOException(msg);
+ }
+ RSGroupInfoManager = cps.get(0).getGroupInfoManager();
+ }
+
public boolean isOnline() {
return RSGroupInfoManager != null && RSGroupInfoManager.isOnline();
}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RegionPlacementMaintainer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RegionPlacementMaintainer.java
new file mode 100644
index 0000000..062648a
--- /dev/null
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RegionPlacementMaintainer.java
@@ -0,0 +1,1703 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rsgroup;
+
+import java.io.IOException;
+import java.text.DecimalFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.Random;
+import java.util.Scanner;
+import java.util.Set;
+import java.util.TreeMap;
+
+import com.google.common.net.HostAndPort;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.master.RackManager;
+import org.apache.hadoop.hbase.master.SnapshotOfRegionAssignmentFromMeta;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
+import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodesInfoPair;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ReplicaLoad;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerReplicaLoadPair;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetFavoredNodesForRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetFavoredNodesForRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetFavoredNodesForTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetFavoredNodesForTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetReplicaLoadRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetReplicaLoadResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.MunkresAssignment;
+import org.apache.hadoop.hbase.util.Pair;
+
+import com.google.common.collect.LinkedListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import com.google.protobuf.ServiceException;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+/**
+ * A tool that is used for manipulating and viewing favored nodes information
+ * for regions. Run with -h to get a list of the options
+ */
+@InterfaceAudience.Private
+// TODO: Remove? Unused. Partially implemented only.
+public class RegionPlacementMaintainer {
+ private static final Log LOG = LogFactory.getLog(RegionPlacementMaintainer.class
+ .getName());
+ //The cost of a placement that should never be assigned.
+ private static final float MAX_COST = Float.POSITIVE_INFINITY;
+
+ // The cost of a placement that is undesirable but acceptable.
+ private static final float AVOID_COST = 100000f;
+
+ // The amount by which the cost of a placement is increased if it is the
+ // last slot of the server. This is done to more evenly distribute the slop
+ // amongst servers.
+ private static final float LAST_SLOT_COST_PENALTY = 0.5f;
+
+ // The amount by which the cost of a primary placement is penalized if it is
+ // not the host currently serving the region. This is done to minimize moves.
+ private static final float NOT_CURRENT_HOST_PENALTY = 0.1f;
+
+ private static boolean USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = false;
+
+ private Configuration conf;
+ private final boolean enforceLocality;
+ private final boolean enforceMinAssignmentMove;
+ private Admin admin;
+ private RSGroupAdmin groupAdmin;
+ private RackManager rackManager;
+ private Set<TableName> targetTableSet;
+ private MasterService.BlockingInterface master;
+ private final Connection connection;
+
+ public RegionPlacementMaintainer(Configuration conf) throws IOException {
+ this(conf, true, true);
+ }
+
+ public RegionPlacementMaintainer(Configuration conf, boolean enforceLocality,
+ boolean enforceMinAssignmentMove) throws IOException {
+ this.conf = conf;
+ this.enforceLocality = enforceLocality;
+ this.enforceMinAssignmentMove = enforceMinAssignmentMove;
+ this.targetTableSet = new HashSet<TableName>();
+ this.rackManager = new RackManager(conf);
+ try {
+ this.connection = ConnectionFactory.createConnection(this.conf);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ this.master = ((ClusterConnection)connection).getMaster();
+ this.admin = connection.getAdmin();
+ }
+
+ private static void printHelp(Options opt) {
+ new HelpFormatter().printHelp(
+ "RegionPlacement < -w | -u | -n | -v | -t | -h | -overwrite -r regionName -f favoredNodes " +
+ "-diff>" +
+ " [-l false] [-m false] [-d] [-tables t1,t2,...tn] [-zk zk1,zk2,zk3]" +
+ " [-fs hdfs://a.b.c.d:9000] [-hbase_root /HBASE]", opt);
+ }
+
+ public void setTargetTableName(String[] tableNames) {
+ if (tableNames != null) {
+ for (String table : tableNames) {
+ this.targetTableSet.add(TableName.valueOf(table));
+ }
+ }
+ }
+
+ private RSGroupAdmin getGroupAdmin() throws IOException {
+ if (this.groupAdmin == null) {
+ this.groupAdmin = new RSGroupAdminClient(admin.getConnection());
+ }
+ return this.groupAdmin;
+ }
+
+ private List<ServerName> getFavoredNodes(HRegionInfo hri)
+ throws ServiceException {
+ List<ServerName> favNodes = Lists.newArrayList();
+ GetFavoredNodesForRegionRequest.Builder request = GetFavoredNodesForRegionRequest.newBuilder();
+ request.setRegionInfo(HRegionInfo.convert(hri));
+ GetFavoredNodesForRegionResponse response =
+ master.getFavoredNodesForRegion(null, request.build());
+ for (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName sn : response
+ .getServersList()) {
+ favNodes.add(ProtobufUtil.toServerName(sn));
+ }
+ return favNodes;
+ }
+
+ private List<ServerName> getServers(String groupName) throws IOException {
+ List<ServerName> result = new ArrayList<ServerName>();
+ RSGroupInfo gInfo = getGroupAdmin().getRSGroupInfo(groupName);
+ if (gInfo == null) {
+ throw new IOException("Group information for group : " + groupName + " found null.");
+ } else {
+ for (HostAndPort sn : gInfo.getServers()) {
+ result.add(ServerName.valueOf(sn, ServerName.NON_STARTCODE));
+ }
+ }
+ return result;
+ }
+
+ private boolean isGroupMultiRack(TableName table) throws IOException {
+ Set<String> racks = Sets.newHashSet();
+ RSGroupInfo gInfo = getGroupAdmin().getRSGroupInfoOfTable(table);
+ if (gInfo == null) {
+ throw new IOException("Group information for table : " + table + " found null.");
+ } else {
+ for (HostAndPort sn : gInfo.getServers()) {
+ racks.add(this.rackManager.getRack(ServerName.valueOf(sn, ServerName.NON_STARTCODE)));
+ }
+ }
+ return racks.size() > 1;
+ }
+
+ private RackManager getRackManager() {
+ return this.rackManager;
+ }
+ /**
+ * @return the new RegionAssignmentSnapshot
+ * @throws IOException
+ */
+ public SnapshotOfRegionAssignmentFromMeta getRegionAssignmentSnapshot()
+ throws IOException {
+ SnapshotOfRegionAssignmentFromMeta currentAssignmentShapshot =
+ new SnapshotOfRegionAssignmentFromMeta(admin.getConnection());
+ new SnapshotOfRegionAssignmentFromMeta(ConnectionFactory.createConnection(conf));
+ currentAssignmentShapshot.initialize();
+ return currentAssignmentShapshot;
+ }
+
+ /**
+ * Verify the region placement is consistent with the assignment plan
+ * @param isDetailMode
+ * @return reports
+ * @throws IOException
+ */
+ public List<AssignmentVerificationReport> verifyRegionPlacement(boolean isDetailMode)
+ throws IOException {
+ System.out.println("Start to verify the region assignment and " +
+ "generate the verification report");
+ // Get the region assignment snapshot
+ SnapshotOfRegionAssignmentFromMeta snapshot = this.getRegionAssignmentSnapshot();
+
+ // Get all the tables
+ Set<TableName> tables = snapshot.getTableSet();
+
+ // Get the region locality map
+ Map<String, Map<String, Float>> regionLocalityMap = null;
+ if (this.enforceLocality == true) {
+ regionLocalityMap = FSUtils.getRegionDegreeLocalityMappingFromFS(conf);
+ }
+ List<AssignmentVerificationReport> reports = new ArrayList<AssignmentVerificationReport>();
+ // Iterate all the tables to fill up the verification report
+ for (TableName table : tables) {
+ if (!this.targetTableSet.isEmpty() &&
+ !this.targetTableSet.contains(table)) {
+ continue;
+ }
+ AssignmentVerificationReport report = new AssignmentVerificationReport();
+ report.fillUp(table, snapshot, regionLocalityMap);
+ report.print(isDetailMode);
+ reports.add(report);
+ }
+ return reports;
+ }
+
+ /**
+ * Generate the assignment plan for the existing table
+ *
+ * @param tableName
+ * @param assignmentSnapshot
+ * @param regionLocalityMap
+ * @param plan
+ * @param munkresForSecondaryAndTertiary if set on true the assignment plan
+ * for the tertiary and secondary will be generated with Munkres algorithm,
+ * otherwise will be generated using placeSecondaryAndTertiaryRS
+ * @throws IOException
+ */
+ private void genAssignmentPlan(TableName tableName,
+ SnapshotOfRegionAssignmentFromMeta assignmentSnapshot,
+ Map<String, Map<String, Float>> regionLocalityMap, FavoredNodesPlan plan,
+ boolean munkresForSecondaryAndTertiary) throws IOException {
+ // Get the all the regions for the current table
+ List<HRegionInfo> regions =
+ assignmentSnapshot.getTableToRegionMap().get(tableName);
+ int numRegions = regions.size();
+
+ // Get the current assignment map
+ Map<HRegionInfo, ServerName> currentAssignmentMap =
+ assignmentSnapshot.getRegionToRegionServerMap();
+
+ // Get the all the region servers
+ List<ServerName> servers = new ArrayList<ServerName>();
+ try (Admin admin = this.connection.getAdmin()) {
+ servers.addAll(admin.getClusterStatus().getServers());
+ }
+ servers.addAll(getServers(null));
+
+ LOG.info("Start to generate assignment plan for " + numRegions +
+ " regions from table " + tableName + " with " +
+ servers.size() + " region servers");
+
+ int slotsPerServer = (int) Math.ceil((float) numRegions /
+ servers.size());
+ int regionSlots = slotsPerServer * servers.size();
+
+ // Compute the primary, secondary and tertiary costs for each region/server
+ // pair. These costs are based only on node locality and rack locality, and
+ // will be modified later.
+ float[][] primaryCost = new float[numRegions][regionSlots];
+ float[][] secondaryCost = new float[numRegions][regionSlots];
+ float[][] tertiaryCost = new float[numRegions][regionSlots];
+
+ if (this.enforceLocality && regionLocalityMap != null) {
+ // Transform the locality mapping into a 2D array, assuming that any
+ // unspecified locality value is 0.
+ float[][] localityPerServer = new float[numRegions][regionSlots];
+ for (int i = 0; i < numRegions; i++) {
+ Map<String, Float> serverLocalityMap =
+ regionLocalityMap.get(regions.get(i).getEncodedName());
+ if (serverLocalityMap == null) {
+ continue;
+ }
+ for (int j = 0; j < servers.size(); j++) {
+ String serverName = servers.get(j).getHostname();
+ if (serverName == null) {
+ continue;
+ }
+ Float locality = serverLocalityMap.get(serverName);
+ if (locality == null) {
+ continue;
+ }
+ for (int k = 0; k < slotsPerServer; k++) {
+ // If we can't find the locality of a region to a server, which occurs
+ // because locality is only reported for servers which have some
+ // blocks of a region local, then the locality for that pair is 0.
+ localityPerServer[i][j * slotsPerServer + k] = locality.floatValue();
+ }
+ }
+ }
+
+ // Compute the total rack locality for each region in each rack. The total
+ // rack locality is the sum of the localities of a region on all servers in
+ // a rack.
+ Map<String, Map<HRegionInfo, Float>> rackRegionLocality =
+ new HashMap<String, Map<HRegionInfo, Float>>();
+ for (int i = 0; i < numRegions; i++) {
+ HRegionInfo region = regions.get(i);
+ for (int j = 0; j < regionSlots; j += slotsPerServer) {
+ String rack = rackManager.getRack(servers.get(j / slotsPerServer));
+ Map<HRegionInfo, Float> rackLocality = rackRegionLocality.get(rack);
+ if (rackLocality == null) {
+ rackLocality = new HashMap<HRegionInfo, Float>();
+ rackRegionLocality.put(rack, rackLocality);
+ }
+ Float localityObj = rackLocality.get(region);
+ float locality = localityObj == null ? 0 : localityObj.floatValue();
+ locality += localityPerServer[i][j];
+ rackLocality.put(region, locality);
+ }
+ }
+ for (int i = 0; i < numRegions; i++) {
+ for (int j = 0; j < regionSlots; j++) {
+ String rack = rackManager.getRack(servers.get(j / slotsPerServer));
+ Float totalRackLocalityObj =
+ rackRegionLocality.get(rack).get(regions.get(i));
+ float totalRackLocality = totalRackLocalityObj == null ?
+ 0 : totalRackLocalityObj.floatValue();
+
+ // Primary cost aims to favor servers with high node locality and low
+ // rack locality, so that secondaries and tertiaries can be chosen for
+ // nodes with high rack locality. This might give primaries with
+ // slightly less locality at first compared to a cost which only
+ // considers the node locality, but should be better in the long run.
+ primaryCost[i][j] = 1 - (2 * localityPerServer[i][j] -
+ totalRackLocality);
+
+ // Secondary cost aims to favor servers with high node locality and high
+ // rack locality since the tertiary will be chosen from the same rack as
+ // the secondary. This could be negative, but that is okay.
+ secondaryCost[i][j] = 2 - (localityPerServer[i][j] + totalRackLocality);
+
+ // Tertiary cost is only concerned with the node locality. It will later
+ // be restricted to only hosts on the same rack as the secondary.
+ tertiaryCost[i][j] = 1 - localityPerServer[i][j];
+ }
+ }
+ }
+
+ if (this.enforceMinAssignmentMove && currentAssignmentMap != null) {
+ // We want to minimize the number of regions which move as the result of a
+ // new assignment. Therefore, slightly penalize any placement which is for
+ // a host that is not currently serving the region.
+ for (int i = 0; i < numRegions; i++) {
+ for (int j = 0; j < servers.size(); j++) {
+ ServerName currentAddress = currentAssignmentMap.get(regions.get(i));
+ if (currentAddress != null &&
+ !currentAddress.equals(servers.get(j))) {
+ for (int k = 0; k < slotsPerServer; k++) {
+ primaryCost[i][j * slotsPerServer + k] += NOT_CURRENT_HOST_PENALTY;
+ }
+ }
+ }
+ }
+ }
+
+ // Artificially increase the cost of one designated slot (index 0, j += slotsPerServer)
+ // of each server to evenly distribute the slop, otherwise there will be a few
+ // servers with too few regions and many servers with the max number of regions.
+ for (int i = 0; i < numRegions; i++) {
+ for (int j = 0; j < regionSlots; j += slotsPerServer) {
+ primaryCost[i][j] += LAST_SLOT_COST_PENALTY;
+ secondaryCost[i][j] += LAST_SLOT_COST_PENALTY;
+ tertiaryCost[i][j] += LAST_SLOT_COST_PENALTY;
+ }
+ }
+
+ RandomizedMatrix randomizedMatrix = new RandomizedMatrix(numRegions,
+ regionSlots);
+ primaryCost = randomizedMatrix.transform(primaryCost);
+ int[] primaryAssignment = new MunkresAssignment(primaryCost).solve();
+ primaryAssignment = randomizedMatrix.invertIndices(primaryAssignment);
+
+ // Modify the secondary and tertiary costs for each region/server pair to
+ // prevent a region from being assigned to the same rack for both primary
+ // and either one of secondary or tertiary.
+ for (int i = 0; i < numRegions; i++) {
+ int slot = primaryAssignment[i];
+ String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
+ for (int k = 0; k < servers.size(); k++) {
+ if (!rackManager.getRack(servers.get(k)).equals(rack)) {
+ continue;
+ }
+ if (k == slot / slotsPerServer) {
+ // Same node, do not place secondary or tertiary here ever.
+ for (int m = 0; m < slotsPerServer; m++) {
+ secondaryCost[i][k * slotsPerServer + m] = MAX_COST;
+ tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
+ }
+ } else {
+ // Same rack, do not place secondary or tertiary here if possible.
+ for (int m = 0; m < slotsPerServer; m++) {
+ secondaryCost[i][k * slotsPerServer + m] = AVOID_COST;
+ tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
+ }
+ }
+ }
+ }
+ if (munkresForSecondaryAndTertiary) {
+ randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
+ secondaryCost = randomizedMatrix.transform(secondaryCost);
+ int[] secondaryAssignment = new MunkresAssignment(secondaryCost).solve();
+ secondaryAssignment = randomizedMatrix.invertIndices(secondaryAssignment);
+
+ // Modify the tertiary costs for each region/server pair to ensure that a
+ // region is assigned to a tertiary server on the same rack as its secondary
+ // server, but not the same server in that rack.
+ for (int i = 0; i < numRegions; i++) {
+ int slot = secondaryAssignment[i];
+ String rack = rackManager.getRack(servers.get(slot / slotsPerServer));
+ for (int k = 0; k < servers.size(); k++) {
+ if (k == slot / slotsPerServer) {
+ // Same node, do not place tertiary here ever.
+ for (int m = 0; m < slotsPerServer; m++) {
+ tertiaryCost[i][k * slotsPerServer + m] = MAX_COST;
+ }
+ } else {
+ if (rackManager.getRack(servers.get(k)).equals(rack)) {
+ continue;
+ }
+ // Different rack, do not place tertiary here if possible.
+ for (int m = 0; m < slotsPerServer; m++) {
+ tertiaryCost[i][k * slotsPerServer + m] = AVOID_COST;
+ }
+ }
+ }
+ }
+
+ randomizedMatrix = new RandomizedMatrix(numRegions, regionSlots);
+ tertiaryCost = randomizedMatrix.transform(tertiaryCost);
+ int[] tertiaryAssignment = new MunkresAssignment(tertiaryCost).solve();
+ tertiaryAssignment = randomizedMatrix.invertIndices(tertiaryAssignment);
+
+ for (int i = 0; i < numRegions; i++) {
+ List