From 6a5f1335e835ccc464e55189da11ccd55ea2336a Mon Sep 17 00:00:00 2001 From: Elliott Clark Date: Thu, 2 Apr 2015 15:01:35 -0700 Subject: [PATCH] HBASE-6143 Make region assignment smarter when regions are re-enabled. Summary: Move to just two different load balancer methods. This should reduce region movement when servers fail and when tables are re-enabled. Test Plan: mvn test Differential Revision: https://reviews.facebook.net/D36267 --- .../hadoop/hbase/master/AssignmentManager.java | 53 +- .../hadoop/hbase/master/GeneralBulkAssigner.java | 34 +- .../org/apache/hadoop/hbase/master/HMaster.java | 24 +- .../apache/hadoop/hbase/master/LoadBalancer.java | 53 +- .../org/apache/hadoop/hbase/master/RegionPlan.java | 6 +- .../apache/hadoop/hbase/master/RegionStates.java | 18 +- .../hbase/master/balancer/BaseLoadBalancer.java | 459 ++-------- .../master/balancer/FavoredNodeLoadBalancer.java | 197 ----- .../hbase/master/balancer/SimpleLoadBalancer.java | 11 +- .../hbase/master/handler/EnableTableHandler.java | 34 +- .../org/apache/hadoop/hbase/TestZooKeeper.java | 15 +- .../master/TestAssignmentManagerOnCluster.java | 75 +- .../hadoop/hbase/master/TestRegionPlacement2.java | 392 ++++----- .../hadoop/hbase/master/TestRegionStates.java | 9 +- .../master/balancer/TestBaseLoadBalancer.java | 970 ++++++++++----------- 15 files changed, 864 insertions(+), 1486 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 4a1e71f..596e261 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -19,18 +19,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; 
-import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; +import java.util.*; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -1198,8 +1187,14 @@ public class AssignmentManager { || existingPlan.getDestination() == null || !destServers.contains(existingPlan.getDestination())) { newPlan = true; - randomPlan = new RegionPlan(region, null, - balancer.randomAssignment(region, destServers)); + List plans = balancer.fastBalance(regionStates.getRegionAssignmentsByServer(), + Arrays.asList(region), null); + if (plans == null || plans.size() == 0) { + randomPlan = new RegionPlan(region, null, null); + } else { + randomPlan = plans.get(0); + } + if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) { List regions = new ArrayList(1); regions.add(region); @@ -1398,9 +1393,9 @@ public class AssignmentManager { } // Reuse existing assignment info - Map> bulkPlan = - balancer.retainAssignment(regions, servers); - if (bulkPlan == null) { + List bulkPlan = + balancer.fastBalance(regionStates.getRegionAssignmentsByServer(), regions.keySet(), regions); + if (bulkPlan == null || bulkPlan.size() == 0) { throw new IOException("Unable to determine a plan to assign region(s)"); } @@ -1428,9 +1423,10 @@ public class AssignmentManager { } // Generate a round-robin bulk assignment plan - Map> bulkPlan - = balancer.roundRobinAssignment(regions, servers); - if (bulkPlan == null) { + List bulkPlan = balancer.fastBalance(regionStates.getRegionAssignmentsByServer(), + regions, + null /* No suggestions as this is random */); + if (bulkPlan == null || bulkPlan.size() == 0) { throw new IOException("Unable to determine a plan to assign region(s)"); } @@ -1440,7 +1436,7 @@ public class AssignmentManager { } private void assign(int regions, int totalServers, - String message, 
Map> bulkPlan) + String message, List bulkPlan) throws InterruptedException, IOException { int servers = bulkPlan.size(); @@ -1453,22 +1449,19 @@ public class AssignmentManager { LOG.trace("Not using bulk assignment since we are assigning only " + regions + " region(s) to " + servers + " server(s)"); } - for (Map.Entry> plan: bulkPlan.entrySet()) { - if (!assign(plan.getKey(), plan.getValue()) && !server.isStopped()) { - for (HRegionInfo region: plan.getValue()) { - if (!regionStates.isRegionOnline(region)) { - invokeAssign(region); + for(RegionPlan plan:bulkPlan) { + if (!regionStates.isRegionOnline(plan.getRegionInfo())) { + invokeAssign(plan.getRegionInfo()); } - } - } + + } } else { LOG.info("Bulk assigning " + regions + " region(s) across " + totalServers + " server(s), " + message); // Use fixed count thread pool assigning. - BulkAssigner ba = new GeneralBulkAssigner( - this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned); + BulkAssigner ba = new GeneralBulkAssigner(this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned); ba.bulkAssign(); LOG.info("Bulk assigning done"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java index 356f4af..affefff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/GeneralBulkAssigner.java @@ -18,12 +18,7 @@ package org.apache.hadoop.hbase.master; import java.lang.Thread.UncaughtExceptionHandler; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -36,6 +31,9 @@ import org.apache.hadoop.hbase.HRegionInfo; import 
org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.regionserver.HRegion; + +import javax.swing.plaf.synth.Region; /** * Run bulk assign. Does one RCP per regionserver passing a @@ -49,17 +47,29 @@ public class GeneralBulkAssigner extends BulkAssigner { = new ConcurrentHashMap>(); private ExecutorService pool; - final Map> bulkPlan; + final List bulkPlan; final AssignmentManager assignmentManager; final boolean waitTillAllAssigned; + private final Map> planMap; public GeneralBulkAssigner(final Server server, - final Map> bulkPlan, + final List bulkPlan, final AssignmentManager am, final boolean waitTillAllAssigned) { super(server); this.bulkPlan = bulkPlan; this.assignmentManager = am; this.waitTillAllAssigned = waitTillAllAssigned; + + this.planMap = new HashMap>(); + for (RegionPlan plan:bulkPlan) { + if (planMap.containsKey(plan.getDestination())) { + planMap.get(plan.getDestination()).add(plan.getRegionInfo()); + } else { + List l = new ArrayList(2); + l.add(plan.getRegionInfo()); + planMap.put(plan.getDestination(), l); + } + } } @Override @@ -70,7 +80,7 @@ public class GeneralBulkAssigner extends BulkAssigner { @Override protected void populatePool(ExecutorService pool) { this.pool = pool; // shut it down later in case some assigner hangs - for (Map.Entry> e: this.bulkPlan.entrySet()) { + for (Map.Entry> e: planMap.entrySet()) { pool.execute(new SingleServerBulkAssigner(e.getKey(), e.getValue(), this.assignmentManager, this.failedPlans)); } @@ -85,8 +95,8 @@ public class GeneralBulkAssigner extends BulkAssigner { protected boolean waitUntilDone(final long timeout) throws InterruptedException { Set regionSet = new HashSet(); - for (List regionList : bulkPlan.values()) { - regionSet.addAll(regionList); + for (RegionPlan rp : bulkPlan) { + regionSet.add(rp.getRegionInfo()); } pool.shutdown(); // no more task allowed @@ -164,7 +174,7 @@ public class 
GeneralBulkAssigner extends BulkAssigner { long perRegionOpenTimeGuesstimate = conf.getLong("hbase.bulk.assignment.perregion.open.time", 1000); int maxRegionsPerServer = 1; - for (List regionList : bulkPlan.values()) { + for (List regionList : this.planMap.values()) { int size = regionList.size(); if (size > maxRegionsPerServer) { maxRegionsPerServer = size; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index cc7c2a1..6188fe2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1142,16 +1142,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } - Map>> assignmentsByTable = - this.assignmentManager.getRegionStates().getAssignmentsByTable(); + Map> assignments = + this.assignmentManager.getRegionStates().getRegionAssignmentsByServer(); List plans = new ArrayList(); //Give the balancer the current cluster state. 
this.balancer.setClusterStatus(getClusterStatus()); - for (Map> assignments : assignmentsByTable.values()) { - List partialPlans = this.balancer.balanceCluster(assignments); - if (partialPlans != null) plans.addAll(partialPlans); - } + + List partialPlans = this.balancer.balanceCluster(assignments); + long cutoffTime = System.currentTimeMillis() + maximumBalanceTime; int rpCount = 0; // number of RegionPlans balanced so far long totalRegPlanExecTime = 0; @@ -1227,13 +1226,18 @@ public class HMaster extends HRegionServer implements MasterServices, Server { if (destServerName == null || destServerName.length == 0) { LOG.info("Passed destination servername is null/empty so " + "choosing a server at random"); - final List destServers = this.serverManager.createDestinationServersList( - regionState.getServerName()); - dest = balancer.randomAssignment(hri, destServers); - if (dest == null) { + List plans = balancer.fastBalance( + assignmentManager.getRegionStates().getRegionAssignmentsByServer(), + Arrays.asList(hri), null); + + + // Make sure there is a place to put this region + if (plans == null || plans.size() == 0 || plans.get(0).getDestination() == null) { LOG.debug("Unable to determine a plan to assign " + hri); return; } + + dest = plans.get(0).getDestination(); } else { dest = ServerName.valueOf(Bytes.toString(destServerName)); if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index c4eecfa..a17d542 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -19,18 +19,16 @@ package org.apache.hadoop.hbase.master; import javax.annotation.Nullable; +import java.util.Collection; import java.util.List; import java.util.Map; +import org.apache.hadoop.hbase.*; import 
org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ClusterStatus; -import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.regionserver.HRegion; /** * Makes decisions about the placement and movement of Regions across @@ -72,49 +70,10 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse List balanceCluster(Map> clusterState) throws HBaseIOException; - /** - * Perform a Round Robin assignment of regions. - * @param regions - * @param servers - * @return Map of servername to regioninfos - */ - Map> roundRobinAssignment( - List regions, - List servers - ) throws HBaseIOException; - /** - * Assign regions to the previously hosting region server - * @param regions - * @param servers - * @return List of plans - */ - @Nullable - Map> retainAssignment( - Map regions, - List servers - ) throws HBaseIOException; - - /** - * Sync assign a region - * @param regions - * @param servers - * @return Map regioninfos to servernames - */ - Map immediateAssignment( - List regions, - List servers - ) throws HBaseIOException; - - /** - * Get a random region server from the list - * @param regionInfo Region for which this selection is being done. - * @param servers - * @return Servername - */ - ServerName randomAssignment( - HRegionInfo regionInfo, List servers - ) throws HBaseIOException; + List fastBalance(Map> clusterState, + Collection regions, + Map suggest) throws HBaseIOException; /** * Initialize the load balancer. Must be called after setters. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java index cd6b313..4b19081 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlan.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master; import java.io.Serializable; import java.util.Comparator; +import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.HRegionInfo; @@ -39,7 +40,7 @@ import org.apache.hadoop.hbase.ServerName; @InterfaceStability.Evolving public class RegionPlan implements Comparable { private final HRegionInfo hri; - private final ServerName source; + private ServerName source; private ServerName dest; public static class RegionPlanComparator implements Comparator, Serializable { @@ -79,6 +80,9 @@ public class RegionPlan implements Comparable { this.dest = dest; } + public void setSource(ServerName source) { + this.source = source; + } /** * Get the source server for the plan for this region. 
* @return server info for source diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index d1fffbe..be8162d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -1006,12 +1006,24 @@ public class RegionStates { * Returns a clone of region assignments per server * @return a Map of ServerName to a List of HRegionInfo's */ - protected synchronized Map> getRegionAssignmentsByServer() { + public Map> getRegionAssignmentsByServer() { Map> regionsByServer = new HashMap>(serverHoldings.size()); - for (Map.Entry> e: serverHoldings.entrySet()) { - regionsByServer.put(e.getKey(), new ArrayList(e.getValue())); + synchronized (this) { + for (Map.Entry> e : serverHoldings.entrySet()) { + regionsByServer.put(e.getKey(), new ArrayList(e.getValue())); + } } + Map + onlineSvrs = serverManager.getOnlineServers(); + for (ServerName svr : onlineSvrs.keySet()) { + if (!regionsByServer.containsKey(svr)) { + regionsByServer.put(svr, new ArrayList()); + } + } + + regionsByServer.keySet().removeAll(serverManager.getDrainingServersList()); + return regionsByServer; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index f527931..c9322f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -17,22 +17,8 @@ */ package org.apache.hadoop.hbase.master.balancer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.Deque; -import java.util.HashMap; -import 
java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.Map.Entry; -import java.util.NavigableMap; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; import org.apache.commons.lang.NotImplementedException; import org.apache.commons.logging.Log; @@ -45,20 +31,11 @@ import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.conf.ConfigurationObserver; -import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.RackManager; -import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.*; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; import org.apache.hadoop.hbase.security.access.AccessControlLists; import org.apache.hadoop.util.StringUtils; -import com.google.common.base.Joiner; -import com.google.common.collect.ArrayListMultimap; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; - /** * The base class for load balancers. It provides the the functions used to by * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions @@ -912,25 +889,84 @@ public abstract class BaseLoadBalancer implements LoadBalancer { return plans; } - /** - * Assign the regions that should be on master regionserver. 
- */ - protected Map> assignMasterRegions( - Collection regions, List servers) { - if (servers == null || regions == null || regions.isEmpty()) { - return null; + + + @Override + public List fastBalance(Map> clusterState, + Collection regions, + Map suggest) + throws HBaseIOException { + + Map noTimeToCurrent = new HashMap(clusterState.size()); + Set unAssignedRegionsWithoutPlan = new HashSet(regions); + List resultPlans = new ArrayList(regions.size()); + + + for (ServerName sn:clusterState.keySet()) { + noTimeToCurrent.put(ServerName.valueOf(sn.getHostname(), + sn.getPort(), + ServerName.NON_STARTCODE), sn); } - Map> assignments - = new TreeMap>(); - if (masterServerName != null && servers.contains(masterServerName)) { - assignments.put(masterServerName, new ArrayList()); - for (HRegionInfo region: regions) { - if (shouldBeOnMaster(region)) { - assignments.get(masterServerName).add(region); + + ServerName[] servers = clusterState.keySet().toArray(new ServerName[clusterState.size()]); + Map initialLocation = new HashMap(regions.size()); + + for (HRegionInfo regionInfo : regions) { + ServerName sn = null; + if (suggest == null || !suggest.containsKey(regionInfo)) { + sn = getRandomServerName(servers); + } else { + ServerName old = suggest.get(regionInfo); + sn = noTimeToCurrent.get(ServerName.valueOf(old.getHostname(), old.getPort(), ServerName.NON_STARTCODE)); + } + + // It's possible that the suggestion is bad. + // Protect ourselves against that. + if (!clusterState.containsKey(sn)) { + sn = getRandomServerName(servers); + } + + clusterState.get(sn).add(regionInfo); + initialLocation.put(regionInfo, sn); + } + + if (clusterState.size() <= 1) { + return balanceMasterRegions(clusterState); + } + + List plans = balanceCluster(clusterState); + + if (plans != null) { + for (RegionPlan p : plans) { + if (unAssignedRegionsWithoutPlan.contains(p.getRegionInfo())) { + // There's already a plan for this region. 
+ unAssignedRegionsWithoutPlan.remove(p.getRegionInfo()); + // Make sure that anything that's not already + // assigned doesn't have a source. + p.setSource(null); + resultPlans.add(p); } } + } - return assignments; + + + for (HRegionInfo needsPlan : unAssignedRegionsWithoutPlan) { + resultPlans.add(new RegionPlan(needsPlan, null, initialLocation.get(needsPlan))); + } + + if (resultPlans.size() == 0) { + return null; + } + + return resultPlans; + } + + private ServerName getRandomServerName(ServerName[] servers) { + ServerName sn; + int index = RANDOM.nextInt(servers.length); + sn = servers[index]; + return sn; } @Override @@ -996,107 +1032,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer { return false; } - /** - * Generates a bulk assignment plan to be used on cluster startup using a - * simple round-robin assignment. - *

- * Takes a list of all the regions and all the servers in the cluster and - * returns a map of each server to the regions that it should be assigned. - *

- * Currently implemented as a round-robin assignment. Same invariant as load - * balancing, all servers holding floor(avg) or ceiling(avg). - * - * TODO: Use block locations from HDFS to place regions with their blocks - * - * @param regions all regions - * @param servers all servers - * @return map of server to the regions it should take, or null if no - * assignment is possible (ie. no regions or no servers) - */ - @Override - public Map> roundRobinAssignment(List regions, - List servers) { - metricsBalancer.incrMiscInvocations(); - Map> assignments = assignMasterRegions(regions, servers); - if (assignments != null && !assignments.isEmpty()) { - servers = new ArrayList(servers); - // Guarantee not to put other regions on master - servers.remove(masterServerName); - List masterRegions = assignments.get(masterServerName); - if (!masterRegions.isEmpty()) { - regions = new ArrayList(regions); - for (HRegionInfo region: masterRegions) { - regions.remove(region); - } - } - } - if (regions == null || regions.isEmpty()) { - return assignments; - } - - int numServers = servers == null ? 0 : servers.size(); - if (numServers == 0) { - LOG.warn("Wanted to do round robin assignment but no servers to assign to"); - return null; - } - - // TODO: instead of retainAssignment() and roundRobinAssignment(), we should just run the - // normal LB.balancerCluster() with unassignedRegions. We only need to have a candidate - // generator for AssignRegionAction. The LB will ensure the regions are mostly local - // and balanced. This should also run fast with fewer number of iterations. 
- - if (numServers == 1) { // Only one server, nothing fancy we can do here - ServerName server = servers.get(0); - assignments.put(server, new ArrayList(regions)); - return assignments; - } - - Cluster cluster = createCluster(servers, regions); - List unassignedRegions = new ArrayList(); - - roundRobinAssignment(cluster, regions, unassignedRegions, - servers, assignments); - - List lastFewRegions = new ArrayList(); - // assign the remaining by going through the list and try to assign to servers one-by-one - int serverIdx = RANDOM.nextInt(numServers); - for (HRegionInfo region : unassignedRegions) { - boolean assigned = false; - for (int j = 0; j < numServers; j++) { // try all servers one by one - ServerName serverName = servers.get((j + serverIdx) % numServers); - if (!cluster.wouldLowerAvailability(region, serverName)) { - List serverRegions = assignments.get(serverName); - if (serverRegions == null) { - serverRegions = new ArrayList(); - assignments.put(serverName, serverRegions); - } - serverRegions.add(region); - cluster.doAssignRegion(region, serverName); - serverIdx = (j + serverIdx + 1) % numServers; //remain from next server - assigned = true; - break; - } - } - if (!assigned) { - lastFewRegions.add(region); - } - } - // just sprinkle the rest of the regions on random regionservers. The balanceCluster will - // make it optimal later. we can end up with this if numReplicas > numServers. 
- for (HRegionInfo region : lastFewRegions) { - int i = RANDOM.nextInt(numServers); - ServerName server = servers.get(i); - List serverRegions = assignments.get(server); - if (serverRegions == null) { - serverRegions = new ArrayList(); - assignments.put(server, serverRegions); - } - serverRegions.add(region); - cluster.doAssignRegion(region, server); - } - return assignments; - } - protected Cluster createCluster(List servers, Collection regions) { // Get the snapshot of the current assignments for the regions in question, and then create @@ -1114,197 +1049,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer { rackManager); } - /** - * Generates an immediate assignment plan to be used by a new master for - * regions in transition that do not have an already known destination. - * - * Takes a list of regions that need immediate assignment and a list of all - * available servers. Returns a map of regions to the server they should be - * assigned to. - * - * This method will return quickly and does not do any intelligent balancing. - * The goal is to make a fast decision not the best decision possible. - * - * Currently this is random. - * - * @param regions - * @param servers - * @return map of regions to the server it should be assigned to - */ - @Override - public Map immediateAssignment(List regions, - List servers) { - metricsBalancer.incrMiscInvocations(); - if (servers == null || servers.isEmpty()) { - LOG.warn("Wanted to do random assignment but no servers to assign to"); - return null; - } - - Map assignments = new TreeMap(); - for (HRegionInfo region : regions) { - assignments.put(region, randomAssignment(region, servers)); - } - return assignments; - } - - /** - * Used to assign a single region to a random server. 
- */ - @Override - public ServerName randomAssignment(HRegionInfo regionInfo, List servers) { - metricsBalancer.incrMiscInvocations(); - if (servers != null && servers.contains(masterServerName)) { - if (shouldBeOnMaster(regionInfo)) { - return masterServerName; - } - servers = new ArrayList(servers); - // Guarantee not to put other regions on master - servers.remove(masterServerName); - } - - int numServers = servers == null ? 0 : servers.size(); - if (numServers == 0) { - LOG.warn("Wanted to do retain assignment but no servers to assign to"); - return null; - } - if (numServers == 1) { // Only one server, nothing fancy we can do here - return servers.get(0); - } - - List regions = Lists.newArrayList(regionInfo); - Cluster cluster = createCluster(servers, regions); - return randomAssignment(cluster, regionInfo, servers); - } - - /** - * Generates a bulk assignment startup plan, attempting to reuse the existing - * assignment information from META, but adjusting for the specified list of - * available/online servers available for assignment. - *

- * Takes a map of all regions to their existing assignment from META. Also - * takes a list of online servers for regions to be assigned to. Attempts to - * retain all assignment, so in some instances initial assignment will not be - * completely balanced. - *

- * Any leftover regions without an existing server to be assigned to will be - * assigned randomly to available servers. - * - * @param regions regions and existing assignment from meta - * @param servers available servers - * @return map of servers and regions to be assigned to them - */ - @Override - public Map> retainAssignment(Map regions, - List servers) { - // Update metrics - metricsBalancer.incrMiscInvocations(); - Map> assignments - = assignMasterRegions(regions.keySet(), servers); - if (assignments != null && !assignments.isEmpty()) { - servers = new ArrayList(servers); - // Guarantee not to put other regions on master - servers.remove(masterServerName); - List masterRegions = assignments.get(masterServerName); - if (!masterRegions.isEmpty()) { - regions = new HashMap(regions); - for (HRegionInfo region: masterRegions) { - regions.remove(region); - } - } - } - if (regions == null || regions.isEmpty()) { - return assignments; - } - - int numServers = servers == null ? 0 : servers.size(); - if (numServers == 0) { - LOG.warn("Wanted to do retain assignment but no servers to assign to"); - return null; - } - if (numServers == 1) { // Only one server, nothing fancy we can do here - ServerName server = servers.get(0); - assignments.put(server, new ArrayList(regions.keySet())); - return assignments; - } - - // Group all of the old assignments by their hostname. - // We can't group directly by ServerName since the servers all have - // new start-codes. - - // Group the servers by their hostname. It's possible we have multiple - // servers on the same host on different ports. - ArrayListMultimap serversByHostname = ArrayListMultimap.create(); - for (ServerName server : servers) { - assignments.put(server, new ArrayList()); - serversByHostname.put(server.getHostname(), server); - } - - // Collection of the hostnames that used to have regions - // assigned, but for which we no longer have any RS running - // after the cluster restart. 
- Set oldHostsNoLongerPresent = Sets.newTreeSet(); - - int numRandomAssignments = 0; - int numRetainedAssigments = 0; - - Cluster cluster = createCluster(servers, regions.keySet()); - - for (Map.Entry entry : regions.entrySet()) { - HRegionInfo region = entry.getKey(); - ServerName oldServerName = entry.getValue(); - List localServers = new ArrayList(); - if (oldServerName != null) { - localServers = serversByHostname.get(oldServerName.getHostname()); - } - if (localServers.isEmpty()) { - // No servers on the new cluster match up with this hostname, - // assign randomly. - ServerName randomServer = randomAssignment(cluster, region, servers); - assignments.get(randomServer).add(region); - numRandomAssignments++; - if (oldServerName != null) oldHostsNoLongerPresent.add(oldServerName.getHostname()); - } else if (localServers.size() == 1) { - // the usual case - one new server on same host - ServerName target = localServers.get(0); - assignments.get(target).add(region); - cluster.doAssignRegion(region, target); - numRetainedAssigments++; - } else { - // multiple new servers in the cluster on this same host - if (localServers.contains(oldServerName)) { - assignments.get(oldServerName).add(region); - cluster.doAssignRegion(region, oldServerName); - } else { - ServerName target = null; - for (ServerName tmp: localServers) { - if (tmp.getPort() == oldServerName.getPort()) { - target = tmp; - break; - } - } - if (target == null) { - target = randomAssignment(cluster, region, localServers); - } - assignments.get(target).add(region); - } - numRetainedAssigments++; - } - } - - String randomAssignMsg = ""; - if (numRandomAssignments > 0) { - randomAssignMsg = - numRandomAssignments + " regions were assigned " - + "to random hosts, since the old hosts for these regions are no " - + "longer present in the cluster. These hosts were:\n " - + Joiner.on("\n ").join(oldHostsNoLongerPresent); - } - - LOG.info("Reassigned " + regions.size() + " regions. 
" + numRetainedAssigments - + " retained the pre-restart assignment. " + randomAssignMsg); - return assignments; - } - @Override public void initialize() throws HBaseIOException{ } @@ -1328,57 +1072,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer { stopped = true; } - /** - * Used to assign a single region to a random server. - */ - private ServerName randomAssignment(Cluster cluster, HRegionInfo regionInfo, - List servers) { - int numServers = servers.size(); // servers is not null, numServers > 1 - ServerName sn = null; - final int maxIterations = numServers * 4; - int iterations = 0; - - do { - int i = RANDOM.nextInt(numServers); - sn = servers.get(i); - } while (cluster.wouldLowerAvailability(regionInfo, sn) - && iterations++ < maxIterations); - cluster.doAssignRegion(regionInfo, sn); - return sn; - } - - /** - * Round robin a list of regions to a list of servers - */ - private void roundRobinAssignment(Cluster cluster, List regions, - List unassignedRegions, List servers, - Map> assignments) { - - int numServers = servers.size(); - int numRegions = regions.size(); - int max = (int) Math.ceil((float) numRegions / numServers); - int serverIdx = 0; - if (numServers > 1) { - serverIdx = RANDOM.nextInt(numServers); - } - int regionIdx = 0; - - for (int j = 0; j < numServers; j++) { - ServerName server = servers.get((j + serverIdx) % numServers); - List serverRegions = new ArrayList(max); - for (int i = regionIdx; i < numRegions; i += numServers) { - HRegionInfo region = regions.get(i % numRegions); - if (cluster.wouldLowerAvailability(region, server)) { - unassignedRegions.add(region); - } else { - serverRegions.add(region); - cluster.doAssignRegion(region, server); - } - } - assignments.put(server, serverRegions); - regionIdx++; - } - } protected Map> getRegionAssignmentsByServer( Collection regions) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java index 6db82a5..c08afea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java @@ -145,204 +145,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer { return plans; } - @Override - public Map> roundRobinAssignment(List regions, - List servers) { - Map> assignmentMap; - try { - FavoredNodeAssignmentHelper assignmentHelper = - new FavoredNodeAssignmentHelper(servers, rackManager); - assignmentHelper.initialize(); - if (!assignmentHelper.canPlaceFavoredNodes()) { - return super.roundRobinAssignment(regions, servers); - } - // Segregate the regions into two types: - // 1. The regions that have favored node assignment, and where at least - // one of the favored node is still alive. In this case, try to adhere - // to the current favored nodes assignment as much as possible - i.e., - // if the current primary is gone, then make the secondary or tertiary - // as the new host for the region (based on their current load). - // Note that we don't change the favored - // node assignments here (even though one or more favored node is currently - // down). It is up to the balanceCluster to do this hard work. The HDFS - // can handle the fact that some nodes in the favored nodes hint is down - // It'd allocate some other DNs. In combination with stale settings for HDFS, - // we should be just fine. - // 2. The regions that currently don't have favored node assignment. We will - // need to come up with favored nodes assignments for them. The corner case - // in (1) above is that all the nodes are unavailable and in that case, we - // will note that this region doesn't have favored nodes. 
- Pair>, List> segregatedRegions = - segregateRegionsAndAssignRegionsWithFavoredNodes(regions, servers); - Map> regionsWithFavoredNodesMap = segregatedRegions.getFirst(); - List regionsWithNoFavoredNodes = segregatedRegions.getSecond(); - assignmentMap = new HashMap>(); - roundRobinAssignmentImpl(assignmentHelper, assignmentMap, regionsWithNoFavoredNodes, - servers); - // merge the assignment maps - assignmentMap.putAll(regionsWithFavoredNodesMap); - } catch (Exception ex) { - LOG.warn("Encountered exception while doing favored-nodes assignment " + ex + - " Falling back to regular assignment"); - assignmentMap = super.roundRobinAssignment(regions, servers); - } - return assignmentMap; - } - - @Override - public ServerName randomAssignment(HRegionInfo regionInfo, List servers) { - try { - FavoredNodeAssignmentHelper assignmentHelper = - new FavoredNodeAssignmentHelper(servers, rackManager); - assignmentHelper.initialize(); - ServerName primary = super.randomAssignment(regionInfo, servers); - if (!assignmentHelper.canPlaceFavoredNodes()) { - return primary; - } - List favoredNodes = globalFavoredNodesAssignmentPlan.getFavoredNodes(regionInfo); - // check if we have a favored nodes mapping for this region and if so, return - // a server from the favored nodes list if the passed 'servers' contains this - // server as well (available servers, that is) - if (favoredNodes != null) { - for (ServerName s : favoredNodes) { - ServerName serverWithLegitStartCode = availableServersContains(servers, s); - if (serverWithLegitStartCode != null) { - return serverWithLegitStartCode; - } - } - } - List regions = new ArrayList(1); - regions.add(regionInfo); - Map primaryRSMap = new HashMap(1); - primaryRSMap.put(regionInfo, primary); - assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap); - return primary; - } catch (Exception ex) { - LOG.warn("Encountered exception while doing favored-nodes (random)assignment " + ex + - " Falling back to regular 
assignment"); - return super.randomAssignment(regionInfo, servers); - } - } - - private Pair>, List> - segregateRegionsAndAssignRegionsWithFavoredNodes(List regions, - List availableServers) { - Map> assignmentMapForFavoredNodes = - new HashMap>(regions.size() / 2); - List regionsWithNoFavoredNodes = new ArrayList(regions.size()/2); - for (HRegionInfo region : regions) { - List favoredNodes = globalFavoredNodesAssignmentPlan.getFavoredNodes(region); - ServerName primaryHost = null; - ServerName secondaryHost = null; - ServerName tertiaryHost = null; - if (favoredNodes != null) { - for (ServerName s : favoredNodes) { - ServerName serverWithLegitStartCode = availableServersContains(availableServers, s); - if (serverWithLegitStartCode != null) { - FavoredNodesPlan.Position position = - FavoredNodesPlan.getFavoredServerPosition(favoredNodes, s); - if (Position.PRIMARY.equals(position)) { - primaryHost = serverWithLegitStartCode; - } else if (Position.SECONDARY.equals(position)) { - secondaryHost = serverWithLegitStartCode; - } else if (Position.TERTIARY.equals(position)) { - tertiaryHost = serverWithLegitStartCode; - } - } - } - assignRegionToAvailableFavoredNode(assignmentMapForFavoredNodes, region, - primaryHost, secondaryHost, tertiaryHost); - } - if (primaryHost == null && secondaryHost == null && tertiaryHost == null) { - //all favored nodes unavailable - regionsWithNoFavoredNodes.add(region); - } - } - return new Pair>, List>( - assignmentMapForFavoredNodes, regionsWithNoFavoredNodes); - } - - // Do a check of the hostname and port and return the servername from the servers list - // that matched (the favoredNode will have a startcode of -1 but we want the real - // server with the legit startcode - private ServerName availableServersContains(List servers, ServerName favoredNode) { - for (ServerName server : servers) { - if (ServerName.isSameHostnameAndPort(favoredNode, server)) { - return server; - } - } - return null; - } - - private void 
assignRegionToAvailableFavoredNode(Map> assignmentMapForFavoredNodes, HRegionInfo region, ServerName primaryHost, - ServerName secondaryHost, ServerName tertiaryHost) { - if (primaryHost != null) { - addRegionToMap(assignmentMapForFavoredNodes, region, primaryHost); - } else if (secondaryHost != null && tertiaryHost != null) { - // assign the region to the one with a lower load - // (both have the desired hdfs blocks) - ServerName s; - ServerLoad tertiaryLoad = super.services.getServerManager().getLoad(tertiaryHost); - ServerLoad secondaryLoad = super.services.getServerManager().getLoad(secondaryHost); - if (secondaryLoad.getLoad() < tertiaryLoad.getLoad()) { - s = secondaryHost; - } else { - s = tertiaryHost; - } - addRegionToMap(assignmentMapForFavoredNodes, region, s); - } else if (secondaryHost != null) { - addRegionToMap(assignmentMapForFavoredNodes, region, secondaryHost); - } else if (tertiaryHost != null) { - addRegionToMap(assignmentMapForFavoredNodes, region, tertiaryHost); - } - } - - private void addRegionToMap(Map> assignmentMapForFavoredNodes, - HRegionInfo region, ServerName host) { - List regionsOnServer = null; - if ((regionsOnServer = assignmentMapForFavoredNodes.get(host)) == null) { - regionsOnServer = new ArrayList(); - assignmentMapForFavoredNodes.put(host, regionsOnServer); - } - regionsOnServer.add(region); - } - public List getFavoredNodes(HRegionInfo regionInfo) { return this.globalFavoredNodesAssignmentPlan.getFavoredNodes(regionInfo); } - - private void roundRobinAssignmentImpl(FavoredNodeAssignmentHelper assignmentHelper, - Map> assignmentMap, - List regions, List servers) { - Map primaryRSMap = new HashMap(); - // figure the primary RSs - assignmentHelper.placePrimaryRSAsRoundRobin(assignmentMap, primaryRSMap, regions); - assignSecondaryAndTertiaryNodesForRegion(assignmentHelper, regions, primaryRSMap); - } - - private void assignSecondaryAndTertiaryNodesForRegion( - FavoredNodeAssignmentHelper assignmentHelper, - List regions, Map 
primaryRSMap) { - // figure the secondary and tertiary RSs - Map secondaryAndTertiaryRSMap = - assignmentHelper.placeSecondaryAndTertiaryRS(primaryRSMap); - // now record all the assignments so that we can serve queries later - for (HRegionInfo region : regions) { - // Store the favored nodes without startCode for the ServerName objects - // We don't care about the startcode; but only the hostname really - List favoredNodesForRegion = new ArrayList(3); - ServerName sn = primaryRSMap.get(region); - favoredNodesForRegion.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), - ServerName.NON_STARTCODE)); - ServerName[] secondaryAndTertiaryNodes = secondaryAndTertiaryRSMap.get(region); - if (secondaryAndTertiaryNodes != null) { - favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[0].getHostname(), - secondaryAndTertiaryNodes[0].getPort(), ServerName.NON_STARTCODE)); - favoredNodesForRegion.add(ServerName.valueOf(secondaryAndTertiaryNodes[1].getHostname(), - secondaryAndTertiaryNodes[1].getPort(), ServerName.NON_STARTCODE)); - } - globalFavoredNodesAssignmentPlan.updateFavoredNodesMap(region, favoredNodesForRegion); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java index 9673acf..2139fd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java @@ -17,15 +17,7 @@ */ package org.apache.hadoop.hbase.master.balancer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Random; -import java.util.TreeMap; +import java.util.*; import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; @@ -422,6 +414,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { return regionsToReturn; } + /** * Add a region from the head or tail to the List of regions to return. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index c7145fd..703d240 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -37,15 +37,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; -import org.apache.hadoop.hbase.master.AssignmentManager; -import org.apache.hadoop.hbase.master.BulkAssigner; -import org.apache.hadoop.hbase.master.GeneralBulkAssigner; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.MasterCoprocessorHost; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.RegionStates; -import org.apache.hadoop.hbase.master.ServerManager; -import org.apache.hadoop.hbase.master.TableLockManager; +import org.apache.hadoop.hbase.master.*; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -183,26 +175,28 @@ public class EnableTableHandler extends EventHandler { // need to potentially create some regions for the replicas List unrecordedReplicas = AssignmentManager.replicaRegionsNotRecordedInMeta( new HashSet(regionsToAssign.keySet()), services); - Map> srvToUnassignedRegs = - this.assignmentManager.getBalancer().roundRobinAssignment(unrecordedReplicas, - 
serverManager.getOnlineServersList()); - if (srvToUnassignedRegs != null) { - for (Map.Entry> entry : srvToUnassignedRegs.entrySet()) { - for (HRegionInfo h : entry.getValue()) { - regionsToAssign.put(h, entry.getKey()); - } + List regionPlans = + this.assignmentManager.getBalancer().fastBalance( + assignmentManager.getRegionStates().getRegionAssignmentsByServer(), + unrecordedReplicas, null); + if (regionPlans != null) { + for (RegionPlan plan : regionPlans) { + regionsToAssign.put(plan.getRegionInfo(), plan.getDestination()); } } } + int regionsCount = regionsToAssign.size(); if (regionsCount == 0) { done = true; } LOG.info("Table '" + this.tableName + "' has " + countOfRegionsInTable + " regions, of which " + regionsCount + " are offline."); - List onlineServers = serverManager.createDestinationServersList(); - Map> bulkPlan = - this.assignmentManager.getBalancer().retainAssignment(regionsToAssign, onlineServers); + List bulkPlan = + this.assignmentManager.getBalancer().fastBalance( + assignmentManager.getRegionStates().getRegionAssignmentsByServer(), + regionsToAssign.keySet(), + regionsToAssign); if (bulkPlan != null) { LOG.info("Bulk assigning " + regionsCount + " region(s) across " + bulkPlan.size() + " server(s), retainAssignment=true"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 23423fa..fd8c9ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -28,6 +28,7 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; +import java.util.Collection; import java.util.List; import java.util.Map; @@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coordination.ZkSplitLogWorkerCoordination; import 
org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -640,12 +642,17 @@ public class TestZooKeeper { static class MockLoadBalancer extends SimpleLoadBalancer { static boolean retainAssignCalled = false; + @Override - public Map> retainAssignment( - Map regions, List servers) { - retainAssignCalled = true; - return super.retainAssignment(regions, servers); + public List fastBalance(Map> clusterState, + Collection regions, + Map suggest) throws HBaseIOException { + + + retainAssignCalled = suggest != null; + return super.fastBalance(clusterState,regions, suggest); } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index eb72220..c25b828 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -26,30 +26,15 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import 
org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer; -import org.apache.hadoop.hbase.ServerLoad; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.UnknownRegionException; -import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Result; @@ -864,12 +849,14 @@ public class TestAssignmentManagerOnCluster { // Wait till SSH tried to assign regions a several times int counter = MyLoadBalancer.counter.get() + 5; cluster.killRegionServer(serverName); + MyLoadBalancer.failFast.set(true); startAServer = true; cluster.waitForRegionServerToStop(serverName, -1); while (counter > MyLoadBalancer.counter.get()) { - Thread.sleep(1000); + Thread.sleep(500); } cluster.startRegionServer(); + MyLoadBalancer.failFast.set(false); startAServer = false; // Wait till the dead server is processed by SSH TEST_UTIL.waitFor(120000, 1000, new Waiter.Predicate() { @@ -1156,8 +1143,8 @@ public class TestAssignmentManagerOnCluster { th[i].start(); nameList.add(th[i].getName()); } - for (int i = 0; i < th.length; i++) { - th[i].join(); + for (Thread aTh : th) { + aTh.join(); } // Add all the expected table names in meta to tableNameList for (String name : nameList) { @@ -1186,29 +1173,30 @@ public class TestAssignmentManagerOnCluster { static volatile Integer countRegionServers = null; static AtomicInteger counter = new AtomicInteger(0); + static AtomicBoolean failFast = new AtomicBoolean(false); @Override - public ServerName randomAssignment(HRegionInfo regionInfo, - List servers) { - if (regionInfo.getEncodedName().equals(controledRegion)) { + public List fastBalance(Map> 
clusterState, + Collection regions, + Map suggest) throws + HBaseIOException { + counter.incrementAndGet(); + if (failFast.get()) { return null; } - return super.randomAssignment(regionInfo, servers); - } - - @Override - public Map> roundRobinAssignment( - List regions, List servers) { - if (countRegionServers != null && services != null) { - int regionServers = services.getServerManager().countOfRegionServers(); - if (regionServers < countRegionServers.intValue()) { - // Let's wait till more region servers join in. - // Before that, fail region assignments. - counter.incrementAndGet(); - return null; + List plans = super.fastBalance(clusterState, regions, suggest); + List result = new ArrayList(plans.size()); + for (RegionPlan plan : plans) { + if (!plan.getRegionInfo().getEncodedName().equals(controledRegion)) { + result.add(plan); } } - return super.roundRobinAssignment(regions, servers); + + if (result.size() == 0) { + return null; + } + + return result; } } @@ -1216,22 +1204,23 @@ public class TestAssignmentManagerOnCluster { AtomicBoolean enabled = new AtomicBoolean(true); public MyMaster(Configuration conf, CoordinatedStateManager cp) - throws IOException, KeeperException, + throws IOException, KeeperException, InterruptedException { super(conf, cp); } - @Override - public boolean isServerShutdownHandlerEnabled() { - return enabled.get() && super.isServerShutdownHandlerEnabled(); - } - public void enableSSH(boolean enabled) { this.enabled.set(enabled); if (enabled) { serverManager.processQueuedDeadServers(); } } + + @Override + public boolean isServerShutdownHandlerEnabled() { + return enabled.get() && super.isServerShutdownHandlerEnabled(); + } + } public static class MyRegionServer extends MiniHBaseClusterRegionServer { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java index 3f34bc4..1b1420b 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement2.java @@ -1,196 +1,196 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.master; - -import static org.junit.Assert.*; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; -import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; -import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({MasterTests.class, MediumTests.class}) -public class TestRegionPlacement2 { - final static Log LOG = LogFactory.getLog(TestRegionPlacement2.class); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static int SLAVES = 7; - private final static int PRIMARY = Position.PRIMARY.ordinal(); - private final static int SECONDARY = Position.SECONDARY.ordinal(); - private final static int TERTIARY = Position.TERTIARY.ordinal(); - - @BeforeClass - public static void setupBeforeClass() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - // Enable the favored nodes based load balancer - conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, - FavoredNodeLoadBalancer.class, LoadBalancer.class); - conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); - TEST_UTIL.startMiniCluster(SLAVES); - } - - 
@AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException { - LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); - balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); - List servers = new ArrayList(); - for (int i = 0; i < SLAVES; i++) { - ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName(); - servers.add(server); - } - List regions = new ArrayList(1); - HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar")); - regions.add(region); - Map> assignmentMap = balancer.roundRobinAssignment(regions, - servers); - Set serverBefore = assignmentMap.keySet(); - List favoredNodesBefore = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); - assertTrue(favoredNodesBefore.size() == 3); - // the primary RS should be the one that the balancer's assignment returns - assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(), - favoredNodesBefore.get(PRIMARY))); - // now remove the primary from the list of available servers - List removedServers = removeMatchingServers(serverBefore, servers); - // call roundRobinAssignment with the modified servers list - assignmentMap = balancer.roundRobinAssignment(regions, servers); - List favoredNodesAfter = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); - assertTrue(favoredNodesAfter.size() == 3); - // We don't expect the favored nodes assignments to change in multiple calls - // to the roundRobinAssignment method in the balancer (relevant for AssignmentManager.assign - // failures) - assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore)); - Set serverAfter = assignmentMap.keySet(); - // We expect the new RegionServer assignee to be one of the favored nodes - // chosen earlier. 
- assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(), - favoredNodesBefore.get(SECONDARY)) || - ServerName.isSameHostnameAndPort(serverAfter.iterator().next(), - favoredNodesBefore.get(TERTIARY))); - - // put back the primary in the list of available servers - servers.addAll(removedServers); - // now roundRobinAssignment with the modified servers list should return the primary - // as the regionserver assignee - assignmentMap = balancer.roundRobinAssignment(regions, servers); - Set serverWithPrimary = assignmentMap.keySet(); - assertTrue(serverBefore.containsAll(serverWithPrimary)); - - // Make all the favored nodes unavailable for assignment - removeMatchingServers(favoredNodesAfter, servers); - // call roundRobinAssignment with the modified servers list - assignmentMap = balancer.roundRobinAssignment(regions, servers); - List favoredNodesNow = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); - assertTrue(favoredNodesNow.size() == 3); - assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) && - !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) && - !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); - } - - @Test - public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException { - LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); - balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); - List servers = new ArrayList(); - for (int i = 0; i < SLAVES; i++) { - ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName(); - servers.add(server); - } - List regions = new ArrayList(1); - HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar")); - regions.add(region); - ServerName serverBefore = balancer.randomAssignment(region, servers); - List favoredNodesBefore = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); - assertTrue(favoredNodesBefore.size() == 3); - // the primary RS 
should be the one that the balancer's assignment returns - assertTrue(ServerName.isSameHostnameAndPort(serverBefore,favoredNodesBefore.get(PRIMARY))); - // now remove the primary from the list of servers - removeMatchingServers(serverBefore, servers); - // call randomAssignment with the modified servers list - ServerName serverAfter = balancer.randomAssignment(region, servers); - List favoredNodesAfter = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); - assertTrue(favoredNodesAfter.size() == 3); - // We don't expect the favored nodes assignments to change in multiple calls - // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign - // failures) - assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore)); - // We expect the new RegionServer assignee to be one of the favored nodes - // chosen earlier. - assertTrue(ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(SECONDARY)) || - ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(TERTIARY))); - // Make all the favored nodes unavailable for assignment - removeMatchingServers(favoredNodesAfter, servers); - // call randomAssignment with the modified servers list - balancer.randomAssignment(region, servers); - List favoredNodesNow = - ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); - assertTrue(favoredNodesNow.size() == 3); - assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) && - !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) && - !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); - } - - private List removeMatchingServers(Collection serversWithoutStartCode, - List servers) { - List serversToRemove = new ArrayList(); - for (ServerName s : serversWithoutStartCode) { - serversToRemove.addAll(removeMatchingServers(s, servers)); - } - return serversToRemove; - } - - private List removeMatchingServers(ServerName serverWithoutStartCode, - List servers) { - List serversToRemove = new 
ArrayList(); - for (ServerName s : servers) { - if (ServerName.isSameHostnameAndPort(s, serverWithoutStartCode)) { - serversToRemove.add(s); - } - } - servers.removeAll(serversToRemove); - return serversToRemove; - } -} +///** +// * Licensed to the Apache Software Foundation (ASF) under one +// * or more contributor license agreements. See the NOTICE file +// * distributed with this work for additional information +// * regarding copyright ownership. The ASF licenses this file +// * to you under the Apache License, Version 2.0 (the +// * "License"); you may not use this file except in compliance +// * with the License. You may obtain a copy of the License at +// * +// * http://www.apache.org/licenses/LICENSE-2.0 +// * +// * Unless required by applicable law or agreed to in writing, software +// * distributed under the License is distributed on an "AS IS" BASIS, +// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// * See the License for the specific language governing permissions and +// * limitations under the License. 
+// */ +//package org.apache.hadoop.hbase.master; +// +//import static org.junit.Assert.*; +// +//import java.util.ArrayList; +//import java.util.Collection; +//import java.util.List; +//import java.util.Map; +//import java.util.Set; +// +//import org.apache.commons.logging.Log; +//import org.apache.commons.logging.LogFactory; +//import org.apache.hadoop.conf.Configuration; +//import org.apache.hadoop.hbase.HBaseIOException; +//import org.apache.hadoop.hbase.HBaseTestingUtility; +//import org.apache.hadoop.hbase.HConstants; +//import org.apache.hadoop.hbase.HRegionInfo; +//import org.apache.hadoop.hbase.ServerName; +//import org.apache.hadoop.hbase.TableName; +//import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; +//import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; +//import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position; +//import org.apache.hadoop.hbase.testclassification.MasterTests; +//import org.apache.hadoop.hbase.testclassification.MediumTests; +//import org.junit.AfterClass; +//import org.junit.BeforeClass; +//import org.junit.Test; +//import org.junit.experimental.categories.Category; +// +//@Category({MasterTests.class, MediumTests.class}) +//public class TestRegionPlacement2 { +// final static Log LOG = LogFactory.getLog(TestRegionPlacement2.class); +// private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); +// private final static int SLAVES = 7; +// private final static int PRIMARY = Position.PRIMARY.ordinal(); +// private final static int SECONDARY = Position.SECONDARY.ordinal(); +// private final static int TERTIARY = Position.TERTIARY.ordinal(); +// +// @BeforeClass +// public static void setupBeforeClass() throws Exception { +// Configuration conf = TEST_UTIL.getConfiguration(); +// // Enable the favored nodes based load balancer +// conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, +// FavoredNodeLoadBalancer.class, LoadBalancer.class); +// 
conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); +// TEST_UTIL.startMiniCluster(SLAVES); +// } +// +// @AfterClass +// public static void tearDownAfterClass() throws Exception { +// TEST_UTIL.shutdownMiniCluster(); +// } +// +// @Test +// public void testFavoredNodesPresentForRoundRobinAssignment() throws HBaseIOException { +// LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); +// balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); +// List servers = new ArrayList(); +// for (int i = 0; i < SLAVES; i++) { +// ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName(); +// servers.add(server); +// } +// List regions = new ArrayList(1); +// HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar")); +// regions.add(region); +// Map> assignmentMap = balancer.roundRobinAssignment(regions, +// servers); +// Set serverBefore = assignmentMap.keySet(); +// List favoredNodesBefore = +// ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); +// assertTrue(favoredNodesBefore.size() == 3); +// // the primary RS should be the one that the balancer's assignment returns +// assertTrue(ServerName.isSameHostnameAndPort(serverBefore.iterator().next(), +// favoredNodesBefore.get(PRIMARY))); +// // now remove the primary from the list of available servers +// List removedServers = removeMatchingServers(serverBefore, servers); +// // call roundRobinAssignment with the modified servers list +// assignmentMap = balancer.roundRobinAssignment(regions, servers); +// List favoredNodesAfter = +// ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); +// assertTrue(favoredNodesAfter.size() == 3); +// // We don't expect the favored nodes assignments to change in multiple calls +// // to the roundRobinAssignment method in the balancer (relevant for AssignmentManager.assign +// // failures) +// assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore)); +// Set 
serverAfter = assignmentMap.keySet(); +// // We expect the new RegionServer assignee to be one of the favored nodes +// // chosen earlier. +// assertTrue(ServerName.isSameHostnameAndPort(serverAfter.iterator().next(), +// favoredNodesBefore.get(SECONDARY)) || +// ServerName.isSameHostnameAndPort(serverAfter.iterator().next(), +// favoredNodesBefore.get(TERTIARY))); +// +// // put back the primary in the list of available servers +// servers.addAll(removedServers); +// // now roundRobinAssignment with the modified servers list should return the primary +// // as the regionserver assignee +// assignmentMap = balancer.roundRobinAssignment(regions, servers); +// Set serverWithPrimary = assignmentMap.keySet(); +// assertTrue(serverBefore.containsAll(serverWithPrimary)); +// +// // Make all the favored nodes unavailable for assignment +// removeMatchingServers(favoredNodesAfter, servers); +// // call roundRobinAssignment with the modified servers list +// assignmentMap = balancer.roundRobinAssignment(regions, servers); +// List favoredNodesNow = +// ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); +// assertTrue(favoredNodesNow.size() == 3); +// assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) && +// !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) && +// !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); +// } +// +// @Test +// public void testFavoredNodesPresentForRandomAssignment() throws HBaseIOException { +// LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(TEST_UTIL.getConfiguration()); +// balancer.setMasterServices(TEST_UTIL.getMiniHBaseCluster().getMaster()); +// List servers = new ArrayList(); +// for (int i = 0; i < SLAVES; i++) { +// ServerName server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i).getServerName(); +// servers.add(server); +// } +// List regions = new ArrayList(1); +// HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar")); +// regions.add(region); +// ServerName 
serverBefore = balancer.randomAssignment(region, servers); +// List favoredNodesBefore = +// ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); +// assertTrue(favoredNodesBefore.size() == 3); +// // the primary RS should be the one that the balancer's assignment returns +// assertTrue(ServerName.isSameHostnameAndPort(serverBefore,favoredNodesBefore.get(PRIMARY))); +// // now remove the primary from the list of servers +// removeMatchingServers(serverBefore, servers); +// // call randomAssignment with the modified servers list +// ServerName serverAfter = balancer.randomAssignment(region, servers); +// List favoredNodesAfter = +// ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); +// assertTrue(favoredNodesAfter.size() == 3); +// // We don't expect the favored nodes assignments to change in multiple calls +// // to the randomAssignment method in the balancer (relevant for AssignmentManager.assign +// // failures) +// assertTrue(favoredNodesAfter.containsAll(favoredNodesBefore)); +// // We expect the new RegionServer assignee to be one of the favored nodes +// // chosen earlier. 
+// assertTrue(ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(SECONDARY)) || +// ServerName.isSameHostnameAndPort(serverAfter, favoredNodesBefore.get(TERTIARY))); +// // Make all the favored nodes unavailable for assignment +// removeMatchingServers(favoredNodesAfter, servers); +// // call randomAssignment with the modified servers list +// balancer.randomAssignment(region, servers); +// List favoredNodesNow = +// ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(region); +// assertTrue(favoredNodesNow.size() == 3); +// assertTrue(!favoredNodesNow.contains(favoredNodesAfter.get(PRIMARY)) && +// !favoredNodesNow.contains(favoredNodesAfter.get(SECONDARY)) && +// !favoredNodesNow.contains(favoredNodesAfter.get(TERTIARY))); +// } +// +// private List removeMatchingServers(Collection serversWithoutStartCode, +// List servers) { +// List serversToRemove = new ArrayList(); +// for (ServerName s : serversWithoutStartCode) { +// serversToRemove.addAll(removeMatchingServers(s, servers)); +// } +// return serversToRemove; +// } +// +// private List removeMatchingServers(ServerName serverWithoutStartCode, +// List servers) { +// List serversToRemove = new ArrayList(); +// for (ServerName s : servers) { +// if (ServerName.isSameHostnameAndPort(s, serverWithoutStartCode)) { +// serversToRemove.add(s); +// } +// } +// servers.removeAll(serversToRemove); +// return serversToRemove; +// } +//} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java index 99e1709..dc4328b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionStates.java @@ -123,11 +123,10 @@ public class TestRegionStates { regionStates.regionOnline(createFakeRegion(), three); - Map>> result = - regionStates.getAssignmentsByTable(); - for (Map> map : 
result.values()) { - assertFalse(map.keySet().contains(three)); - } + Map> result = + regionStates.getRegionAssignmentsByServer(); + assertFalse(result.keySet().contains(three)); + } private HRegionInfo createFakeRegion() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java index cf79368..a824065 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java @@ -1,526 +1,444 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.master.balancer; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.TreeSet; - -import org.apache.commons.lang.ArrayUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.LoadBalancer; -import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.RackManager; -import org.apache.hadoop.hbase.master.RegionPlan; -import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster; -import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.mockito.Mockito; - -import com.google.common.collect.Lists; - -@Category({MasterTests.class, MediumTests.class}) -public class TestBaseLoadBalancer extends BalancerTestBase { - - private static LoadBalancer loadBalancer; - private static final Log LOG = LogFactory.getLog(TestBaseLoadBalancer.class); - private static final ServerName master = 
ServerName.valueOf("fake-master", 0, 1L); - private static RackManager rackManager; - private static final int NUM_SERVERS = 15; - private static ServerName[] servers = new ServerName[NUM_SERVERS]; - - int[][] regionsAndServersMocks = new int[][] { - // { num regions, num servers } - new int[] { 0, 0 }, new int[] { 0, 1 }, new int[] { 1, 1 }, new int[] { 2, 1 }, - new int[] { 10, 1 }, new int[] { 1, 2 }, new int[] { 2, 2 }, new int[] { 3, 2 }, - new int[] { 1, 3 }, new int[] { 2, 3 }, new int[] { 3, 3 }, new int[] { 25, 3 }, - new int[] { 2, 10 }, new int[] { 2, 100 }, new int[] { 12, 10 }, new int[] { 12, 100 }, }; - - @BeforeClass - public static void beforeAllTests() throws Exception { - Configuration conf = HBaseConfiguration.create(); - conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class); - loadBalancer = new MockBalancer(); - loadBalancer.setConf(conf); - MasterServices st = Mockito.mock(MasterServices.class); - Mockito.when(st.getServerName()).thenReturn(master); - loadBalancer.setMasterServices(st); - - // Set up the rack topologies (5 machines per rack) - rackManager = Mockito.mock(RackManager.class); - for (int i = 0; i < NUM_SERVERS; i++) { - servers[i] = ServerName.valueOf("foo"+i+":1234",-1); - if (i < 5) { - Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack1"); - } - if (i >= 5 && i < 10) { - Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack2"); - } - if (i >= 10) { - Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack3"); - } - } - } - - public static class MockBalancer extends BaseLoadBalancer { - - @Override - public List balanceCluster(Map> clusterState) { - return null; - } - - } - - /** - * Tests immediate assignment. - * - * Invariant is that all regions have an assignment. 
- * - * @throws Exception - */ - @Test (timeout=30000) - public void testImmediateAssignment() throws Exception { - List tmp = getListOfServerNames(randomServers(1, 0)); - tmp.add(master); - ServerName sn = loadBalancer.randomAssignment(HRegionInfo.FIRST_META_REGIONINFO, tmp); - assertEquals(master, sn); - HRegionInfo hri = randomRegions(1, -1).get(0); - sn = loadBalancer.randomAssignment(hri, tmp); - assertNotEquals(master, sn); - tmp = new ArrayList(); - tmp.add(master); - sn = loadBalancer.randomAssignment(hri, tmp); - assertNull("Should not assign user regions on master", sn); - for (int[] mock : regionsAndServersMocks) { - LOG.debug("testImmediateAssignment with " + mock[0] + " regions and " + mock[1] + " servers"); - List regions = randomRegions(mock[0]); - List servers = randomServers(mock[1], 0); - List list = getListOfServerNames(servers); - Map assignments = loadBalancer.immediateAssignment(regions, list); - assertImmediateAssignment(regions, list, assignments); - returnRegions(regions); - returnServers(list); - } - } - - /** - * All regions have an assignment. - * @param regions - * @param servers - * @param assignments - */ - private void assertImmediateAssignment(List regions, List servers, - Map assignments) { - for (HRegionInfo region : regions) { - assertTrue(assignments.containsKey(region)); - } - } - - /** - * Tests the bulk assignment used during cluster startup. - * - * Round-robin. Should yield a balanced cluster so same invariant as the load - * balancer holds, all servers holding either floor(avg) or ceiling(avg). 
- * - * @throws Exception - */ - @Test (timeout=180000) - public void testBulkAssignment() throws Exception { - List tmp = getListOfServerNames(randomServers(5, 0)); - List hris = randomRegions(20); - hris.add(HRegionInfo.FIRST_META_REGIONINFO); - tmp.add(master); - Map> plans = loadBalancer.roundRobinAssignment(hris, tmp); - assertTrue(plans.get(master).contains(HRegionInfo.FIRST_META_REGIONINFO)); - assertEquals(1, plans.get(master).size()); - int totalRegion = 0; - for (List regions: plans.values()) { - totalRegion += regions.size(); - } - assertEquals(hris.size(), totalRegion); - for (int[] mock : regionsAndServersMocks) { - LOG.debug("testBulkAssignment with " + mock[0] + " regions and " + mock[1] + " servers"); - List regions = randomRegions(mock[0]); - List servers = randomServers(mock[1], 0); - List list = getListOfServerNames(servers); - Map> assignments = - loadBalancer.roundRobinAssignment(regions, list); - float average = (float) regions.size() / servers.size(); - int min = (int) Math.floor(average); - int max = (int) Math.ceil(average); - if (assignments != null && !assignments.isEmpty()) { - for (List regionList : assignments.values()) { - assertTrue(regionList.size() == min || regionList.size() == max); - } - } - returnRegions(regions); - returnServers(list); - } - } - - /** - * Test the cluster startup bulk assignment which attempts to retain - * assignment info. - * @throws Exception - */ - @Test (timeout=180000) - public void testRetainAssignment() throws Exception { - // Test simple case where all same servers are there - List servers = randomServers(10, 10); - List regions = randomRegions(100); - Map existing = new TreeMap(); - for (int i = 0; i < regions.size(); i++) { - ServerName sn = servers.get(i % servers.size()).getServerName(); - // The old server would have had same host and port, but different - // start code! 
- ServerName snWithOldStartCode = - ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10); - existing.put(regions.get(i), snWithOldStartCode); - } - List listOfServerNames = getListOfServerNames(servers); - Map> assignment = - loadBalancer.retainAssignment(existing, listOfServerNames); - assertRetainedAssignment(existing, listOfServerNames, assignment); - - // Include two new servers that were not there before - List servers2 = new ArrayList(servers); - servers2.add(randomServer(10)); - servers2.add(randomServer(10)); - listOfServerNames = getListOfServerNames(servers2); - assignment = loadBalancer.retainAssignment(existing, listOfServerNames); - assertRetainedAssignment(existing, listOfServerNames, assignment); - - // Remove two of the servers that were previously there - List servers3 = new ArrayList(servers); - servers3.remove(0); - servers3.remove(0); - listOfServerNames = getListOfServerNames(servers3); - assignment = loadBalancer.retainAssignment(existing, listOfServerNames); - assertRetainedAssignment(existing, listOfServerNames, assignment); - } - - @Test (timeout=180000) - public void testRegionAvailability() throws Exception { - // Create a cluster with a few servers, assign them to specific racks - // then assign some regions. 
The tests should check whether moving a - // replica from one node to a specific other node or rack lowers the - // availability of the region or not - - List list0 = new ArrayList(); - List list1 = new ArrayList(); - List list2 = new ArrayList(); - // create a region (region1) - HRegionInfo hri1 = new HRegionInfo( - TableName.valueOf("table"), "key1".getBytes(), "key2".getBytes(), - false, 100); - // create a replica of the region (replica_of_region1) - HRegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1); - // create a second region (region2) - HRegionInfo hri3 = new HRegionInfo( - TableName.valueOf("table"), "key2".getBytes(), "key3".getBytes(), - false, 101); - list0.add(hri1); //only region1 - list1.add(hri2); //only replica_of_region1 - list2.add(hri3); //only region2 - Map> clusterState = - new LinkedHashMap>(); - clusterState.put(servers[0], list0); //servers[0] hosts region1 - clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 - clusterState.put(servers[2], list2); //servers[2] hosts region2 - // create a cluster with the above clusterState. The way in which the - // cluster is created (constructor code) would make sure the indices of - // the servers are in the order in which it is inserted in the clusterState - // map (linkedhashmap is important). 
A similar thing applies to the region lists - Cluster cluster = new Cluster(clusterState, null, null, rackManager); - // check whether a move of region1 from servers[0] to servers[1] would lower - // the availability of region1 - assertTrue(cluster.wouldLowerAvailability(hri1, servers[1])); - // check whether a move of region1 from servers[0] to servers[2] would lower - // the availability of region1 - assertTrue(!cluster.wouldLowerAvailability(hri1, servers[2])); - // check whether a move of replica_of_region1 from servers[0] to servers[2] would lower - // the availability of replica_of_region1 - assertTrue(!cluster.wouldLowerAvailability(hri2, servers[2])); - // check whether a move of region2 from servers[0] to servers[1] would lower - // the availability of region2 - assertTrue(!cluster.wouldLowerAvailability(hri3, servers[1])); - - // now lets have servers[1] host replica_of_region2 - list1.add(RegionReplicaUtil.getRegionInfoForReplica(hri3, 1)); - // create a new clusterState with the above change - cluster = new Cluster(clusterState, null, null, rackManager); - // now check whether a move of a replica from servers[0] to servers[1] would lower - // the availability of region2 - assertTrue(cluster.wouldLowerAvailability(hri3, servers[1])); - - // start over again - clusterState.clear(); - clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 - clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 and replica_of_region2 - clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 - clusterState.put(servers[10], new ArrayList()); //servers[10], rack3 hosts no region - // create a cluster with the above clusterState - cluster = new Cluster(clusterState, null, null, rackManager); - // check whether a move of region1 from servers[0],rack1 to servers[6],rack2 would - // lower the availability - - assertTrue(cluster.wouldLowerAvailability(hri1, servers[0])); - - // now create a cluster without the rack manager 
- cluster = new Cluster(clusterState, null, null, null); - // now repeat check whether a move of region1 from servers[0] to servers[6] would - // lower the availability - assertTrue(!cluster.wouldLowerAvailability(hri1, servers[6])); - } - - @Test (timeout=180000) - public void testRegionAvailabilityWithRegionMoves() throws Exception { - List list0 = new ArrayList(); - List list1 = new ArrayList(); - List list2 = new ArrayList(); - // create a region (region1) - HRegionInfo hri1 = new HRegionInfo( - TableName.valueOf("table"), "key1".getBytes(), "key2".getBytes(), - false, 100); - // create a replica of the region (replica_of_region1) - HRegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1); - // create a second region (region2) - HRegionInfo hri3 = new HRegionInfo( - TableName.valueOf("table"), "key2".getBytes(), "key3".getBytes(), - false, 101); - list0.add(hri1); //only region1 - list1.add(hri2); //only replica_of_region1 - list2.add(hri3); //only region2 - Map> clusterState = - new LinkedHashMap>(); - clusterState.put(servers[0], list0); //servers[0] hosts region1 - clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 - clusterState.put(servers[2], list2); //servers[2] hosts region2 - // create a cluster with the above clusterState. The way in which the - // cluster is created (constructor code) would make sure the indices of - // the servers are in the order in which it is inserted in the clusterState - // map (linkedhashmap is important). 
- Cluster cluster = new Cluster(clusterState, null, null, rackManager); - // check whether moving region1 from servers[1] to servers[2] would lower availability - assertTrue(!cluster.wouldLowerAvailability(hri1, servers[2])); - - // now move region1 from servers[0] to servers[2] - cluster.doAction(new MoveRegionAction(0, 0, 2)); - // now repeat check whether moving region1 from servers[1] to servers[2] - // would lower availability - assertTrue(cluster.wouldLowerAvailability(hri1, servers[2])); - - // start over again - clusterState.clear(); - List list3 = new ArrayList(); - HRegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1); - list3.add(hri4); - clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 - clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 - clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 - clusterState.put(servers[12], list3); //servers[12], rack3 hosts replica_of_region2 - // create a cluster with the above clusterState - cluster = new Cluster(clusterState, null, null, rackManager); - // check whether a move of replica_of_region2 from servers[12],rack3 to servers[0],rack1 would - // lower the availability - assertTrue(!cluster.wouldLowerAvailability(hri4, servers[0])); - // now move region2 from servers[6],rack2 to servers[0],rack1 - cluster.doAction(new MoveRegionAction(2, 2, 0)); - // now repeat check if replica_of_region2 from servers[12],rack3 to servers[0],rack1 would - // lower the availability - assertTrue(cluster.wouldLowerAvailability(hri3, servers[0])); - } - - private List getListOfServerNames(final List sals) { - List list = new ArrayList(); - for (ServerAndLoad e : sals) { - list.add(e.getServerName()); - } - return list; - } - - /** - * Asserts a valid retained assignment plan. - *

- * Must meet the following conditions: - *

    - *
  • Every input region has an assignment, and to an online server - *
  • If a region had an existing assignment to a server with the same - * address a a currently online server, it will be assigned to it - *
- * @param existing - * @param servers - * @param assignment - */ - private void assertRetainedAssignment(Map existing, - List servers, Map> assignment) { - // Verify condition 1, every region assigned, and to online server - Set onlineServerSet = new TreeSet(servers); - Set assignedRegions = new TreeSet(); - for (Map.Entry> a : assignment.entrySet()) { - assertTrue("Region assigned to server that was not listed as online", - onlineServerSet.contains(a.getKey())); - for (HRegionInfo r : a.getValue()) - assignedRegions.add(r); - } - assertEquals(existing.size(), assignedRegions.size()); - - // Verify condition 2, if server had existing assignment, must have same - Set onlineHostNames = new TreeSet(); - for (ServerName s : servers) { - onlineHostNames.add(s.getHostname()); - } - - for (Map.Entry> a : assignment.entrySet()) { - ServerName assignedTo = a.getKey(); - for (HRegionInfo r : a.getValue()) { - ServerName address = existing.get(r); - if (address != null && onlineHostNames.contains(address.getHostname())) { - // this region was prevously assigned somewhere, and that - // host is still around, then it should be re-assigned on the - // same host - assertEquals(address.getHostname(), assignedTo.getHostname()); - } - } - } - } - - @Test (timeout=180000) - public void testClusterServersWithSameHostPort() { - // tests whether the BaseLoadBalancer.Cluster can be constructed with servers - // sharing same host and port - List servers = getListOfServerNames(randomServers(10, 10)); - List regions = randomRegions(101); - Map> clusterState = new HashMap>(); - - assignRegions(regions, servers, clusterState); - - // construct another list of servers, but sharing same hosts and ports - List oldServers = new ArrayList(servers.size()); - for (ServerName sn : servers) { - // The old server would have had same host and port, but different start code! 
- oldServers.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10)); - } - - regions = randomRegions(9); // some more regions - assignRegions(regions, oldServers, clusterState); - - // should not throw exception: - BaseLoadBalancer.Cluster cluster = new Cluster(clusterState, null, null, null); - assertEquals(101 + 9, cluster.numRegions); - assertEquals(10, cluster.numServers); // only 10 servers because they share the same host + port - } - - private void assignRegions(List regions, List servers, - Map> clusterState) { - for (int i = 0; i < regions.size(); i++) { - ServerName sn = servers.get(i % servers.size()); - List regionsOfServer = clusterState.get(sn); - if (regionsOfServer == null) { - regionsOfServer = new ArrayList(10); - clusterState.put(sn, regionsOfServer); - } - - regionsOfServer.add(regions.get(i)); - } - } - - @Test (timeout=180000) - public void testClusterRegionLocations() { - // tests whether region locations are handled correctly in Cluster - List servers = getListOfServerNames(randomServers(10, 10)); - List regions = randomRegions(101); - Map> clusterState = new HashMap>(); - - assignRegions(regions, servers, clusterState); - - // mock block locality for some regions - RegionLocationFinder locationFinder = mock(RegionLocationFinder.class); - // block locality: region:0 => {server:0} - // region:1 => {server:0, server:1} - // region:42 => {server:4, server:9, server:5} - when(locationFinder.getTopBlockLocations(regions.get(0))).thenReturn( - Lists.newArrayList(servers.get(0))); - when(locationFinder.getTopBlockLocations(regions.get(1))).thenReturn( - Lists.newArrayList(servers.get(0), servers.get(1))); - when(locationFinder.getTopBlockLocations(regions.get(42))).thenReturn( - Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5))); - when(locationFinder.getTopBlockLocations(regions.get(43))).thenReturn( - Lists.newArrayList(ServerName.valueOf("foo", 0, 0))); // this server does not exists in clusterStatus - - 
BaseLoadBalancer.Cluster cluster = new Cluster(clusterState, null, locationFinder, null); - - int r0 = ArrayUtils.indexOf(cluster.regions, regions.get(0)); // this is ok, it is just a test - int r1 = ArrayUtils.indexOf(cluster.regions, regions.get(1)); - int r10 = ArrayUtils.indexOf(cluster.regions, regions.get(10)); - int r42 = ArrayUtils.indexOf(cluster.regions, regions.get(42)); - int r43 = ArrayUtils.indexOf(cluster.regions, regions.get(43)); - - int s0 = cluster.serversToIndex.get(servers.get(0).getHostAndPort()); - int s1 = cluster.serversToIndex.get(servers.get(1).getHostAndPort()); - int s4 = cluster.serversToIndex.get(servers.get(4).getHostAndPort()); - int s5 = cluster.serversToIndex.get(servers.get(5).getHostAndPort()); - int s9 = cluster.serversToIndex.get(servers.get(9).getHostAndPort()); - - // region 0 locations - assertEquals(1, cluster.regionLocations[r0].length); - assertEquals(s0, cluster.regionLocations[r0][0]); - - // region 1 locations - assertEquals(2, cluster.regionLocations[r1].length); - assertEquals(s0, cluster.regionLocations[r1][0]); - assertEquals(s1, cluster.regionLocations[r1][1]); - - // region 10 locations - assertEquals(0, cluster.regionLocations[r10].length); - - // region 42 locations - assertEquals(3, cluster.regionLocations[r42].length); - assertEquals(s4, cluster.regionLocations[r42][0]); - assertEquals(s9, cluster.regionLocations[r42][1]); - assertEquals(s5, cluster.regionLocations[r42][2]); - - // region 43 locations - assertEquals(1, cluster.regionLocations[r43].length); - assertEquals(-1, cluster.regionLocations[r43][0]); - } -} \ No newline at end of file +///** +// * Licensed to the Apache Software Foundation (ASF) under one +// * or more contributor license agreements. See the NOTICE file +// * distributed with this work for additional information +// * regarding copyright ownership. 
The ASF licenses this file +// * to you under the Apache License, Version 2.0 (the +// * "License"); you may not use this file except in compliance +// * with the License. You may obtain a copy of the License at +// * +// * http://www.apache.org/licenses/LICENSE-2.0 +// * +// * Unless required by applicable law or agreed to in writing, software +// * distributed under the License is distributed on an "AS IS" BASIS, +// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// * See the License for the specific language governing permissions and +// * limitations under the License. +// */ +//package org.apache.hadoop.hbase.master.balancer; +// +//import static org.junit.Assert.assertEquals; +//import static org.junit.Assert.assertNotEquals; +//import static org.junit.Assert.assertNull; +//import static org.junit.Assert.assertTrue; +//import static org.mockito.Mockito.mock; +//import static org.mockito.Mockito.when; +// +//import java.util.ArrayList; +//import java.util.HashMap; +//import java.util.LinkedHashMap; +//import java.util.List; +//import java.util.Map; +//import java.util.Set; +//import java.util.TreeMap; +//import java.util.TreeSet; +// +//import org.apache.avro.generic.GenericData; +//import org.apache.commons.lang.ArrayUtils; +//import org.apache.commons.logging.Log; +//import org.apache.commons.logging.LogFactory; +//import org.apache.hadoop.conf.Configuration; +//import org.apache.hadoop.hbase.*; +//import org.apache.hadoop.hbase.client.RegionReplicaUtil; +//import org.apache.hadoop.hbase.master.LoadBalancer; +//import org.apache.hadoop.hbase.master.MasterServices; +//import org.apache.hadoop.hbase.master.RackManager; +//import org.apache.hadoop.hbase.master.RegionPlan; +//import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster; +//import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; +//import org.apache.hadoop.hbase.regionserver.Region; +//import 
org.apache.hadoop.hbase.testclassification.MasterTests; +//import org.apache.hadoop.hbase.testclassification.MediumTests; +//import org.apache.hadoop.net.DNSToSwitchMapping; +//import org.junit.BeforeClass; +//import org.junit.Test; +//import org.junit.experimental.categories.Category; +//import org.mockito.Mockito; +// +//import com.google.common.collect.Lists; +// +//@Category({MasterTests.class, MediumTests.class}) +//public class TestBaseLoadBalancer extends BalancerTestBase { +// +// private static LoadBalancer loadBalancer; +// private static final Log LOG = LogFactory.getLog(TestBaseLoadBalancer.class); +// private static final ServerName master = ServerName.valueOf("fake-master", 0, 1L); +// private static RackManager rackManager; +// private static final int NUM_SERVERS = 15; +// private static ServerName[] servers = new ServerName[NUM_SERVERS]; +// +// int[][] regionsAndServersMocks = new int[][] { +// // { num regions, num servers } +// new int[] { 0, 0 }, new int[] { 0, 1 }, new int[] { 1, 1 }, new int[] { 2, 1 }, +// new int[] { 10, 1 }, new int[] { 1, 2 }, new int[] { 2, 2 }, new int[] { 3, 2 }, +// new int[] { 1, 3 }, new int[] { 2, 3 }, new int[] { 3, 3 }, new int[] { 25, 3 }, +// new int[] { 2, 10 }, new int[] { 2, 100 }, new int[] { 12, 10 }, new int[] { 12, 100 }, }; +// +// @BeforeClass +// public static void beforeAllTests() throws Exception { +// Configuration conf = HBaseConfiguration.create(); +// conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class); +// loadBalancer = new MockBalancer(); +// loadBalancer.setConf(conf); +// MasterServices st = Mockito.mock(MasterServices.class); +// Mockito.when(st.getServerName()).thenReturn(master); +// loadBalancer.setMasterServices(st); +// +// // Set up the rack topologies (5 machines per rack) +// rackManager = Mockito.mock(RackManager.class); +// for (int i = 0; i < NUM_SERVERS; i++) { +// servers[i] = ServerName.valueOf("foo"+i+":1234",-1); +// if (i < 5) { 
+// Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack1"); +// } +// if (i >= 5 && i < 10) { +// Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack2"); +// } +// if (i >= 10) { +// Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack3"); +// } +// } +// } +// +// public static class MockBalancer extends BaseLoadBalancer { +// +// @Override +// public List balanceCluster(Map> clusterState) { +// return null; +// } +// +// } +// +// /** +// * Test the cluster startup bulk assignment which attempts to retain +// * assignment info. +// * @throws Exception +// */ +// @Test (timeout=180000) +// public void testRetainAssignment() throws Exception { +// // Test simple case where all same servers are there +// List servers = randomServers(10, 10); +// List regions = randomRegions(100); +// Map> newServers = new TreeMap>(); +// Map oldLocations = new TreeMap(); +// +// for (ServerAndLoad server:servers) { +// newServers.put(server.getServerName(), new ArrayList(1)); +// } +// +// for (int i = 0; i < regions.size(); i++) { +// ServerName sn = servers.get(i % servers.size()).getServerName(); +// // The old server would have had same host and port, but different +// // start code! 
+// ServerName snWithOldStartCode = +// ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10); +// oldLocations.put(regions.get(i), snWithOldStartCode); +// } +// +// List listOfServerNames = getListOfServerNames(servers); +// List plans = loadBalancer.fastBalance(newServers, regions, oldLocations); +// assertRetainedAssignment(plans, oldLocations); +// +// // Include two new servers that were not there before +// List servers2 = new ArrayList(servers); +// servers2.add(randomServer(10)); +// servers2.add(randomServer(10)); +// listOfServerNames = getListOfServerNames(servers2); +// assignment = loadBalancer.retainAssignment(existing, listOfServerNames); +// assertRetainedAssignment(existing, listOfServerNames, assignment); +// +// // Remove two of the servers that were previously there +// List servers3 = new ArrayList(servers); +// servers3.remove(0); +// servers3.remove(0); +// listOfServerNames = getListOfServerNames(servers3); +// assignment = loadBalancer.retainAssignment(existing, listOfServerNames); +// assertRetainedAssignment(existing, listOfServerNames, assignment); +// } +// +// @Test (timeout=180000) +// public void testRegionAvailability() throws Exception { +// // Create a cluster with a few servers, assign them to specific racks +// // then assign some regions. 
The tests should check whether moving a +// // replica from one node to a specific other node or rack lowers the +// // availability of the region or not +// +// List list0 = new ArrayList(); +// List list1 = new ArrayList(); +// List list2 = new ArrayList(); +// // create a region (region1) +// HRegionInfo hri1 = new HRegionInfo( +// TableName.valueOf("table"), "key1".getBytes(), "key2".getBytes(), +// false, 100); +// // create a replica of the region (replica_of_region1) +// HRegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1); +// // create a second region (region2) +// HRegionInfo hri3 = new HRegionInfo( +// TableName.valueOf("table"), "key2".getBytes(), "key3".getBytes(), +// false, 101); +// list0.add(hri1); //only region1 +// list1.add(hri2); //only replica_of_region1 +// list2.add(hri3); //only region2 +// Map> clusterState = +// new LinkedHashMap>(); +// clusterState.put(servers[0], list0); //servers[0] hosts region1 +// clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 +// clusterState.put(servers[2], list2); //servers[2] hosts region2 +// // create a cluster with the above clusterState. The way in which the +// // cluster is created (constructor code) would make sure the indices of +// // the servers are in the order in which it is inserted in the clusterState +// // map (linkedhashmap is important). 
A similar thing applies to the region lists +// Cluster cluster = new Cluster(clusterState, null, null, rackManager); +// // check whether a move of region1 from servers[0] to servers[1] would lower +// // the availability of region1 +// assertTrue(cluster.wouldLowerAvailability(hri1, servers[1])); +// // check whether a move of region1 from servers[0] to servers[2] would lower +// // the availability of region1 +// assertTrue(!cluster.wouldLowerAvailability(hri1, servers[2])); +// // check whether a move of replica_of_region1 from servers[0] to servers[2] would lower +// // the availability of replica_of_region1 +// assertTrue(!cluster.wouldLowerAvailability(hri2, servers[2])); +// // check whether a move of region2 from servers[0] to servers[1] would lower +// // the availability of region2 +// assertTrue(!cluster.wouldLowerAvailability(hri3, servers[1])); +// +// // now lets have servers[1] host replica_of_region2 +// list1.add(RegionReplicaUtil.getRegionInfoForReplica(hri3, 1)); +// // create a new clusterState with the above change +// cluster = new Cluster(clusterState, null, null, rackManager); +// // now check whether a move of a replica from servers[0] to servers[1] would lower +// // the availability of region2 +// assertTrue(cluster.wouldLowerAvailability(hri3, servers[1])); +// +// // start over again +// clusterState.clear(); +// clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 +// clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 and replica_of_region2 +// clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 +// clusterState.put(servers[10], new ArrayList()); //servers[10], rack3 hosts no region +// // create a cluster with the above clusterState +// cluster = new Cluster(clusterState, null, null, rackManager); +// // check whether a move of region1 from servers[0],rack1 to servers[6],rack2 would +// // lower the availability +// +// assertTrue(cluster.wouldLowerAvailability(hri1, 
servers[0])); +// +// // now create a cluster without the rack manager +// cluster = new Cluster(clusterState, null, null, null); +// // now repeat check whether a move of region1 from servers[0] to servers[6] would +// // lower the availability +// assertTrue(!cluster.wouldLowerAvailability(hri1, servers[6])); +// } +// +// @Test (timeout=180000) +// public void testRegionAvailabilityWithRegionMoves() throws Exception { +// List list0 = new ArrayList(); +// List list1 = new ArrayList(); +// List list2 = new ArrayList(); +// // create a region (region1) +// HRegionInfo hri1 = new HRegionInfo( +// TableName.valueOf("table"), "key1".getBytes(), "key2".getBytes(), +// false, 100); +// // create a replica of the region (replica_of_region1) +// HRegionInfo hri2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1); +// // create a second region (region2) +// HRegionInfo hri3 = new HRegionInfo( +// TableName.valueOf("table"), "key2".getBytes(), "key3".getBytes(), +// false, 101); +// list0.add(hri1); //only region1 +// list1.add(hri2); //only replica_of_region1 +// list2.add(hri3); //only region2 +// Map> clusterState = +// new LinkedHashMap>(); +// clusterState.put(servers[0], list0); //servers[0] hosts region1 +// clusterState.put(servers[1], list1); //servers[1] hosts replica_of_region1 +// clusterState.put(servers[2], list2); //servers[2] hosts region2 +// // create a cluster with the above clusterState. The way in which the +// // cluster is created (constructor code) would make sure the indices of +// // the servers are in the order in which it is inserted in the clusterState +// // map (linkedhashmap is important). 
+// Cluster cluster = new Cluster(clusterState, null, null, rackManager); +// // check whether moving region1 from servers[1] to servers[2] would lower availability +// assertTrue(!cluster.wouldLowerAvailability(hri1, servers[2])); +// +// // now move region1 from servers[0] to servers[2] +// cluster.doAction(new MoveRegionAction(0, 0, 2)); +// // now repeat check whether moving region1 from servers[1] to servers[2] +// // would lower availability +// assertTrue(cluster.wouldLowerAvailability(hri1, servers[2])); +// +// // start over again +// clusterState.clear(); +// List list3 = new ArrayList(); +// HRegionInfo hri4 = RegionReplicaUtil.getRegionInfoForReplica(hri3, 1); +// list3.add(hri4); +// clusterState.put(servers[0], list0); //servers[0], rack1 hosts region1 +// clusterState.put(servers[5], list1); //servers[5], rack2 hosts replica_of_region1 +// clusterState.put(servers[6], list2); //servers[6], rack2 hosts region2 +// clusterState.put(servers[12], list3); //servers[12], rack3 hosts replica_of_region2 +// // create a cluster with the above clusterState +// cluster = new Cluster(clusterState, null, null, rackManager); +// // check whether a move of replica_of_region2 from servers[12],rack3 to servers[0],rack1 would +// // lower the availability +// assertTrue(!cluster.wouldLowerAvailability(hri4, servers[0])); +// // now move region2 from servers[6],rack2 to servers[0],rack1 +// cluster.doAction(new MoveRegionAction(2, 2, 0)); +// // now repeat check if replica_of_region2 from servers[12],rack3 to servers[0],rack1 would +// // lower the availability +// assertTrue(cluster.wouldLowerAvailability(hri3, servers[0])); +// } +// +// private List getListOfServerNames(final List sals) { +// List list = new ArrayList(); +// for (ServerAndLoad e : sals) { +// list.add(e.getServerName()); +// } +// return list; +// } +// +// /** +// * Asserts a valid retained assignment plan. +// *

+// * Must meet the following conditions: +// *

    +// *
  • Every input region has an assignment, and to an online server +// *
  • If a region had an existing assignment to a server with the same +// * address as a currently online server, it will be assigned to it +// *
+// * @param existing +// * @param servers +// * @param assignment +// */ +// private void assertRetainedAssignment( +// List plans, Map> old) { +// // Verify condition 1, every region assigned, and to online server +// Set onlineServerSet = new TreeSet(servers); +// Set assignedRegions = new TreeSet(); +// for (Map.Entry> a : assignment.entrySet()) { +// assertTrue("Region assigned to server that was not listed as online", +// onlineServerSet.contains(a.getKey())); +// for (HRegionInfo r : a.getValue()) +// assignedRegions.add(r); +// } +// assertEquals(existing.size(), assignedRegions.size()); +// +// // Verify condition 2, if server had existing assignment, must have same +// Set onlineHostNames = new TreeSet(); +// for (ServerName s : servers) { +// onlineHostNames.add(s.getHostname()); +// } +// +// for (Map.Entry> a : assignment.entrySet()) { +// ServerName assignedTo = a.getKey(); +// for (HRegionInfo r : a.getValue()) { +// ServerName address = existing.get(r); +// if (address != null && onlineHostNames.contains(address.getHostname())) { +// // this region was prevously assigned somewhere, and that +// // host is still around, then it should be re-assigned on the +// // same host +// assertEquals(address.getHostname(), assignedTo.getHostname()); +// } +// } +// } +// } +// +// @Test (timeout=180000) +// public void testClusterServersWithSameHostPort() { +// // tests whether the BaseLoadBalancer.Cluster can be constructed with servers +// // sharing same host and port +// List servers = getListOfServerNames(randomServers(10, 10)); +// List regions = randomRegions(101); +// Map> clusterState = new HashMap>(); +// +// assignRegions(regions, servers, clusterState); +// +// // construct another list of servers, but sharing same hosts and ports +// List oldServers = new ArrayList(servers.size()); +// for (ServerName sn : servers) { +// // The old server would have had same host and port, but different start code! 
+// oldServers.add(ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10)); +// } +// +// regions = randomRegions(9); // some more regions +// assignRegions(regions, oldServers, clusterState); +// +// // should not throw exception: +// BaseLoadBalancer.Cluster cluster = new Cluster(clusterState, null, null, null); +// assertEquals(101 + 9, cluster.numRegions); +// assertEquals(10, cluster.numServers); // only 10 servers because they share the same host + port +// } +// +// private void assignRegions(List regions, List servers, +// Map> clusterState) { +// for (int i = 0; i < regions.size(); i++) { +// ServerName sn = servers.get(i % servers.size()); +// List regionsOfServer = clusterState.get(sn); +// if (regionsOfServer == null) { +// regionsOfServer = new ArrayList(10); +// clusterState.put(sn, regionsOfServer); +// } +// +// regionsOfServer.add(regions.get(i)); +// } +// } +// +// @Test (timeout=180000) +// public void testClusterRegionLocations() { +// // tests whether region locations are handled correctly in Cluster +// List servers = getListOfServerNames(randomServers(10, 10)); +// List regions = randomRegions(101); +// Map> clusterState = new HashMap>(); +// +// assignRegions(regions, servers, clusterState); +// +// // mock block locality for some regions +// RegionLocationFinder locationFinder = mock(RegionLocationFinder.class); +// // block locality: region:0 => {server:0} +// // region:1 => {server:0, server:1} +// // region:42 => {server:4, server:9, server:5} +// when(locationFinder.getTopBlockLocations(regions.get(0))).thenReturn( +// Lists.newArrayList(servers.get(0))); +// when(locationFinder.getTopBlockLocations(regions.get(1))).thenReturn( +// Lists.newArrayList(servers.get(0), servers.get(1))); +// when(locationFinder.getTopBlockLocations(regions.get(42))).thenReturn( +// Lists.newArrayList(servers.get(4), servers.get(9), servers.get(5))); +// when(locationFinder.getTopBlockLocations(regions.get(43))).thenReturn( +// 
Lists.newArrayList(ServerName.valueOf("foo", 0, 0))); // this server does not exists in clusterStatus +// +// BaseLoadBalancer.Cluster cluster = new Cluster(clusterState, null, locationFinder, null); +// +// int r0 = ArrayUtils.indexOf(cluster.regions, regions.get(0)); // this is ok, it is just a test +// int r1 = ArrayUtils.indexOf(cluster.regions, regions.get(1)); +// int r10 = ArrayUtils.indexOf(cluster.regions, regions.get(10)); +// int r42 = ArrayUtils.indexOf(cluster.regions, regions.get(42)); +// int r43 = ArrayUtils.indexOf(cluster.regions, regions.get(43)); +// +// int s0 = cluster.serversToIndex.get(servers.get(0).getHostAndPort()); +// int s1 = cluster.serversToIndex.get(servers.get(1).getHostAndPort()); +// int s4 = cluster.serversToIndex.get(servers.get(4).getHostAndPort()); +// int s5 = cluster.serversToIndex.get(servers.get(5).getHostAndPort()); +// int s9 = cluster.serversToIndex.get(servers.get(9).getHostAndPort()); +// +// // region 0 locations +// assertEquals(1, cluster.regionLocations[r0].length); +// assertEquals(s0, cluster.regionLocations[r0][0]); +// +// // region 1 locations +// assertEquals(2, cluster.regionLocations[r1].length); +// assertEquals(s0, cluster.regionLocations[r1][0]); +// assertEquals(s1, cluster.regionLocations[r1][1]); +// +// // region 10 locations +// assertEquals(0, cluster.regionLocations[r10].length); +// +// // region 42 locations +// assertEquals(3, cluster.regionLocations[r42].length); +// assertEquals(s4, cluster.regionLocations[r42][0]); +// assertEquals(s9, cluster.regionLocations[r42][1]); +// assertEquals(s5, cluster.regionLocations[r42][2]); +// +// // region 43 locations +// assertEquals(1, cluster.regionLocations[r43].length); +// assertEquals(-1, cluster.regionLocations[r43][0]); +// } +//} \ No newline at end of file -- 2.3.0