diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 43ec390..ccb6411 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -108,6 +108,12 @@ public static final String USER_LIMIT_FACTOR = "user-limit-factor"; @Private + public static final String USER_WEIGHT = "weight"; + + @Private + public static final float DEFAULT_USER_WEIGHT = 1.0f; + + @Private public static final String STATE = "state"; @Private @@ -1392,4 +1398,30 @@ public void setPUOrderingPolicyUnderUtilizedPreemptionMoveReservation( QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY, UNDER_UTILIZED_PREEMPTION_MOVE_RESERVATION), allowMoveReservation); } + + /** + * Get the weight of a user. Used in computing user-specific user limit, + * relative to other users. + * @param queuePath full queue path + * @param user user name to check for a specific weight + * @return user-specific weight, if it exists. Otherwise, return 1.0f + */ + public float getUserWeight(String queuePath, String user) { + // Walk the queue's hierarchy from the bottom up. Look for a weight that is + // specific to user at each level and stop when the value is not the + // default. If none exists for this user at any level of the queue path, + // use the default. 
+ float userWeight = DEFAULT_USER_WEIGHT; + int offset = queuePath.length(); + do { + String qp = queuePath.substring(0, offset); + String weightKey = + getQueuePrefix(qp) + "user-settings." + user + "." + USER_WEIGHT; + userWeight = getFloat(weightKey, DEFAULT_USER_WEIGHT); + if (userWeight != DEFAULT_USER_WEIGHT) { + return userWeight; + } + } while ((offset = queuePath.lastIndexOf(".", offset-1)) > -1); + return userWeight; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java index 76cb5d6..7bd2892 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java @@ -154,6 +154,7 @@ public void initializeQueues(CapacitySchedulerConfiguration conf) setQueueAcls(authorizer, appPriorityACLManager, queues); labelManager.reinitializeQueueLabels(getQueueToLabels()); this.queueStateManager.initialize(this); + updateUserWeights(conf, queues); LOG.info("Initialized root queue " + root); } @@ -183,6 +184,7 @@ public void reinitializeQueues(CapacitySchedulerConfiguration newConf) labelManager.reinitializeQueueLabels(getQueueToLabels()); this.queueStateManager.initialize(this); + updateUserWeights(newConf, queues); } /** @@ -407,4 +409,18 @@ public Priority getDefaultPriorityForQueue(String queueName) { getQueueStateManager() { return this.queueStateManager; } + /** 
+ * Update weights for users currently in leaf queues. + * @param conf the CapacitySchedulerConfiguration + * @param queues the queues + */ + private static void updateUserWeights(CapacitySchedulerConfiguration conf, + Map queues) { + for (CSQueue queue : queues.values()) { + if (queue instanceof LeafQueue) { + LeafQueue lQueue = (LeafQueue) queue; + lQueue.getUsersManager().updateUserWeights(conf); + } + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 1b20556..7459bbc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -617,11 +617,21 @@ public Resource calculateAndGetAMResourceLimit() { @VisibleForTesting public Resource getUserAMResourceLimit() { - return getUserAMResourceLimitPerPartition(RMNodeLabelsManager.NO_LABEL); + return getUserAMResourceLimitPerPartition(RMNodeLabelsManager.NO_LABEL, + null); } public Resource getUserAMResourceLimitPerPartition( String nodePartition) { + return getUserAMResourceLimitPerPartition(nodePartition, null); + } + + public Resource getUserAMResourceLimitPerPartition( + String nodePartition, String userName) { + float userWeight = 1.0f; + if (userName != null && getUser(userName) != null) { + userWeight = getUser(userName).getUserWeight(); + } try { readLock.lock(); /* @@ -632,6 +642,7 @@ public Resource getUserAMResourceLimitPerPartition( */ float effectiveUserLimit = 
Math.max(usersManager.getUserLimit() / 100.0f, 1.0f / Math.max(getAbstractUsersManager().getNumActiveUsers(), 1)); + effectiveUserLimit = Math.min(effectiveUserLimit * userWeight, 1.0f); Resource queuePartitionResource = Resources .multiplyAndNormalizeUp(resourceCalculator, @@ -772,7 +783,8 @@ private void activateApplications() { // Verify whether we already calculated user-am-limit for this label. if (userAMLimit == null) { - userAMLimit = getUserAMResourceLimitPerPartition(partitionName); + userAMLimit = getUserAMResourceLimitPerPartition(partitionName, + application.getUser()); userAmPartitionLimit.put(partitionName, userAMLimit); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java index ff9d304..a1a8ecf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java @@ -37,11 +37,14 @@ protected ResourceInfo AMResourceUsed; protected ResourceInfo userResourceLimit; protected ResourcesInfo resources; + private float userWeight; + private boolean isActive; UserInfo() {} UserInfo(String username, Resource resUsed, int activeApps, int pendingApps, - Resource amResUsed, Resource resourceLimit, ResourceUsage resourceUsage) { + Resource amResUsed, Resource resourceLimit, ResourceUsage resourceUsage, + float weight, boolean isActive) { this.username = username; this.resourcesUsed = new ResourceInfo(resUsed); this.numActiveApplications = 
activeApps; @@ -49,6 +52,8 @@ this.AMResourceUsed = new ResourceInfo(amResUsed); this.userResourceLimit = new ResourceInfo(resourceLimit); this.resources = new ResourcesInfo(resourceUsage); + this.userWeight = weight; + this.isActive = isActive; } public String getUsername() { @@ -78,4 +83,12 @@ public ResourceInfo getUserResourceLimit() { public ResourcesInfo getResourceUsageInfo() { return resources; } + + public float getUserWeight() { + return userWeight; + } + + public boolean getIsActive() { + return isActive; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java index c2134eb..9cb9b53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java @@ -159,6 +159,9 @@ private void setUsageRatio(String label, float ratio) { private UsageRatios userUsageRatios = new UsageRatios(); private WriteLock writeLock; + // User-specific weight for computing user-specific minimum user limit pct. 
+ private float weight; + public User(String name) { ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); // Nobody uses read-lock now, will add it when necessary @@ -253,6 +256,14 @@ public Resource getUserResourceLimit() { public void setUserResourceLimit(Resource userResourceLimit) { this.userResourceLimit = userResourceLimit; } + + public void setUserWeight(float userWeight) { + weight = userWeight; + } + + public float getUserWeight() { + return weight; + } } /* End of User class */ /** @@ -409,6 +420,8 @@ public User getUserAndAddIfAbsent(String userName) { */ private void addUser(String userName, User user) { this.users.put(userName, user); + user.setUserWeight(scheduler.getConfiguration() + .getUserWeight(lQueue.getQueuePath(), userName)); } /** @@ -425,7 +438,8 @@ private void addUser(String userName, User user) { user.getActiveApplications(), user.getPendingApplications(), Resources.clone(user.getConsumedAMResources()), Resources.clone(user.getUserResourceLimit()), - user.getResourceUsage())); + user.getResourceUsage(), user.getUserWeight(), + activeUsersSet.contains(user.userName))); } return usersToReturn; } finally { @@ -471,13 +485,24 @@ public Resource getComputedResourceLimitForActiveUsers(String userName, writeLock.unlock(); } - if (LOG.isDebugEnabled()) { - LOG.debug("userLimit is fetched. userLimit = " - + userLimitPerSchedulingMode.get(schedulingMode) + ", schedulingMode=" - + schedulingMode + ", partition=" + nodePartition); + Resource userLimitResource = userLimitPerSchedulingMode.get(schedulingMode); + User user = getUser(userName); + float weight = (user == null) ? 
1.0f : user.getUserWeight(); + Resource userSpecificUserLimit = + Resources.multiplyAndNormalizeUp(resourceCalculator, + userLimitResource, weight, lQueue.getMinimumAllocation()); + + if (user != null) { + user.setUserResourceLimit(userSpecificUserLimit); } - return userLimitPerSchedulingMode.get(schedulingMode); + if (LOG.isDebugEnabled()) { + LOG.debug("userLimit is fetched. userLimit=" + userLimitResource + + ", userSpecificUserLimit=" + userSpecificUserLimit + + ", schedulingMode=" + schedulingMode + + ", partition=" + nodePartition); + } + return userSpecificUserLimit; } /** @@ -518,13 +543,21 @@ public Resource getComputedResourceLimitForAllUsers(String userName, writeLock.unlock(); } + Resource userLimitResource = userLimitPerSchedulingMode.get(schedulingMode); + User user = getUser(userName); + float weight = (user == null) ? 1.0f : user.getUserWeight(); + Resource userSpecificUserLimit = + Resources.multiplyAndNormalizeUp(resourceCalculator, + userLimitResource, weight, lQueue.getMinimumAllocation()); + if (LOG.isDebugEnabled()) { - LOG.debug("userLimit is fetched. userLimit = " - + userLimitPerSchedulingMode.get(schedulingMode) + ", schedulingMode=" - + schedulingMode + ", partition=" + nodePartition); + LOG.debug("userLimit is fetched. userLimit=" + userLimitResource + + ", userSpecificUserLimit=" + userSpecificUserLimit + + ", schedulingMode=" + schedulingMode + + ", partition=" + nodePartition); } - return userLimitPerSchedulingMode.get(schedulingMode); + return userSpecificUserLimit; } /* @@ -647,16 +680,19 @@ private Resource computeUserLimit(String userName, Resource clusterResource, queueCapacity, required); /* - * We want to base the userLimit calculation on max(queueCapacity, - * usedResources+required). However, we want usedResources to be based on - * the combined ratios of all the users in the queue so we use consumedRatio - * to calculate such. 
The calculation is dependent on how the - * resourceCalculator calculates the ratio between two Resources. DRF - * Example: If usedResources is greater than queueCapacity and users have - * the following [mem,cpu] usages: User1: [10%,20%] - Dominant resource is - * 20% User2: [30%,10%] - Dominant resource is 30% Then total consumedRatio - * is then 20+30=50%. Yes, this value can be larger than 100% but for the - * purposes of making sure all users are getting their fair share, it works. + * We want to base the userLimit calculation on + * max(queueCapacity, usedResources+required). However, we want + * usedResources to be based on the combined ratios of all the users in the + * queue so we use consumedRatio to calculate such. + * The calculation is dependent on how the resourceCalculator calculates the + * ratio between two Resources. DRF Example: If usedResources is greater + * than queueCapacity and users have the following [mem,cpu] usages: + * + * User1: [10%,20%] - Dominant resource is 20% + * User2: [30%,10%] - Dominant resource is 30% + * Then total consumedRatio is then 20+30=50%. Yes, this value can be + * larger than 100% but for the purposes of making sure all users are + * getting their fair share, it works. 
*/ Resource consumed = Resources.multiplyAndNormalizeUp(resourceCalculator, partitionResource, getUsageRatio(nodePartition), @@ -718,18 +754,26 @@ private Resource computeUserLimit(String userName, Resource clusterResource, lQueue.getMinimumAllocation()); if (LOG.isDebugEnabled()) { - LOG.debug("User limit computation for " + userName + " in queue " - + lQueue.getQueueName() + " userLimitPercent=" + lQueue.getUserLimit() - + " userLimitFactor=" + lQueue.getUserLimitFactor() + " required: " - + required + " consumed: " + consumed + " user-limit-resource: " - + userLimitResource + " queueCapacity: " + queueCapacity - + " qconsumed: " + lQueue.getQueueResourceUsage().getUsed() - + " currentCapacity: " + currentCapacity + " activeUsers: " - + usersCount + " clusterCapacity: " + clusterResource - + " resourceByLabel: " + partitionResource + " usageratio: " - + getUsageRatio(nodePartition) + " Partition: " + nodePartition); - } - getUser(userName).setUserResourceLimit(userLimitResource); + LOG.debug("User limit computation for " + userName + + ", in queue: " + lQueue.getQueueName() + + ", userLimitPercent=" + lQueue.getUserLimit() + + ", userLimitFactor=" + lQueue.getUserLimitFactor() + + ", required=" + required + + ", consumed=" + consumed + + ", user-limit-resource=" + userLimitResource + + ", queueCapacity=" + queueCapacity + + ", qconsumed=" + lQueue.getQueueResourceUsage().getUsed() + + ", currentCapacity=" + currentCapacity + + ", activeUsers=" + usersCount + + ", clusterCapacity=" + clusterResource + + ", resourceByLabel=" + partitionResource + + ", usageratio=" + getUsageRatio(nodePartition) + + ", Partition=" + nodePartition + + ", resourceUsed=" + resourceUsed + + ", maxUserLimit=" + maxUserLimit + + ", userWeight=" + getUser(userName).getUserWeight() + ); + } return userLimitResource; } @@ -981,4 +1025,18 @@ private void updateResourceUsagePerUser(User user, Resource resource, + totalResUsageForNonActiveUsers.getAllUsed()); } } -} + + void 
updateUserWeights(CapacitySchedulerConfiguration conf) { + try { + this.writeLock.lock(); + for (Map.Entry u : users.entrySet()) { + String userName = u.getKey(); + float weight = conf.getUserWeight(lQueue.getQueuePath(), userName); + u.getValue().setUserWeight(weight); + } + } finally { + this.writeLock.unlock(); + } + userLimitNeedsRecompute(); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index fea29bb..31fed84 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -880,8 +880,8 @@ protected void getPendingAppDiagnosticMessage( .append(queue.getAMResourceLimitPerPartition(appAMNodePartitionName)); diagnosticMessage.append("; "); diagnosticMessage.append("User AM Resource Limit of the queue = "); - diagnosticMessage.append( - queue.getUserAMResourceLimitPerPartition(appAMNodePartitionName)); + diagnosticMessage.append(queue.getUserAMResourceLimitPerPartition( + appAMNodePartitionName, getUser())); diagnosticMessage.append("; "); diagnosticMessage.append("Queue AM Resource Usage = "); diagnosticMessage.append( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java index b972428..292c5f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java @@ -68,6 +68,7 @@ "left:0%;background:none;border:1px dashed #BFBFBF"; static final String Q_OVER = "background:#FFA333"; static final String Q_UNDER = "background:#5BD75B"; + static final String ACTIVE_USER = "background:#FFFF00"; // Yellow highlight @RequestScoped static class CSQInfo { @@ -209,6 +210,7 @@ protected void render(Block html) { html.table("#userinfo").thead().$class("ui-widget-header").tr().th() .$class("ui-state-default")._("User Name")._().th() .$class("ui-state-default")._("Max Resource")._().th() + .$class("ui-state-default")._("Weight")._().th() .$class("ui-state-default")._("Used Resource")._().th() .$class("ui-state-default")._("Max AM Resource")._().th() .$class("ui-state-default")._("Used AM Resource")._().th() @@ -229,8 +231,11 @@ protected void render(Block html) { ResourceInfo amUsed = (resourceUsages.getAmUsed() == null) ? new ResourceInfo(Resources.none()) : resourceUsages.getAmUsed(); - tbody.tr().td(userInfo.getUsername()) + String highlightIfAsking = + userInfo.getIsActive() ? ACTIVE_USER : null; + tbody.tr().$style(highlightIfAsking).td(userInfo.getUsername()) .td(userInfo.getUserResourceLimit().toString()) + .td(String.valueOf(userInfo.getUserWeight())) .td(resourcesUsed.toString()) .td(resourceUsages.getAMLimit().toString()) .td(amUsed.toString()) @@ -399,6 +404,8 @@ public void render(Block html) { _("Used (over capacity)")._(). 
span().$class("qlegend ui-corner-all ui-state-default"). _("Max Capacity")._(). + span().$class("qlegend ui-corner-all").$style(ACTIVE_USER). + _("Users Requesting Resources")._(). _(); float used = 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 3fbbae3..1b8f969 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -954,7 +954,120 @@ public void testUserLimits() throws Exception { // app_0 doesn't have outstanding resources, there's only one active user. assertEquals("There should only be 1 active user!", 1, a.getAbstractUsersManager().getNumActiveUsers()); + } + + @Test + public void testUserSpecificUserLimits() throws Exception { + // Mock the queue + LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A)); + //unset maxCapacity + a.setMaxCapacity(1.0f); + + // Set minimum user-limit-percent + a.setUserLimit(50); + csConf.setUserLimit(a.getQueuePath(), 50); + // Set weight for "user_0" to be 1.5 for all queues. + csConf.setFloat("yarn.scheduler.capacity.root.user-settings.user_0." 
+ + CapacitySchedulerConfiguration.USER_WEIGHT, 1.5f); + a.setUserLimitFactor(2); + + when(csContext.getClusterResource()) + .thenReturn(Resources.createResource(16 * GB, 32)); + + // Users + final String user_0 = "user_0"; + final String user_1 = "user_1"; + + // Submit applications + final ApplicationAttemptId appAttemptId_0 = + TestUtils.getMockApplicationAttemptId(0, 0); + FiCaSchedulerApp app_0 = + new FiCaSchedulerApp(appAttemptId_0, user_0, a, + a.getAbstractUsersManager(), spyRMContext); + a.submitApplicationAttempt(app_0, user_0); + + final ApplicationAttemptId appAttemptId_1 = + TestUtils.getMockApplicationAttemptId(1, 0); + FiCaSchedulerApp app_1 = + new FiCaSchedulerApp(appAttemptId_1, user_1, a, + a.getAbstractUsersManager(), spyRMContext); + a.submitApplicationAttempt(app_1, user_1); // different user + + // Setup some nodes + String host_0 = "127.0.0.1"; + FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); + String host_1 = "127.0.0.2"; + FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB); + + final int numNodes = 2; + Resource clusterResource = + Resources.createResource(numNodes * (8*GB), numNodes * 16); + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + + // Setup resource-requests + // app_0 asks for 3 3-GB containers + Priority priority = TestUtils.createMockPriority(1); + app_0.updateResourceRequests(Collections.singletonList( + TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 3, true, + priority, recordFactory))); + + // app_1 asks for 2 1-GB containers + app_1.updateResourceRequests(Collections.singletonList( + TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, + priority, recordFactory))); + + Map apps = ImmutableMap.of( + app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), + app_1); + Map nodes = ImmutableMap.of(node_0.getNodeID(), + node_0, node_1.getNodeID(), node_1); + + /** + * Start testing... 
+       */
+
+      // There're two active users
+      assertEquals(2, a.getAbstractUsersManager().getNumActiveUsers());
+
+      // 1 container to user_0. Since queue starts out empty, user limit would
+      // normally be calculated to be the minimum container size (1024MB).
+      // However, in this case, user_0 has a weight of 1.5, so the UL is 2048MB
+      // because 1024 * 1.5 rounded up to container size is 2048MB.
+      applyCSAssignment(clusterResource,
+          a.assignContainers(clusterResource, node_0,
+          new ResourceLimits(clusterResource),
+          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+      assertEquals(4*GB, a.getUsedResources().getMemorySize());
+      assertEquals(4*GB, app_0.getCurrentConsumption().getMemorySize());
+      assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
+
+      // At this point the queue-wide user limit is 3072MB, but since user_0 has a
+      // weight of 1.5, its user limit is 5120MB. So, even though user_0 already
+      // has 4096MB, it is under its user limit, so it gets another container.
+      applyCSAssignment(clusterResource,
+          a.assignContainers(clusterResource, node_0,
+          new ResourceLimits(clusterResource),
+          SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
+      assertEquals(8*GB, a.getUsedResources().getMemorySize());
+      assertEquals(8*GB, app_0.getCurrentConsumption().getMemorySize());
+      assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize());
+
+      // Queue-wide user limit at this point is 4096MB and user_0's user limit is
+      // 6144MB. user_0 has 8192MB.
+ // Now that user_0 is above its user limit, the next container should go to user_1 + applyCSAssignment(clusterResource, + a.assignContainers(clusterResource, node_1, + new ResourceLimits(clusterResource), + SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps); + assertEquals(9*GB, a.getUsedResources().getMemorySize()); + assertEquals(8*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); + + assertEquals(4*GB, + app_0.getTotalPendingRequestsPerPartition().get("").getMemorySize()); + assertEquals(1*GB, + app_1.getTotalPendingRequestsPerPartition().get("").getMemorySize()); } @SuppressWarnings({ "unchecked", "rawtypes" }) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md index 737bdc2..5c464fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md @@ -124,6 +124,7 @@ Configuration | `yarn.scheduler.capacity..user-limit-factor` | The multiple of the queue capacity which can be configured to allow a single user to acquire more resources. By default this is set to 1 which ensures that a single user can never take more than the queue's configured capacity irrespective of how idle the cluster is. Value is specified as a float. | | `yarn.scheduler.capacity..maximum-allocation-mb` | The per queue maximum limit of memory to allocate to each container request at the Resource Manager. This setting overrides the cluster configuration `yarn.scheduler.maximum-allocation-mb`. This value must be smaller than or equal to the cluster maximum. | | `yarn.scheduler.capacity..maximum-allocation-vcores` | The per queue maximum limit of virtual cores to allocate to each container request at the Resource Manager. 
This setting overrides the cluster configuration `yarn.scheduler.maximum-allocation-vcores`. This value must be smaller than or equal to the cluster maximum. | +| `yarn.scheduler.capacity.<queue-path>.user-settings.<user-name>.weight` | This floating point value is used when calculating the user limit resource values for users in a queue. This value will weight each user more or less than the other users in the queue. This value is inherited by subqueues of queue-path. | * Running and Pending Application Limits