diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetrics.java index 926f8520604..390389a5fee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/metrics/CustomResourceMetrics.java @@ -99,6 +99,36 @@ public void registerCustomResources(Map customResources, } } + /** + * As and when this metric object construction happens for any queue, all + * custom resource metrics value would be initialized with '0' like any other + * mandatory resources metrics. + * @param customResources Map containing all custom resource types + * @param registry of the metric type + * @param metricPrefix prefix in metric name + * @param metricDesc suffix for metric name + * @param partition for register. + */ + public void registerCustomResourcesWithPartiton(Map customResources, + MetricsRegistry registry, String metricPrefix, String metricDesc, String partition) { + for (Map.Entry entry : customResources.entrySet()) { + String resourceName = entry.getKey(); + Long resourceValue = entry.getValue(); + + MutableGaugeLong resourceMetric = + (MutableGaugeLong) registry.get(partition + "." + + metricPrefix + resourceName); + + if (resourceMetric == null) { + resourceMetric = registry.newGauge(partition + "." 
+ + metricPrefix + resourceName, + "Partition " + partition + " " + + metricDesc.replace("NAME", resourceName), 0L); + } + resourceMetric.set(resourceValue); + } + } + public void setAvailable(Resource res) { available.set(res); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java index 586f837f147..8b50b2b8f27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java @@ -103,6 +103,30 @@ protected void registerCustomResources() { super.registerCustomResources(); } + protected void registerResourceWithPartition(String partition, + String metricsName, String metricDesc, long resourceValue) { + MutableGaugeLong resourceMetric = + (MutableGaugeLong) registry.get(partition + "." + metricsName); + + if (resourceMetric == null) { + resourceMetric = registry.newGauge(partition + "." + metricsName, + metricDesc.replace("NAME", metricsName), 0L); + } + resourceMetric.set(resourceValue); + } + + protected void registerCapacityWithPartition(String partition, + String metricsName, String metricDesc, float capValue) { + MutableGaugeFloat resourceMetric = + (MutableGaugeFloat) registry.get(partition + "." + metricsName); + + if (resourceMetric == null) { + resourceMetric = registry.newGauge(partition + "." 
+ metricsName, + metricDesc.replace("NAME", metricsName), 0F); + } + resourceMetric.set(capValue); + } + public long getAMResourceLimitMB() { return AMResourceLimitMB.value(); } @@ -181,10 +205,30 @@ public long getGuaranteedMB() { return guaranteedMB.value(); } + public long getGuarenteedMBWithPartition(String partition) { + MutableGaugeLong res = (MutableGaugeLong )registry + .get(partition + "." + "GuaranteedMB"); + if (res != null) { + return res.value(); + } else { + return 0L; + } + } + public int getGuaranteedVCores() { return guaranteedVCores.value(); } + public long getGuaranteedVCoresWithPartition(String partition) { + MutableGaugeLong res = (MutableGaugeLong )registry + .get(partition + "." + "GuaranteedVCores"); + if (res != null) { + return res.value(); + } else { + return 0L; + } + } + public void setGuaranteedResources(String partition, Resource res) { if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { guaranteedMB.set(res.getMemorySize()); @@ -195,6 +239,19 @@ public void setGuaranteedResources(String partition, Resource res) { csQueueMetricsForCustomResources.getGuaranteedCapacity(), registry, GUARANTEED_CAPACITY_METRIC_PREFIX, GUARANTEED_CAPACITY_METRIC_DESC); } + } else { + registerResourceWithPartition(partition, "GuaranteedMB", + "Partition NAME Guaranteed memory in MB", res.getMemorySize()); + registerResourceWithPartition(partition, "GuaranteedVCores", + "Partition NAME Guaranteed CPU in virtual cores", res.getVirtualCores()); + if (csQueueMetricsForCustomResources != null) { + csQueueMetricsForCustomResources.setGuaranteedCapacity(res); + csQueueMetricsForCustomResources. + registerCustomResourcesWithPartiton(csQueueMetricsForCustomResources. 
+ getGuaranteedCapacity(), registry, + GUARANTEED_CAPACITY_METRIC_PREFIX, + GUARANTEED_CAPACITY_METRIC_DESC, partition); + } } } @@ -206,6 +263,26 @@ public int getMaxCapacityVCores() { return maxCapacityVCores.value(); } + public long getMaxCapacityMBWithPartition(String partition) { + MutableGaugeLong res = (MutableGaugeLong )registry + .get(partition + "." + "MaxCapacityMB"); + if (res != null) { + return res.value(); + } else { + return 0L; + } + } + + public long getMaxCapacityVCoresWithPartition(String partition) { + MutableGaugeLong res = (MutableGaugeLong )registry + .get(partition + "." + "MaxCapacityVCores"); + if (res != null) { + return res.value(); + } else { + return 0L; + } + } + public void setMaxCapacityResources(String partition, Resource res) { if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { maxCapacityMB.set(res.getMemorySize()); @@ -216,6 +293,19 @@ public void setMaxCapacityResources(String partition, Resource res) { csQueueMetricsForCustomResources.getMaxCapacity(), registry, MAX_CAPACITY_METRIC_PREFIX, MAX_CAPACITY_METRIC_DESC); } + } else { + registerResourceWithPartition(partition, "MaxCapacityMB", + "Partition NAME Maximum memory in MB", res.getMemorySize()); + registerResourceWithPartition(partition, "MaxCapacityVCores", + "Partition NAME Maximum CPU in virtual cores", res.getVirtualCores()); + if (csQueueMetricsForCustomResources != null) { + csQueueMetricsForCustomResources.setMaxCapacity(res); + csQueueMetricsForCustomResources. + registerCustomResourcesWithPartiton(csQueueMetricsForCustomResources. + getMaxCapacity(), registry, + MAX_CAPACITY_METRIC_PREFIX, + MAX_CAPACITY_METRIC_DESC, partition); + } } } @@ -277,11 +367,40 @@ public float getGuaranteedAbsoluteCapacity() { return guaranteedAbsoluteCapacity.value(); } + public float + getGuaranteedCapacityWithPartition(String partition) { + MutableGaugeFloat res = (MutableGaugeFloat ) registry + .get(partition + "." 
+ "GuaranteedCapacity"); + if (res != null) { + return res.value(); + } else { + return 0f; + } + } + + public float + getGuaranteedAbsoluteCapacityWithPartition(String partition) { + MutableGaugeFloat res = (MutableGaugeFloat ) registry + .get(partition + "." + "GuaranteedAbsoluteCapacity"); + if (res != null) { + return res.value(); + } else { + return 0f; + } + } + public void setGuaranteedCapacities(String partition, float capacity, - float absoluteCapacity) { + float absoluteCapacity) { if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { guaranteedCapacity.set(capacity); guaranteedAbsoluteCapacity.set(absoluteCapacity); + } else { + registerCapacityWithPartition(partition, "GuaranteedCapacity", + "Partition NAME Guaranteed " + + "capacity in percentage relative to parent", capacity); + registerCapacityWithPartition(partition, "GuaranteedAbsoluteCapacity", + "Partition NAME Guaranteed capacity " + + "in percentage relative to total partition", absoluteCapacity); } } @@ -293,11 +412,39 @@ public float getMaxAbsoluteCapacity() { return maxAbsoluteCapacity.value(); } + public float + getMaxCapacityWithPartition(String partition) { + MutableGaugeFloat res = (MutableGaugeFloat ) registry + .get(partition + "." + "MaxCapacity"); + if (res != null) { + return res.value(); + } else { + return 0f; + } + } + + public float + getMaxAbsoluteCapacityWithPartition(String partition) { + MutableGaugeFloat res = (MutableGaugeFloat ) registry + .get(partition + "." 
+ "MaxAbsoluteCapacity"); + if (res != null) { + return res.value(); + } else { + return 0f; + } + } + public void setMaxCapacities(String partition, float capacity, float absoluteCapacity) { if (partition == null || partition.equals(RMNodeLabelsManager.NO_LABEL)) { maxCapacity.set(capacity); maxAbsoluteCapacity.set(absoluteCapacity); + } else { + registerCapacityWithPartition(partition, "MaxCapacity", + "Partition NAME Maximum capacity in percentage relative to parent", capacity); + registerCapacityWithPartition(partition, "MaxAbsoluteCapacity", + "Partition NAME Maximum capacity " + + "in percentage relative to total partition", absoluteCapacity); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java index a4034768387..4f6517f466f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java @@ -276,10 +276,10 @@ public static void updateConfiguredCapacityMetrics(ResourceCalculator rc, queue.getMetrics().setMaxCapacityResources(partition, rc.multiplyAndNormalizeDown( partitionResource, queue.getQueueCapacities().getAbsoluteMaximumCapacity(partition), queue.getMinimumAllocation())); - queue.getMetrics().setGuaranteedCapacities(partition, + queue.getMetrics().setGuaranteedCapacities(partition, queue.getQueueCapacities().getCapacity(partition), queue.getQueueCapacities().getAbsoluteCapacity(partition)); - 
queue.getMetrics().setMaxCapacities(partition, + queue.getQueueCapacities().getMaximumCapacity(partition), queue.getQueueCapacities().getAbsoluteMaximumCapacity(partition)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 89e7f838003..684dd2e0121 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1949,10 +1949,15 @@ public void updateClusterResource(Resource clusterResource, // Update metrics CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource, this, labelManager, null); - // Update configured capacity/max-capacity for default partition only - CSQueueUtils.updateConfiguredCapacityMetrics(resourceCalculator, - labelManager.getResourceByLabel(null, clusterResource), - RMNodeLabelsManager.NO_LABEL, this); + + Set configuredNodelabels = + csContext.getConfiguration().getConfiguredNodeLabels(getQueuePath()); + for (String label : configuredNodelabels) { + // Update configured capacity/max-capacity for each configured partition + CSQueueUtils.updateConfiguredCapacityMetrics(resourceCalculator, + labelManager.getResourceByLabel(label, clusterResource), + label, this); + } // queue metrics are updated, more resource may be available // activate the pending applications if possible diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 3d289331413..29e9dd83cd5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -1262,10 +1262,13 @@ public void updateClusterResource(Resource clusterResource, CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource, this, labelManager, null); - // Update configured capacity/max-capacity for default partition only - CSQueueUtils.updateConfiguredCapacityMetrics(resourceCalculator, - labelManager.getResourceByLabel(null, clusterResource), - RMNodeLabelsManager.NO_LABEL, this); + + for (String label : configuredNodelabels) { + // Update configured capacity/max-capacity for each configured partition + CSQueueUtils.updateConfiguredCapacityMetrics(resourceCalculator, + labelManager.getResourceByLabel(label, clusterResource), + label, this); + } } catch (IOException e) { LOG.error("Fatal issue found: e", e); throw new YarnRuntimeException("Fatal issue during scheduling", e); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java 
index d8dc4dc61a9..d3f786f8537 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -1050,6 +1050,12 @@ public void testParseQueueWithAbsoluteResource() { cs.init(conf); cs.start(); + RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1); + RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2); + + cs.handle(new NodeAddedSchedulerEvent(n1)); + cs.handle(new NodeAddedSchedulerEvent(n2)); + Resource rootQueueLableCapacity = cs.getQueue("root").getQueueResourceQuotas() .getConfiguredMinResource(labelName); @@ -1066,6 +1072,11 @@ public void testParseQueueWithAbsoluteResource() { childQueueQuotas.getConfiguredMinResource(labelName); assertEquals(4096, childQueueLabelCapacity.getMemorySize()); assertEquals(10, childQueueLabelCapacity.getVirtualCores()); + + + System.out.println(((CSQueueMetrics)cs.getQueue("root").getMetrics()).getGuaranteedMB()); + + System.out.println(((CSQueueMetrics)cs.getQueue("root").getMetrics()).getGuarenteedMBWithPartition(labelName)); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java index e21a60f3d7e..11782cc380f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java @@ -29,6 +29,7 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -1379,7 +1380,7 @@ public void testPreferenceOfQueuesTowardsNodePartitions() csConf.setAccessibleNodeLabels(A, toSet("x")); csConf.setCapacityByLabel(A, "x", 33); csConf.setQueues(A, new String[] {"a1", "a2"}); - + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; csConf.setCapacity(B, 33); csConf.setAccessibleNodeLabels(B, toSet("x")); @@ -1540,6 +1541,189 @@ public RMNodeLabelsManager createNodeLabelManager() { rm1.close(); } + + @Test + public void testCSQueueConfiguredMetricsWithNodePartitions() + throws Exception { + /** + * Test case: have a following queue structure: + * + *
+     *            root
+     *         /   |   \
+     *        a     b    c
+     *       / \   / \  /  \
+     *      a1 a2 b1 b2 c1 c2
+     *     (x)    (x)   (x)
+     * 
+ * + * Only a1, b1, c1 can access label=x, and their default label=x Each each + * has one application, asks for 5 containers. NM1 has label=x + * + */ + + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(this.conf); + + // Define top-level queues + csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"}); + csConf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + csConf.setCapacity(A, 33); + csConf.setAccessibleNodeLabels(A, toSet("x")); + csConf.setCapacityByLabel(A, "x", 33); + csConf.setQueues(A, new String[] {"a1", "a2"}); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + csConf.setCapacity(B, 33); + csConf.setAccessibleNodeLabels(B, toSet("x")); + csConf.setCapacityByLabel(B, "x", 33); + csConf.setQueues(B, new String[] {"b1", "b2"}); + + final String C = CapacitySchedulerConfiguration.ROOT + ".c"; + csConf.setCapacity(C, 34); + csConf.setAccessibleNodeLabels(C, toSet("x")); + csConf.setCapacityByLabel(C, "x", 34); + csConf.setQueues(C, new String[] {"c1", "c2"}); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + csConf.setCapacity(A1, 50); + csConf.setCapacityByLabel(A1, "x", 100); + csConf.setDefaultNodeLabelExpression(A1, "x"); + + final String A2 = A + ".a2"; + csConf.setCapacity(A2, 50); + csConf.setCapacityByLabel(A2, "x", 0); + + final String B1 = B + ".b1"; + csConf.setCapacity(B1, 50); + csConf.setCapacityByLabel(B1, "x", 100); + csConf.setDefaultNodeLabelExpression(B1, "x"); + + final String B2 = B + ".b2"; + csConf.setCapacity(B2, 50); + csConf.setCapacityByLabel(B2, "x", 0); + + final String C1 = C + ".c1"; + csConf.setCapacity(C1, 50); + csConf.setCapacityByLabel(C1, "x", 100); + csConf.setDefaultNodeLabelExpression(C1, "x"); + + final String C2 = C + ".c2"; + csConf.setCapacity(C2, 50); + csConf.setCapacityByLabel(C2, "x", 0); + + // set node -> label + 
mgr.addToCluserNodeLabels(ImmutableSet.of( + NodeLabel.newInstance("x", false), NodeLabel.newInstance("y"))); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"))); + + // inject node label manager + MockRM rm1 = new MockRM(csConf) { + @Override + public RMNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB, 100); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 100 * GB, 500); // label = + + + // NM1 do 15 heartbeats + CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); + RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId()); + for (int i = 0; i < 15; i++) { + cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); + } + + // Root check. + Assert.assertEquals(cs.getResourceCalculator().multiplyAndNormalizeDown( + mgr.getResourceByLabel("x", cs.getClusterResource()), + cs.getQueue(CapacitySchedulerConfiguration.ROOT). + getQueueCapacities().getAbsoluteCapacity("x"), + cs.getQueue(CapacitySchedulerConfiguration.ROOT). + getMinimumAllocation()).getMemorySize(), + ((CSQueueMetrics)cs.getQueue(CapacitySchedulerConfiguration.ROOT). + getMetrics()).getGuarenteedMBWithPartition("x")); + + // Memory check. + Assert.assertEquals(cs.getResourceCalculator().multiplyAndNormalizeDown( + mgr.getResourceByLabel("x", cs.getClusterResource()), + cs.getQueue(A). + getQueueCapacities().getAbsoluteCapacity("x"), + cs.getQueue(A). + getMinimumAllocation()).getMemorySize(), + ((CSQueueMetrics)cs.getQueue(A). + getMetrics()).getGuarenteedMBWithPartition("x")); + + // Cpu check. + Assert.assertEquals(cs.getResourceCalculator().multiplyAndNormalizeDown( + mgr.getResourceByLabel("x", cs.getClusterResource()), + cs.getQueue(B). + getQueueCapacities().getAbsoluteCapacity("x"), + cs.getQueue(B). + getMinimumAllocation()).getVirtualCores(), + ((CSQueueMetrics)cs.getQueue(B). 
+ getMetrics()).getGuaranteedVCoresWithPartition("x")); + + // Capacity check. + Assert.assertEquals( + cs.getQueue(A1). + getQueueCapacities().getCapacity("x"), + ((CSQueueMetrics)cs.getQueue(A1). + getMetrics()). + getGuaranteedCapacityWithPartition("x"), 1e-6); + + // Max Absolute Capacity check. + Assert.assertEquals( + cs.getQueue(A1). + getQueueCapacities().getAbsoluteMaximumCapacity("x"), + ((CSQueueMetrics)cs.getQueue(A1). + getMetrics()). + getMaxAbsoluteCapacityWithPartition("x"), 1e-6); + + // Absolute Capacity check. + Assert.assertEquals( + cs.getQueue(A2). + getQueueCapacities().getAbsoluteCapacity("x"), + ((CSQueueMetrics)cs.getQueue(A2). + getMetrics()). + getGuaranteedAbsoluteCapacityWithPartition("x"), 1e-6); + + // Max Capacity check. + Assert.assertEquals( + cs.getQueue(A2). + getQueueCapacities().getMaximumCapacity("x"), + ((CSQueueMetrics)cs.getQueue(A2). + getMetrics()).getMaxCapacityWithPartition("x"), 1e-6); + + // Max resource memory check. + Assert.assertEquals(cs.getResourceCalculator().multiplyAndNormalizeDown( + mgr.getResourceByLabel("x", cs.getClusterResource()), + cs.getQueue(B1). + getQueueCapacities().getAbsoluteMaximumCapacity("x"), + cs.getQueue(B1). + getMinimumAllocation()).getMemorySize(), + ((CSQueueMetrics)cs.getQueue(B1). + getMetrics()).getMaxCapacityMBWithPartition("x")); + + // Max resource cpu check. + Assert.assertEquals(cs.getResourceCalculator().multiplyAndNormalizeDown( + mgr.getResourceByLabel("x", cs.getClusterResource()), + cs.getQueue(B2). + getQueueCapacities().getAbsoluteMaximumCapacity("x"), + cs.getQueue(B2). + getMinimumAllocation()).getVirtualCores(), + ((CSQueueMetrics)cs.getQueue(B2). + getMetrics()).getMaxCapacityVCoresWithPartition("x")); + + rm1.close(); + } @Test public void testQueuesWithoutAccessUsingPartitionedNodes() throws Exception {