diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index e1050da..349464e 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -123,13 +123,21 @@ public Priority getPriority() { public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) { QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class); queueInfo.setQueueName(getQueueName()); - // TODO: we might change these queue metrics around a little bit - // to match the semantics of the fair scheduler. 
- queueInfo.setCapacity((float) getFairShare().getMemory() / - scheduler.getClusterResource().getMemory()); - queueInfo.setCapacity((float) getResourceUsage().getMemory() / - scheduler.getClusterResource().getMemory()); - + + if (scheduler.getClusterResource().getMemory() == 0) { + queueInfo.setCapacity(0.0f); + } else { + queueInfo.setCapacity((float) getFairShare().getMemory() / + scheduler.getClusterResource().getMemory()); + } + + if (getFairShare().getMemory() == 0) { + queueInfo.setCurrentCapacity(0.0f); + } else { + queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() / + getFairShare().getMemory()); + } + + ArrayList<QueueInfo> childQueueInfos = new ArrayList<QueueInfo>(); if (includeChildQueues) { Collection<FSQueue> childQueues = getChildQueues(); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 61cbdc1..b2ef45a 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; @@ -506,6 +507,66 @@ public 
void testFairShareWithNoneZeroWeightNoneZeroMinRes() } @Test + public void testQueueInfo() throws IOException { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println("<?xml version=\"1.0\"?>"); + out.println("<allocations>"); + out.println("<queue name=\"queueA\">"); + out.println("<weight>.25</weight>"); + out.println("</queue>"); + out.println("<queue name=\"queueB\">"); + out.println("<weight>.75</weight>"); + out.println("</queue>"); + out.println("</allocations>"); + out.close(); + + scheduler.init(conf); + scheduler.start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + // Add one big node (only care about aggregate capacity) + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, + "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + // Queue A wants 1 * 1024. + createSchedulingRequest(1 * 1024, "queueA", "user1"); + // Queue B wants 6 * 1024 + createSchedulingRequest(6 * 1024, "queueB", "user1"); + + scheduler.update(); + + // Capacity should be the same as weight of Queue, + // because the sum of all active queues' weights is 1. + // Before NodeUpdate Event, CurrentCapacity should be 0 + QueueInfo queueInfo = scheduler.getQueueInfo("queueA", false, false); + Assert.assertEquals(0.25f, queueInfo.getCapacity(), 0.0f); + Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f); + queueInfo = scheduler.getQueueInfo("queueB", false, false); + Assert.assertEquals(0.75f, queueInfo.getCapacity(), 0.0f); + Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f); + + // Each NodeUpdate Event will only assign one container. + // To assign two containers, call handle NodeUpdate Event twice. + NodeUpdateSchedulerEvent nodeEvent2 = new NodeUpdateSchedulerEvent(node1); + scheduler.handle(nodeEvent2); + scheduler.handle(nodeEvent2); + + // After NodeUpdate Event, CurrentCapacity for queueA should be 1/2=0.5 + // and CurrentCapacity for queueB should be 6/6=1. 
+ queueInfo = scheduler.getQueueInfo("queueA", false, false); + Assert.assertEquals(0.25f, queueInfo.getCapacity(), 0.0f); + Assert.assertEquals(0.5f, queueInfo.getCurrentCapacity(), 0.0f); + queueInfo = scheduler.getQueueInfo("queueB", false, false); + Assert.assertEquals(0.75f, queueInfo.getCapacity(), 0.0f); + Assert.assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f); + } + + @Test public void testSimpleHierarchicalFairShareCalculation() throws IOException { scheduler.init(conf); scheduler.start();