diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index aad2916..1d2e34f 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -525,7 +525,7 @@ private Resource computeMaxAMResource() {
* @param amResource resources required to run the AM
* @return true if this queue can run
*/
- public boolean canRunAppAM(Resource amResource) {
+ boolean canRunAppAM(Resource amResource) {
if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
return true;
}
@@ -533,7 +533,10 @@ public boolean canRunAppAM(Resource amResource) {
Resource maxAMResource = computeMaxAMResource();
getMetrics().setMaxAMShare(maxAMResource);
Resource ifRunAMResource = Resources.add(amResourceUsage, amResource);
- return Resources.fitsIn(ifRunAMResource, maxAMResource);
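+ // Use the policy's resource calculator so the check only covers the
+ // resources the policy accounts for (fair ignores vcores, DRF does not).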
+ return policy.getResourceCalculator().fitsIn(
+ scheduler.getClusterResource(), ifRunAMResource, maxAMResource);
}
void addAMResourceUsage(Resource amResource) {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 31dd7fe..857898a 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -1482,6 +1482,73 @@ public void testRequestAMResourceInZeroFairShareQueue() throws Exception {
1, app3.getLiveContainers().size());
}
+ @Test
+ public void testAMRunnableUnderDifferentPolicies() throws Exception {
+ conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+
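+ // Write an allocation file in which queue1 caps its AM share at 0.5 and
+ // the default queue scheduling policy is fair.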
+ PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+ out.println("<?xml version=\"1.0\"?>");
+ out.println("<allocations>");
+ out.println("<queue name=\"queue1\">");
+ out.println("<maxAMShare>0.5</maxAMShare>");
+ out.println("</queue>");
+ out.println("<defaultQueueSchedulingPolicy>fair" +
+ "</defaultQueueSchedulingPolicy>");
+ out.println("</allocations>");
+ out.close();
+
+ scheduler.init(conf);
+ scheduler.start();
+ scheduler.reinitialize(conf, resourceManager.getRMContext());
+
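+ // Add a single node that has 8 GB of memory but only 1 vcore.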
+ RMNode node = MockNodes
+ .newNodeInfo(1, Resources.createResource(8 * GB, 1), 0, "127.0.0.1");
+ NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);
+ NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);
+ scheduler.handle(nodeEvent);
+ scheduler.update();
+
+ // An AM that needs 1 GB of memory and 2 vcores can run even though the
+ // cluster has only 1 vcore, because the fair policy ignores vcores.
+ Resource amResource = Resource.newInstance(GB, 2);
+ int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
+ ApplicationAttemptId attId1 = createAppAttemptId(1, 1);
+ createApplicationWithAMResource(attId1, "queue1", "user1", amResource);
+ createSchedulingRequestExistingApplication(GB, 1, amPriority, attId1);
+ FSAppAttempt app1 = scheduler.getSchedulerApp(attId1);
+ scheduler.update();
+ scheduler.handle(updateEvent);
+ assertEquals("Application 1 should be running",
+ 1, app1.getLiveContainers().size());
+
+ // An AM with the same resource request cannot run under the DRF policy.
+ out = new PrintWriter(new FileWriter(ALLOC_FILE));
+ out.println("<?xml version=\"1.0\"?>");
+ out.println("<allocations>");
+ out.println("<queue name=\"queue1\">");
+ out.println("<maxAMShare>0.5</maxAMShare>");
+ out.println("</queue>");
+ out.println("<defaultQueueSchedulingPolicy>drf" +
+ "</defaultQueueSchedulingPolicy>");
+ out.println("</allocations>");
+ out.close();
+
+ scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+ ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
+ createApplicationWithAMResource(attId2, "queue1", "user1", amResource);
+ createSchedulingRequestExistingApplication(GB, 1, amPriority, attId2);
+ FSAppAttempt app2 = scheduler.getSchedulerApp(attId2);
+ scheduler.update();
+ scheduler.handle(updateEvent);
+ assertEquals("Application 2 shouldn't be running because there are not "
+ + "enough vcores in the cluster",
+ 0, app2.getLiveContainers().size());
+ }
+
@Test (timeout = 500000)
public void testContainerReservationNotExceedingQueueMax() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);