diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index 716e1ee..ccaf8af 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -200,4 +200,9 @@ protected boolean preemptContainerPreCheck() { return parent.getPolicy() .checkIfUsageOverFairShare(getResourceUsage(), getFairShare()); } + + /** Returns true if queue has at least one app running */ + public boolean isActive() { + return this.getNumRunnableApps() > 0; + } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java index 1087c73..24825b5 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java @@ -25,7 +25,9 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.util.ReflectionUtils; import 
org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessActiveOnlyPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairShareActiveOnlyPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; @@ -60,11 +62,14 @@ public static SchedulingPolicy getInstance(Class cla * Returns {@link SchedulingPolicy} instance corresponding to the * {@link SchedulingPolicy} passed as a string. The policy can be "fair" for * FairSharePolicy, "fifo" for FifoPolicy, or "drf" for - * DominantResourceFairnessPolicy. For a custom + * DominantResourceFairnessPolicy, "fair-active" for FairShareActiveOnlyPolicy + * and "drf-active" for DominantResourceFairnessActiveOnlyPolicy. For a custom * {@link SchedulingPolicy}s in the RM classpath, the policy should be * canonical class name of the {@link SchedulingPolicy}. 
* - * @param policy canonical class name or "drf" or "fair" or "fifo" + * @param policy + * canonical class name or "drf" or "fair" or "fifo" or "fair-active" + * or "drf-active" * @throws AllocationConfigurationException */ @SuppressWarnings("unchecked") @@ -79,6 +84,11 @@ public static SchedulingPolicy parse(String policy) clazz = FifoPolicy.class; } else if (text.equalsIgnoreCase(DominantResourceFairnessPolicy.NAME)) { clazz = DominantResourceFairnessPolicy.class; + } else if (text.equalsIgnoreCase(FairShareActiveOnlyPolicy.NAME)) { + clazz = FairShareActiveOnlyPolicy.class; + } else if (text + .equalsIgnoreCase(DominantResourceFairnessActiveOnlyPolicy.NAME)) { + clazz = DominantResourceFairnessActiveOnlyPolicy.class; } else { try { clazz = Class.forName(policy); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java index 77dad49..65ebff3 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java @@ -161,7 +161,7 @@ private static int getResourceValue(Resource resource, ResourceType type) { } } - private static void setResourceValue(int val, Resource resource, ResourceType type) { + static void setResourceValue(int val, Resource resource, ResourceType type) { switch (type) { case MEMORY: resource.setMemory(val); diff --git 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessActiveOnlyPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessActiveOnlyPolicy.java new file mode 100644 index 0000000..157f32c --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessActiveOnlyPolicy.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies; + +import java.util.ArrayList; +import java.util.Collection; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AppSchedulable; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.NestedUserQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.User; + +/** + * Behavior of DominantResourceFairnessActiveOnlyPolicy is same as + * DominantResourceFairnessPolicy except in computing fair share for + * schedulables. DominantResourceFairnessActiveOnlyPolicy computes fair share + * only for active queues. This policy is useful for converging towards fairness + * more quickly in scenarios where there are many queues under a parent and only + * a subset of them are active at a point in time. {@link NestedUserQueue}, + * {@link User} are some use cases where this policy could be useful. 
+ */ +@Private +@Unstable +public class DominantResourceFairnessActiveOnlyPolicy extends + DominantResourceFairnessPolicy { + public static final String NAME = "drf-active"; + + @Override + public void computeShares(Collection schedulables, + Resource totalResources) { + Collection activeSchedulables = new ArrayList(); + for (Schedulable sched : schedulables) { + if (sched instanceof AppSchedulable + || (sched instanceof FSQueue && ((FSQueue) sched).isActive())) { + activeSchedulables.add(sched); + } else { + for (ResourceType type : ResourceType.values()) { + ComputeFairShares.setResourceValue(0, sched.getFairShare(), type); + } + } + } + + for (ResourceType type : ResourceType.values()) { + ComputeFairShares.computeShares(activeSchedulables, totalResources, type); + } + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairShareActiveOnlyPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairShareActiveOnlyPolicy.java new file mode 100644 index 0000000..ac867fd --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairShareActiveOnlyPolicy.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies; + +import java.util.ArrayList; +import java.util.Collection; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AppSchedulable; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.NestedUserQueue; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.User; + +/** + * Behavior of FairShareActiveOnlyPolicy is same as FairSharePolicy except in + * computing fair share for schedulables. FairShareActiveOnlyPolicy computes + * fair share only for active queues. This policy is useful for converging + * towards fairness more quickly in scenarios where there are many queues under + * a parent and only a subset of them are active at a point in time. + * {@link NestedUserQueue}, {@link User} are some use cases where this policy + * could be useful. 
+ */ +@Private +@Unstable +public class FairShareActiveOnlyPolicy extends FairSharePolicy { + public static final String NAME = "fair-active"; + + @Override + public void computeShares(Collection schedulables, + Resource totalResources) { + Collection activeSchedulables = new ArrayList(); + for (Schedulable sched : schedulables) { + if (sched instanceof AppSchedulable + || (sched instanceof FSQueue && ((FSQueue) sched).isActive())) { + activeSchedulables.add(sched); + } else { + ComputeFairShares.setResourceValue(0, sched.getFairShare(), + ResourceType.MEMORY); + } + } + ComputeFairShares.computeShares(activeSchedulables, totalResources, + ResourceType.MEMORY); + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index a54387a..3c5d117 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -3077,4 +3077,274 @@ public void testMoveToNonexistentQueue() throws Exception { createSchedulingRequest(1024, 1, "queue1", "user1", 3); scheduler.moveApplication(appAttId.getApplicationId(), "queue2"); } + + private void setupCluster(int nodeMem) throws IOException { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println(" 8"); + out.println(" fair-active"); + out.println(" "); + 
out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(""); + out.println(""); + out.println(" 1"); + out.println(" fair"); + out.println(" "); + out.println(" "); + out.println(""); + out.println(""); + out.close(); + + scheduler.init(conf); + scheduler.start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(nodeMem), + 1, "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + } + + @Test + public void testFairShareActiveOnlyNoAppsRunning() throws IOException { + int nodeCapacity = 16 * 1024; + setupCluster(nodeCapacity); + + scheduler.update(); + // No apps are running in the cluster,verify if fair share is zero for all + // queues under parentA(which uses fair-active policy) and 5% for + // queues under parentB(which uses fair policy). + Collection leafQueues = scheduler.getQueueManager() + .getLeafQueues(); + + for (FSLeafQueue leaf : leafQueues) { + if (leaf.getName().startsWith("root.parentA")) { + assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity + * 100, 0); + } else if (leaf.getName().startsWith("root.parentB")) { + assertEquals(5, (double) leaf.getFairShare().getMemory() / nodeCapacity + * 100, 0.1); + } + } + } + + @Test + public void testFairShareActiveOnlyOneAppRunning() throws IOException { + int nodeCapacity = 16 * 1024; + setupCluster(nodeCapacity); + + // Run a app in a childA1 and see if that queue's fair share is 80%, + // which is basically all of parentA's fair share + createSchedulingRequest(2 * 1024, "root.parentA.childA1", "user1"); + + scheduler.update(); + assertEquals( + 80, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentA.childA1", false).getFairShare() + .getMemory() + / nodeCapacity * 100, 0.1); + } + + @Test + public void 
testFairShareActiveOnlyMultipleActiveQueuesUnderSameParent() + throws IOException { + int nodeCapacity = 16 * 1024; + setupCluster(nodeCapacity); + + // Run apps in childA1,childA2,childA3 + createSchedulingRequest(2 * 1024, "root.parentA.childA1", "user1"); + createSchedulingRequest(2 * 1024, "root.parentA.childA2", "user2"); + createSchedulingRequest(2 * 1024, "root.parentA.childA3", "user3"); + + scheduler.update(); + + // Fair share of root.parentA would be 80% and each of the three + // active child queues would get 80/3 = 26%. + for (int i = 1; i <= 3; i++) { + assertEquals( + 26, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentA.childA" + i, false).getFairShare() + .getMemory() + / nodeCapacity * 100, .9); + } + } + + @Test + public void testFairShareActiveOnlyMultipleActiveQueuesUnderDifferentParent() + throws IOException { + int nodeCapacity = 16 * 1024; + setupCluster(nodeCapacity); + + // Run apps in childA1,childA2 which are under parentA + createSchedulingRequest(2 * 1024, "root.parentA.childA1", "user1"); + createSchedulingRequest(3 * 1024, "root.parentA.childA2", "user2"); + + // Run app in childB1 which is under parentB + createSchedulingRequest(1 * 1024, "root.parentB.childB1", "user3"); + + scheduler.update(); + + // The two active child queues under parentA would + // get 80/2=40% + for (int i = 1; i <= 2; i++) { + assertEquals( + 40, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentA.childA" + i, false).getFairShare() + .getMemory() + / nodeCapacity * 100, .9); + } + + // The child queue under parentB would get 5% + assertEquals( + 5, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentB.childB1", false).getFairShare() + .getMemory() + / nodeCapacity * 100, .9); + + } + + @Test + public void testFairShareActiveOnly_ShareResetsToZeroWhenAppsComplete() + throws IOException { + int nodeCapacity = 16 * 1024; + setupCluster(nodeCapacity); + + // Run apps in childA1,childA2 which are under parentA + 
ApplicationAttemptId app1 = createSchedulingRequest(2 * 1024, + "root.parentA.childA1", "user1"); + ApplicationAttemptId app2 = createSchedulingRequest(3 * 1024, + "root.parentA.childA2", "user2"); + + scheduler.update(); + + // Verify if both the active queues under parentA get 40% fair + // share + for (int i = 1; i <= 2; i++) { + assertEquals( + 40, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentA.childA" + i, false).getFairShare() + .getMemory() + / nodeCapacity * 100, .9); + } + // Let app under childA1 complete. This should cause the fair share of queue + // childA1 to be reset to zero,since the queue has no apps running. + // Queue childA2's fair share would increase to 80% since its the only + // active queue. + AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent( + app1, RMAppAttemptState.FINISHED, false); + + scheduler.handle(appRemovedEvent1); + scheduler.update(); + + assertEquals( + 0, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentA.childA1", false).getFairShare() + .getMemory() + / nodeCapacity * 100, 0); + assertEquals( + 80, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentA.childA2", false).getFairShare() + .getMemory() + / nodeCapacity * 100, 0.1); + } + + private void setupCluster(int mem, int vCores) throws IOException { + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + out.println(""); + out.println(" drf"); + out.println(" "); + out.println(" 8"); + out.println(" drf-active"); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(" 1"); + out.println(" fair"); + out.println(" "); + out.println(" "); + out.println(" "); + out.println(""); + + out.println(""); + 
out.close(); + + scheduler.init(conf); + scheduler.start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + RMNode node1 = MockNodes.newNodeInfo(1, + Resources.createResource(mem, vCores), 1, "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + } + + @Test + public void testDRFActiveOnlyMultipleActiveQueuesUnderDifferentParent() + throws IOException { + int nodeMem = 16 * 1024; + int nodeVcores = 10; + setupCluster(nodeMem, nodeVcores); + + // Run apps in childA1,childA2 which are under parentA + createSchedulingRequest(2 * 1024, "root.parentA.childA1", "user1"); + createSchedulingRequest(3 * 1024, "root.parentA.childA2", "user2"); + + // Run app in childB1 which is under parentB + createSchedulingRequest(1 * 1024, "root.parentB.childB1", "user3"); + + scheduler.update(); + + // The two active child queues under parentA would + // get 80/2=40% memory and vcores + for (int i = 1; i <= 2; i++) { + assertEquals( + 40, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentA.childA" + i, false).getFairShare() + .getMemory() + / nodeMem * 100, .9); + assertEquals( + 40, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentA.childA" + i, false).getFairShare() + .getVirtualCores() + / nodeVcores * 100, .9); + } + + // The child queue under parentB would get 5% memory + assertEquals( + 5, + (double) scheduler.getQueueManager() + .getLeafQueue("root.parentB.childB1", false).getFairShare() + .getMemory() + / nodeMem * 100, .9); + } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm index b9cda2c..d925512 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm @@ -92,7 +92,8 @@ Hadoop MapReduce Next Generation - 
Fair Scheduler each queue to allow sharing the queue's resources in any which way the user wants. A custom policy can be built by extending <<>>. - FifoPolicy, FairSharePolicy (default), and DominantResourceFairnessPolicy are + FifoPolicy, FairSharePolicy (default), DominantResourceFairnessPolicy, + FairShareActiveOnlyPolicy and DominantResourceFairnessActiveOnlyPolicy are built-in and can be readily used. Certain add-ons are not yet supported which existed in the original (MR1) @@ -248,11 +249,16 @@ Allocation file format as many resources as a queue with the default weight. * schedulingPolicy: to set the scheduling policy of any queue. The allowed - values are "fifo"/"fair"/"drf" or any class that extends + values are "fifo"/"fair"/"drf"/"fair-active"/"drf-active" or any class that extends <<>>. Defaults to "fair". If "fifo", apps with earlier submit times are given preference for containers, but apps submitted later may run concurrently if there is leftover space on the cluster after satisfying the earlier app's requests. + Use "fair-active", "drf-active" for queues which have many child queues underneath + it and only a subset of them are active at a point in time. It helps child queues + converge towards fairness more quickly through preemption and computes fair share + only for child queues which have running apps. Useful while using nestedUserQueue + queue placement policy. * aclSubmitApps: a list of users and/or groups that can submit apps to the queue. Refer to the ACLs section below for more info on the format of this @@ -352,6 +358,7 @@ Allocation file format user queues under it -—> 3.0 + fair-active