diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java index 6375c4afd9c..f5c4058beb0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java @@ -145,4 +145,9 @@ public Resource normalizeDown(Resource r, Resource stepFactor) { return Resources.createResource( roundDown((r.getMemorySize()), stepFactor.getMemorySize())); } + + @Override + public boolean isAnyMajorResourceZeroOrNegative(Resource resource) { + return resource.getMemorySize() <= 0f; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index 6fed23b4c2a..06ce1e09a67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -613,4 +613,17 @@ public Resource normalizeDown(Resource r, Resource stepFactor) { } return ret; } + + @Override + public boolean isAnyMajorResourceZeroOrNegative(Resource resource) { + int maxLength = ResourceUtils.getNumberOfKnownResourceTypes(); + for (int i = 0; i < maxLength; i++) { + ResourceInformation resourceInformation = resource.getResourceInformation( + i); + if (resourceInformation.getValue() <= 0L) { + return true; + } + } + return false; + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java index 1c42126019e..0d901d34498 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java @@ -247,6 +247,15 @@ public abstract float divide( public abstract boolean isAnyMajorResourceZero(Resource resource); /** + * Check if any major resource type (i.e. a resource type counted by all + * NodeManagers) has a zero or negative value. + * + * @param resource resource + * @return true if any major resource type is zero or negative. + */ + public abstract boolean isAnyMajorResourceZeroOrNegative(Resource resource); + + /** * Get resource rand normalize down using step-factor * stepFactor. 
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java index 1c08844cf33..c99548dd8ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java @@ -552,6 +552,11 @@ public static boolean isAnyMajorResourceZero(ResourceCalculator rc, return rc.isAnyMajorResourceZero(resource); } + public static boolean isAnyMajorResourceZeroOrNegative(ResourceCalculator rc, + Resource resource) { + return rc.isAnyMajorResourceZeroOrNegative(resource); + } + public static Resource normalizeDown(ResourceCalculator calculator, Resource resource, Resource factor) { return calculator.normalizeDown(resource, factor); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java index 258997083db..4012ba596ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java @@ -24,6 +24,8 @@ import java.util.Iterator; import java.util.PriorityQueue; +import org.apache.commons.logging.Log; 
+import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.PriorityUtilizationQueueOrderingPolicy; @@ -40,7 +42,10 @@ protected final CapacitySchedulerPreemptionContext context; protected final ResourceCalculator rc; - private boolean isReservedPreemptionCandidatesSelector; + protected boolean isReservedPreemptionCandidatesSelector; + + private static final Log LOG = + LogFactory.getLog(AbstractPreemptableResourceCalculator.class); static class TQComparator implements Comparator { private ResourceCalculator rc; @@ -122,23 +127,24 @@ protected void computeFixpointAllocation(Resource totGuarant, TQComparator tqComparator = new TQComparator(rc, totGuarant); PriorityQueue orderedByNeed = new PriorityQueue<>(10, tqComparator); - for (Iterator i = qAlloc.iterator(); i.hasNext();) { + for (Iterator i = qAlloc.iterator(); i.hasNext(); ) { TempQueuePerPartition q = i.next(); Resource used = q.getUsed(); Resource initIdealAssigned; if (Resources.greaterThan(rc, totGuarant, used, q.getGuaranteed())) { - initIdealAssigned = - Resources.add(q.getGuaranteed(), q.untouchableExtra); - } else { + initIdealAssigned = Resources.add( + Resources.componentwiseMin(q.getGuaranteed(), q.getUsed()), + q.untouchableExtra); + } else{ initIdealAssigned = Resources.clone(used); } // perform initial assignment initIdealAssignment(totGuarant, q, initIdealAssigned); - Resources.subtractFrom(unassigned, q.idealAssigned); + // If idealAssigned < (allocated + used + pending), q needs more // resources, so // add it to the list of underserved queues, ordered by need. 
@@ -148,11 +154,15 @@ protected void computeFixpointAllocation(Resource totGuarant, } } + Resource stepFactor = Resource.newInstance(0, 0); + for (ResourceInformation ri : stepFactor.getResources()) { + ri.setValue(1); + } + // assign all cluster resources until no more demand, or no resources are // left while (!orderedByNeed.isEmpty() && Resources.greaterThan(rc, totGuarant, unassigned, Resources.none())) { - Resource wQassigned = Resource.newInstance(0, 0); // we compute normalizedGuarantees capacity based on currently active // queues resetCapacity(unassigned, orderedByNeed, ignoreGuarantee); @@ -166,11 +176,27 @@ protected void computeFixpointAllocation(Resource totGuarant, Collection underserved = getMostUnderservedQueues( orderedByNeed, tqComparator); + // This value will be used in every round to calculate ideal allocation. + // So make a copy to avoid it changed during calculation. + Resource dupUnassignedForTheRound = Resources.clone(unassigned); + for (Iterator i = underserved.iterator(); i .hasNext();) { + if (Resources.lessThanOrEqual(rc, totGuarant, unassigned, + Resources.none())) { + break; + } + TempQueuePerPartition sub = i.next(); - Resource wQavail = Resources.multiplyAndNormalizeUp(rc, unassigned, - sub.normalizedGuarantee, Resource.newInstance(1, 1)); + + // How much resource we offer to the queue (to increase its ideal_alloc + Resource wQavail = Resources.multiplyAndNormalizeUp(rc, + dupUnassignedForTheRound, + sub.normalizedGuarantee, stepFactor); + + // Make sure it is not beyond unassigned + wQavail = Resources.componentwiseMin(wQavail, unassigned); + Resource wQidle = sub.offer(wQavail, rc, totGuarant, isReservedPreemptionCandidatesSelector); Resource wQdone = Resources.subtract(wQavail, wQidle); @@ -180,9 +206,12 @@ protected void computeFixpointAllocation(Resource totGuarant, // queue, recalculating its order based on need. 
orderedByNeed.add(sub); } - Resources.addTo(wQassigned, wQdone); + + Resources.subtractFrom(unassigned, wQdone); + + // Make sure unassigned is always larger than 0 + unassigned = Resources.componentwiseMax(unassigned, Resources.none()); } - Resources.subtractFrom(unassigned, wQassigned); } // Sometimes its possible that, all queues are properly served. So intra diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java index f097e9c6291..a5765a87e6e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java @@ -21,6 +21,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; @@ -132,6 +133,16 @@ private static void deductPreemptableResourcePerApp( * map to hold preempted containers * @param totalPreemptionAllowed * total preemption allowed per round + * @param conservativeDRF + * should we do conservativeDRF preemption or not. 
+ * When true: + * stop preempt container when any resource type < 0 for to- + * preempt. + * This is default preemption behavior of intra-queue preemption + * When false: + * stop preempt container when: all resource type <= 0 for to- + * preempt. + * This is default preemption behavior of inter-queue preemption * @return should we preempt rmContainer. If we should, deduct from * resourceToObtainByPartition */ @@ -140,7 +151,7 @@ public static boolean tryPreemptContainerAndDeductResToObtain( Map resourceToObtainByPartitions, RMContainer rmContainer, Resource clusterResource, Map> preemptMap, - Resource totalPreemptionAllowed) { + Resource totalPreemptionAllowed, boolean conservativeDRF) { ApplicationAttemptId attemptId = rmContainer.getApplicationAttemptId(); // We will not account resource of a container twice or more @@ -152,13 +163,53 @@ public static boolean tryPreemptContainerAndDeductResToObtain( rmContainer.getAllocatedNode()); Resource toObtainByPartition = resourceToObtainByPartitions .get(nodePartition); + if (null == toObtainByPartition) { + return false; + } - if (null != toObtainByPartition - && Resources.greaterThan(rc, clusterResource, toObtainByPartition, + // If a toObtain resource type == 0, set it to -1 to avoid 0 resource + // type affect following doPreemption check: isAnyMajorResourceZero + for (ResourceInformation ri : toObtainByPartition.getResources()) { + if (ri.getValue() == 0) { + ri.setValue(-1); + } + } + + if (Resources.greaterThan(rc, clusterResource, toObtainByPartition, Resources.none()) && Resources.fitsIn(rc, rmContainer.getAllocatedResource(), - totalPreemptionAllowed) - && !Resources.isAnyMajorResourceZero(rc, toObtainByPartition)) { + totalPreemptionAllowed)){ + + boolean doPreempt; + + // How much resource left after preemption happen. 
+ Resource toObtainAfterPreemption = Resources.subtract(toObtainByPartition, + rmContainer.getAllocatedResource()); + + if (conservativeDRF) { + doPreempt = !Resources.isAnyMajorResourceZeroOrNegative(rc, + toObtainByPartition); + } else { + // When we want to do more aggressive preemption, we will do preemption + // only if: + // - The preempt of the container makes positive contribution to the + // to-obtain resource. Positive contribution means any positive + // resource type decreases. + // + // This is example of positive contribution: + // * before: <30, 10, 5>, after <20, 10, -10> + // But this not positive contribution: + // * before: <30, 10, 0>, after <30, 10, -15> + doPreempt = Resources.lessThan(rc, clusterResource, + Resources + .componentwiseMax(toObtainAfterPreemption, Resources.none()), + Resources.componentwiseMax(toObtainByPartition, Resources.none())); + } + + if (!doPreempt) { + return false; + } + Resources.subtractFrom(toObtainByPartition, rmContainer.getAllocatedResource()); Resources.subtractFrom(totalPreemptionAllowed, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java index 748548a761e..3b2fcbb90d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java @@ -111,7 +111,7 @@ .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, 
clusterResource, selectedCandidates, - totalPreemptionAllowed); + totalPreemptionAllowed, false); if (!preempted) { continue; } @@ -187,7 +187,7 @@ private void preemptAMContainers(Resource clusterResource, boolean preempted = CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, preemptMap, - totalPreemptionAllowed); + totalPreemptionAllowed, false); if (preempted) { Resources.subtractFrom(skippedAMSize, c.getAllocatedResource()); } @@ -221,7 +221,7 @@ private void preemptFrom(FiCaSchedulerApp app, // Try to preempt this container CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain( rc, preemptionContext, resToObtainByPartition, c, clusterResource, - selectedContainers, totalPreemptionAllowed); + selectedContainers, totalPreemptionAllowed, false); if (!preemptionContext.isObserveOnly()) { preemptionContext.getRMContext().getDispatcher().getEventHandler() @@ -264,7 +264,7 @@ private void preemptFrom(FiCaSchedulerApp app, // Try to preempt this container CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain( rc, preemptionContext, resToObtainByPartition, c, clusterResource, - selectedContainers, totalPreemptionAllowed); + selectedContainers, totalPreemptionAllowed, false); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java index 1776bd4d946..9f462a1c68c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java @@ -279,7 +279,7 @@ private void calculateToBePreemptedResourcePerApp(Resource clusterResource, // Once unallocated resource is 0, we can stop assigning ideal per app. if (Resources.lessThanOrEqual(rc, clusterResource, queueReassignableResource, Resources.none()) - || Resources.isAnyMajorResourceZero(rc, queueReassignableResource)) { + || Resources.isAnyMajorResourceZeroOrNegative(rc, queueReassignableResource)) { continue; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java index 5b6932e6403..a91fac7bd3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java @@ -230,7 +230,7 @@ private void preemptFromLeastStarvedApp(LeafQueue leafQueue, boolean ret = CapacitySchedulerPreemptionUtils .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext, resToObtainByPartition, c, clusterResource, selectedCandidates, - totalPreemptedResourceAllowed); + totalPreemptedResourceAllowed, true); // Subtract from respective user's resource usage once a container is // selected for preemption. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java index 676c14fc26a..6bd0649a73f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java @@ -40,9 +40,7 @@ AbstractPreemptableResourceCalculator { private static final Log LOG = LogFactory.getLog(PreemptableResourceCalculator.class); - - private boolean isReservedPreemptionCandidatesSelector; - + /** * PreemptableResourceCalculator constructor * @@ -95,8 +93,8 @@ protected void computeIdealResourceDistribution(ResourceCalculator rc, } // first compute the allocation as a fixpoint based on guaranteed capacity - computeFixpointAllocation(tot_guarant, nonZeroGuarQueues, unassigned, - false); + computeFixpointAllocation(tot_guarant, new HashSet<>(nonZeroGuarQueues), + unassigned, false); // if any capacity is left unassigned, distributed among zero-guarantee // queues uniformly (i.e., not based on guaranteed capacity, as this is zero) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java index 9d8297d5590..4214acc5524 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java @@ -151,7 +151,7 @@ Resource offer(Resource avail, ResourceCalculator rc, // # This is for leaf queue only. // max(guaranteed, used) - assigned} // remain = avail - accepted - Resource accepted = Resources.min(rc, clusterResource, + Resource accepted = Resources.componentwiseMin( absMaxCapIdealAssignedDelta, Resources.min(rc, clusterResource, avail, Resources /* @@ -186,6 +186,12 @@ Resource offer(Resource avail, ResourceCalculator rc, accepted = acceptedByLocality(rc, accepted); + // accept should never be < 0 + accepted = Resources.componentwiseMax(accepted, Resources.none()); + + // or more than offered + accepted = Resources.componentwiseMin(accepted, avail); + Resource remain = Resources.subtract(avail, accepted); Resources.addTo(idealAssigned, accepted); return remain; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java index a8e269779dc..a9725846f08 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java @@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -59,6 +60,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.mockito.ArgumentMatcher; @@ -104,10 +106,32 @@ EventHandler mDisp = null; ProportionalCapacityPreemptionPolicy policy = null; Resource clusterResource = null; + // Initialize resource map + Map riMap = new HashMap<>(); + + private void resetResourceInformationMap() { + // Initialize mandatory resources + ResourceInformation memory = ResourceInformation.newInstance( + ResourceInformation.MEMORY_MB.getName(), + ResourceInformation.MEMORY_MB.getUnits(), + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB); + ResourceInformation vcores = ResourceInformation.newInstance( + ResourceInformation.VCORES.getName(), + ResourceInformation.VCORES.getUnits(), + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + 
riMap.put(ResourceInformation.MEMORY_URI, memory); + riMap.put(ResourceInformation.VCORES_URI, vcores); + + ResourceUtils.initializeResourcesFromResourceInformationMap(riMap); + } @SuppressWarnings("unchecked") @Before public void setup() { + resetResourceInformationMap(); + org.apache.log4j.Logger.getRootLogger().setLevel( org.apache.log4j.Level.DEBUG); @@ -142,6 +166,12 @@ public void setup() { partitionToResource = new HashMap<>(); nodeIdToSchedulerNodes = new HashMap<>(); nameToCSQueues = new HashMap<>(); + clusterResource = Resource.newInstance(0, 0); + } + + @After + public void cleanup() { + resetResourceInformationMap(); } public void buildEnv(String labelsConfig, String nodesConfig, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java index e9a8116f1e6..b693bdf6acb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java @@ -21,6 +21,9 @@ import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; +import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; 
import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.junit.Before; import org.junit.Test; @@ -33,31 +36,13 @@ import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class TestPreemptionForQueueWithPriorities extends ProportionalCapacityPreemptionPolicyMockFramework { - // Initialize resource map - private Map riMap = new HashMap<>(); - @Before public void setup() { - - // Initialize mandatory resources - ResourceInformation memory = ResourceInformation.newInstance( - ResourceInformation.MEMORY_MB.getName(), - ResourceInformation.MEMORY_MB.getUnits(), - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB); - ResourceInformation vcores = ResourceInformation.newInstance( - ResourceInformation.VCORES.getName(), - ResourceInformation.VCORES.getUnits(), - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); - riMap.put(ResourceInformation.MEMORY_URI, memory); - riMap.put(ResourceInformation.VCORES_URI, vcores); - - ResourceUtils.initializeResourcesFromResourceInformationMap(riMap); - + rc = new DefaultResourceCalculator(); super.setup(); policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock); } @@ -340,8 +325,8 @@ public void testPriorityPreemptionForHierarchicalOfQueues() * - a2 (capacity=60), p=1 * - b (capacity=30), p=1 * - b1 (capacity=50), p=1 - * - b1 (capacity=50), p=2 - * - c (capacity=40), p=2 + * - b2 (capacity=50), p=2 + * - c (capacity=40), p=1 * */ String labelsConfig = "=100,true"; // default partition @@ -362,7 +347,7 @@ public void testPriorityPreemptionForHierarchicalOfQueues() "a2\t(1,1,n1,,20,false);" + // app2 in a2 "b1\t(1,1,n1,,30,false);" + // app3 in b1 "b2\t(1,1,n1,,29,false);" + // app4 in b2 - "c\t(1,1,n1,,29,false)"; // app5 in c + 
"c\t(1,1,n1,,1,false)"; // app5 in c buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); @@ -370,7 +355,7 @@ public void testPriorityPreemptionForHierarchicalOfQueues() // Preemption should first divide capacities between a / b, and b2 should // get less preemption than b1 (because b2 has higher priority) - verify(mDisp, times(5)).handle(argThat( + verify(mDisp, times(6)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); verify(mDisp, never()).handle(argThat( @@ -426,7 +411,7 @@ public void testPriorityPreemptionWithMandatoryResourceForHierarchicalOfQueues() // Preemption should first divide capacities between a / b, and b1 should // get less preemption than b2 (because b1 has higher priority) - verify(mDisp, never()).handle(argThat( + verify(mDisp, times(3)).handle(argThat( new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); verify(mDisp, never()).handle(argThat( @@ -505,4 +490,56 @@ public void testPriorityPreemptionWithMultipleResource() getAppAttemptId(3)))); } + @Test + public void test3ResourceTypesInterQueuePreemption() throws IOException { + rc = new DominantResourceCalculator(); + when(cs.getResourceCalculator()).thenReturn(rc); + + // Initialize resource map + String RESOURCE_1 = "res1"; + riMap.put(RESOURCE_1, ResourceInformation.newInstance(RESOURCE_1, "", 0, + ResourceTypes.COUNTABLE, 0, Integer.MAX_VALUE)); + + ResourceUtils.initializeResourcesFromResourceInformationMap(riMap); + + /** + * Queue structure is: + * + *
+     *              root
+     *           /  \  \
+     *          a    b  c
+     * 
+ * A / B / C have 33.3 / 33.3 / 33.4 resources + * Total cluster resource have mem=30, cpu=18, GPU=6 + * A uses mem=6, cpu=3, GPU=3 + * B uses mem=6, cpu=3, GPU=3 + * C is asking mem=1,cpu=1,GPU=1 + * + * We expect it can preempt from one of the jobs + */ + String labelsConfig = + "=30:18:6,true;"; + String nodesConfig = + "n1= res=30:18:6;"; // n1 is default partition + String queuesConfig = + // guaranteed,max,used,pending + "root(=[30:18:6 30:18:6 12:12:6 1:1:1]){priority=1};" + //root + "-a(=[10:6:2 10:6:2 6:6:3 0:0:0]){priority=1};" + // a + "-b(=[10:6:2 10:6:2 6:6:3 0:0:0]){priority=1};" + // b + "-c(=[10:6:2 10:6:2 0:0:0 1:1:1]){priority=2}"; // c + String appsConfig= + //queueName\t(priority,resource,host,expression,#repeat,reserved) + "a\t" // app1 in a1 + + "(1,2:2:1,n1,,3,false);" + + "b\t" // app2 in b2 + + "(1,2:2:1,n1,,3,false)"; + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + policy.editSchedule(); + + verify(mDisp, times(1)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java index c8a1f0f70ce..14a3a9ad7d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyInterQueueWithDRF.java @@ -18,11 +18,16 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import java.io.IOException; + import static org.mockito.Matchers.argThat; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -41,8 +46,7 @@ public void setup() { } @Test - public void testInterQueuePreemptionWithMultipleResource() - throws Exception { + public void testInterQueuePreemptionWithMultipleResource() throws Exception { /** * Queue structure is: * @@ -121,4 +125,52 @@ public void testInterQueuePreemptionWithNaturalTerminationFactor() new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( getAppAttemptId(1)))); } -} + + @Test + public void test3ResourceTypesInterQueuePreemption() throws IOException { + // Initialize resource map + String RESOURCE_1 = "res1"; + riMap.put(RESOURCE_1, ResourceInformation + .newInstance(RESOURCE_1, "", 0, ResourceTypes.COUNTABLE, 0, + Integer.MAX_VALUE)); + + ResourceUtils.initializeResourcesFromResourceInformationMap(riMap); + + /* + * root + * / \ \ + * a b c + * + * A / B / C have 33.3 / 33.3 / 33.4 resources + * Total cluster resource have mem=30, cpu=18, GPU=6 + * A uses mem=6, cpu=3, GPU=3 + * B uses mem=6, cpu=3, GPU=3 + * C is asking 
mem=1,cpu=1,GPU=1 + * + * We expect it can preempt from one of the jobs + */ + String labelsConfig = "=30:18:6,true;"; + String nodesConfig = "n1= res=30:18:6;"; // n1 is default partition + String queuesConfig = + // guaranteed,max,used,pending + "root(=[30:18:6 30:18:6 12:12:6 1:1:1]);" + //root + "-a(=[10:7:2 10:6:3 6:6:3 0:0:0]);" + // a + "-b(=[10:6:2 10:6:3 6:6:3 0:0:0]);" + // b + "-c(=[10:5:2 10:6:2 0:0:0 1:1:1])"; // c + String appsConfig = + //queueName\t(priority,resource,host,expression,#repeat,reserved) + "a\t" // app1 in a1 + + "(1,2:2:1,n1,,3,false);" + "b\t" // app2 in b2 + + "(1,2:2:1,n1,,3,false)"; + + buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig); + policy.editSchedule(); + + verify(mDisp, times(0)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(1)))); + verify(mDisp, times(1)).handle(argThat( + new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor( + getAppAttemptId(2)))); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java index a1d89d74dab..41cda0e5245 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueWithDRF.java 
@@ -20,6 +20,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.junit.After; import org.junit.Before; import org.junit.Test;