diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 3f84a23..02adf6f 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -466,7 +466,53 @@ public static boolean isAclEnabled(Configuration conf) {
public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS =
RM_PREFIX + "rm.container-allocation.expiry-interval-ms";
public static final int DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = 600000;
-
+
+ /** If true, run the policy but do not affect the cluster with preemption and
+ * kill events. */
+ public static final String PREEMPTION_OBSERVE_ONLY =
+ RM_PREFIX + "monitor.capacity.preemption.observe_only";
+ public static final boolean DEFAULT_PREEMPTION_OBSERVE_ONLY = false;
+
+ /** Time in milliseconds between invocations of this policy. */
+ public static final String PREEMPTION_MONITORING_INTERVAL =
+ RM_PREFIX + "monitor.capacity.preemption.monitoring_interval";
+ public static final long DEFAULT_PREEMPTION_MONITORING_INTERVAL = 3000L;
+
+ /** Time in milliseconds between requesting a preemption from an application
+ * and killing the container. */
+ public static final String PREEMPTION_WAIT_TIME_BEFORE_KILL =
+ RM_PREFIX + "monitor.capacity.preemption.max_wait_before_kill";
+ public static final long DEFAULT_PREEMPTION_WAIT_TIME_BEFORE_KILL = 15000L;
+
+ /** Maximum percentage of resources preempted in a single round.
+ * By controlling this value one can throttle the pace at which containers
+ * are reclaimed from the cluster. After computing the total desired
+ * preemption, the policy scales it back within this limit. */
+ public static final String TOTAL_PREEMPTION_PER_ROUND =
+ RM_PREFIX + "monitor.capacity.preemption.total_preemption_per_round";
+ public static final float DEFAULT_TOTAL_PREEMPTION_PER_ROUND = 0.1f;
+
+ /** Maximum amount of resources above the target capacity ignored for
+ * preemption. This defines a deadzone around the target capacity that helps
+ * prevent thrashing and oscillations around the computed target balance.
+ * High values would slow the time to capacity and (absent natural
+ * completions) it might prevent convergence to guaranteed capacity. */
+ public static final String PREEMPTION_MAX_IGNORED_OVER_CAPACITY =
+ RM_PREFIX + "monitor.capacity.preemption.max_ignored_over_capacity";
+ public static final float DEFAULT_PREEMPTION_MAX_IGNORED_OVER_CAPACITY =
+ 0.1f;
+ /**
+ * Given a computed preemption target, account for containers naturally
+ * expiring and preempt only this percentage of the delta. This determines
+ * the rate of geometric convergence into the deadzone ({@link
+ * #PREEMPTION_MAX_IGNORED_OVER_CAPACITY}). For example, a termination factor
+ * of 0.5 will reclaim almost 95% of resources within 5 * {@link
+ * #PREEMPTION_WAIT_TIME_BEFORE_KILL}, even absent natural termination. */
+ public static final String PREEMPTION_NATURAL_TERMINATION_FACTOR =
+ RM_PREFIX + "monitor.capacity.preemption.natural_termination_factor";
+ public static final float DEFAULT_PREEMPTION_NATURAL_TERMINATION_FACTOR =
+ 0.2f;
+
/** Path to file with nodes to include.*/
public static final String RM_NODES_INCLUDE_FILE_PATH =
RM_PREFIX + "nodes.include-path";
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 6c247b0..b05f2f2 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -924,6 +924,64 @@
600000
+
+
+ If true, run the policy but do not affect the cluster with preemption and kill events.
+
+ yarn.resourcemanager.monitor.capacity.preemption.observe_only
+ false
+
+
+
+
+ Time in milliseconds between invocations of this
+ ProportionalCapacityPreemptionPolicy.
+
+ yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval
+ 3000
+
+
+
+
+ Time in milliseconds between requesting a preemption from an application and killing
+ the container.
+
+ yarn.resourcemanager.monitor.capacity.preemption.max_wait_before_kill
+ 15000
+
+
+
+
+ Maximum percentage of resources preempted in a single round. By controlling this value one
+ can throttle the pace at which containers are reclaimed from the cluster. After computing
+ the total desired preemption, the policy scales it back within this limit.
+
+ yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round
+ 0.1
+
+
+
+
+ Maximum amount of resources above the target capacity ignored for preemption.
+ This defines a deadzone around the target capacity that helps prevent thrashing and
+ oscillations around the computed target balance. High values would slow the time to capacity
+ and (absent natural completions) it might prevent convergence to guaranteed capacity.
+
+ yarn.resourcemanager.monitor.capacity.preemption.max_ignored_over_capacity
+ 0.1
+
+
+
+
+ Given a computed preemption target, account for containers naturally expiring and preempt
+ only this percentage of the delta. This determines the rate of geometric convergence into
+ the deadzone (max_ignored_over_capacity). For example, a termination factor of 0.5 will reclaim
+ almost 95% of resources within 5 * max_wait_before_kill, even absent natural termination.
+
+ yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor
+ 0.2
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 36383502..7d062e3 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy;
@@ -144,28 +145,28 @@ public void init(Configuration config, RMContext context,
CapacitySchedulerConfiguration csConfig = scheduler.getConfiguration();
maxIgnoredOverCapacity = csConfig.getDouble(
- CapacitySchedulerConfiguration.PREEMPTION_MAX_IGNORED_OVER_CAPACITY,
- CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_MAX_IGNORED_OVER_CAPACITY);
+ YarnConfiguration.PREEMPTION_MAX_IGNORED_OVER_CAPACITY,
+ YarnConfiguration.DEFAULT_PREEMPTION_MAX_IGNORED_OVER_CAPACITY);
naturalTerminationFactor = csConfig.getDouble(
- CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
- CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_NATURAL_TERMINATION_FACTOR);
+ YarnConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
+ YarnConfiguration.DEFAULT_PREEMPTION_NATURAL_TERMINATION_FACTOR);
maxWaitTime = csConfig.getLong(
- CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
- CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_WAIT_TIME_BEFORE_KILL);
+ YarnConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
+ YarnConfiguration.DEFAULT_PREEMPTION_WAIT_TIME_BEFORE_KILL);
monitoringInterval = csConfig.getLong(
- CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
- CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_MONITORING_INTERVAL);
+ YarnConfiguration.PREEMPTION_MONITORING_INTERVAL,
+ YarnConfiguration.DEFAULT_PREEMPTION_MONITORING_INTERVAL);
percentageClusterPreemptionAllowed = csConfig.getFloat(
- CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND,
- CapacitySchedulerConfiguration.DEFAULT_TOTAL_PREEMPTION_PER_ROUND);
+ YarnConfiguration.TOTAL_PREEMPTION_PER_ROUND,
+ YarnConfiguration.DEFAULT_TOTAL_PREEMPTION_PER_ROUND);
observeOnly = csConfig.getBoolean(
- CapacitySchedulerConfiguration.PREEMPTION_OBSERVE_ONLY,
- CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_OBSERVE_ONLY);
+ YarnConfiguration.PREEMPTION_OBSERVE_ONLY,
+ YarnConfiguration.DEFAULT_PREEMPTION_OBSERVE_ONLY);
lazyPreempionEnabled = csConfig.getBoolean(
CapacitySchedulerConfiguration.LAZY_PREEMPTION_ENALBED,
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 6db5074..b2ca221 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -1045,50 +1045,59 @@ public boolean getLazyPreemptionEnabled() {
private static final String PREEMPTION_CONFIG_PREFIX =
"yarn.resourcemanager.monitor.capacity.preemption.";
- /** If true, run the policy but do not affect the cluster with preemption and
- * kill events. */
+ /**
+ * @deprecated Moved to {@link YarnConfiguration}.
+ */
+ @Deprecated
public static final String PREEMPTION_OBSERVE_ONLY =
PREEMPTION_CONFIG_PREFIX + "observe_only";
- public static final boolean DEFAULT_PREEMPTION_OBSERVE_ONLY = false;
+ public static final boolean DEFAULT_PREEMPTION_OBSERVE_ONLY
+ = YarnConfiguration.DEFAULT_PREEMPTION_OBSERVE_ONLY;
- /** Time in milliseconds between invocations of this policy */
+ /**
+ * @deprecated Moved to {@link YarnConfiguration}.
+ */
+ @Deprecated
public static final String PREEMPTION_MONITORING_INTERVAL =
PREEMPTION_CONFIG_PREFIX + "monitoring_interval";
- public static final long DEFAULT_PREEMPTION_MONITORING_INTERVAL = 3000L;
+ public static final long DEFAULT_PREEMPTION_MONITORING_INTERVAL
+ = YarnConfiguration.DEFAULT_PREEMPTION_MONITORING_INTERVAL;
- /** Time in milliseconds between requesting a preemption from an application
- * and killing the container. */
+ /**
+ * @deprecated Moved to {@link YarnConfiguration}.
+ */
+ @Deprecated
public static final String PREEMPTION_WAIT_TIME_BEFORE_KILL =
PREEMPTION_CONFIG_PREFIX + "max_wait_before_kill";
- public static final long DEFAULT_PREEMPTION_WAIT_TIME_BEFORE_KILL = 15000L;
+ public static final long DEFAULT_PREEMPTION_WAIT_TIME_BEFORE_KILL
+ = YarnConfiguration.DEFAULT_PREEMPTION_WAIT_TIME_BEFORE_KILL;
- /** Maximum percentage of resources preemptionCandidates in a single round. By
- * controlling this value one can throttle the pace at which containers are
- * reclaimed from the cluster. After computing the total desired preemption,
- * the policy scales it back within this limit. */
+ /**
+ * @deprecated Moved to {@link YarnConfiguration}.
+ */
+ @Deprecated
public static final String TOTAL_PREEMPTION_PER_ROUND =
PREEMPTION_CONFIG_PREFIX + "total_preemption_per_round";
- public static final float DEFAULT_TOTAL_PREEMPTION_PER_ROUND = 0.1f;
+ public static final float DEFAULT_TOTAL_PREEMPTION_PER_ROUND
+ = YarnConfiguration.DEFAULT_TOTAL_PREEMPTION_PER_ROUND;
- /** Maximum amount of resources above the target capacity ignored for
- * preemption. This defines a deadzone around the target capacity that helps
- * prevent thrashing and oscillations around the computed target balance.
- * High values would slow the time to capacity and (absent natural
- * completions) it might prevent convergence to guaranteed capacity. */
+ /**
+ * @deprecated Moved to {@link YarnConfiguration}.
+ */
+ @Deprecated
public static final String PREEMPTION_MAX_IGNORED_OVER_CAPACITY =
PREEMPTION_CONFIG_PREFIX + "max_ignored_over_capacity";
- public static final float DEFAULT_PREEMPTION_MAX_IGNORED_OVER_CAPACITY = 0.1f;
+ public static final float DEFAULT_PREEMPTION_MAX_IGNORED_OVER_CAPACITY
+ = YarnConfiguration.DEFAULT_PREEMPTION_MAX_IGNORED_OVER_CAPACITY;
+
/**
- * Given a computed preemption target, account for containers naturally
- * expiring and preempt only this percentage of the delta. This determines
- * the rate of geometric convergence into the deadzone ({@link
- * #PREEMPTION_MAX_IGNORED_OVER_CAPACITY}). For example, a termination factor of 0.5
- * will reclaim almost 95% of resources within 5 * {@link
- * #PREEMPTION_WAIT_TIME_BEFORE_KILL}, even absent natural termination. */
+ * @deprecated Moved to {@link YarnConfiguration}.
+ */
+ @Deprecated
public static final String PREEMPTION_NATURAL_TERMINATION_FACTOR =
PREEMPTION_CONFIG_PREFIX + "natural_termination_factor";
- public static final float DEFAULT_PREEMPTION_NATURAL_TERMINATION_FACTOR =
- 0.2f;
+ public static final float DEFAULT_PREEMPTION_NATURAL_TERMINATION_FACTOR
+ = YarnConfiguration.DEFAULT_PREEMPTION_NATURAL_TERMINATION_FACTOR;
/**
* When calculating which containers to be preempted, we will try to preempt
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index 0f5d526..8cd2cbb 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -143,14 +143,14 @@ public int getValue() {
public void setup() {
conf = new CapacitySchedulerConfiguration(new Configuration(false));
conf.setLong(
- CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000);
- conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
+ YarnConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000);
+ conf.setLong(YarnConfiguration.PREEMPTION_MONITORING_INTERVAL,
3000);
// report "ideal" preempt
- conf.setFloat(CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND,
+ conf.setFloat(YarnConfiguration.TOTAL_PREEMPTION_PER_ROUND,
1.0f);
conf.setFloat(
- CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
+ YarnConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
1.0f);
conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
ProportionalCapacityPreemptionPolicy.class.getCanonicalName());
@@ -275,7 +275,7 @@ public void testExpireKill() {
{ 3, 0, 0, 0 }, // subqueues
};
conf.setLong(
- CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
+ YarnConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
killTime);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
@@ -314,7 +314,7 @@ public void testDeadzone() {
{ 3, 0, 0, 0 }, // subqueues
};
conf.setFloat(
- CapacitySchedulerConfiguration.PREEMPTION_MAX_IGNORED_OVER_CAPACITY,
+ YarnConfiguration.PREEMPTION_MAX_IGNORED_OVER_CAPACITY,
(float) 0.1);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
@@ -604,7 +604,7 @@ public void testNaturalTermination() {
{ 3, 0, 0, 0 }, // subqueues
};
conf.setFloat(
- CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
+ YarnConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
(float) 0.1);
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
@@ -626,7 +626,7 @@ public void testObserveOnly() {
{ -1, 1, 1, 0 }, // req granularity
{ 3, 0, 0, 0 }, // subqueues
};
- conf.setBoolean(CapacitySchedulerConfiguration.PREEMPTION_OBSERVE_ONLY,
+ conf.setBoolean(YarnConfiguration.PREEMPTION_OBSERVE_ONLY,
true);
when(mCS.getConfiguration()).thenReturn(
new CapacitySchedulerConfiguration(conf));
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
index e31a889..c1f57d0 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
@@ -18,21 +18,65 @@
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
+import org.apache.hadoop.yarn.util.Clock;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
public class TestProportionalCapacityPreemptionPolicyForNodePartitions
extends ProportionalCapacityPreemptionPolicyMockFramework {
+ @SuppressWarnings("unchecked")
@Before
public void setup() {
+ org.apache.log4j.Logger.getRootLogger().setLevel(
+ org.apache.log4j.Level.DEBUG);
+
+ conf = new CapacitySchedulerConfiguration(new Configuration(false));
+ conf.setLong(
+ YarnConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000);
+ conf.setLong(YarnConfiguration.PREEMPTION_MONITORING_INTERVAL,
+ 3000);
+ // report "ideal" preempt
+ conf.setFloat(YarnConfiguration.TOTAL_PREEMPTION_PER_ROUND,
+ (float) 1.0);
+ conf.setFloat(
+ YarnConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
+ (float) 1.0);
+
+ mClock = mock(Clock.class);
+ cs = mock(CapacityScheduler.class);
+ when(cs.getResourceCalculator()).thenReturn(rc);
+ when(cs.getPreemptionManager()).thenReturn(new PreemptionManager());
+ when(cs.getConfiguration()).thenReturn(conf);
+
+ nlm = mock(RMNodeLabelsManager.class);
+ mDisp = mock(EventHandler.class);
+
+ rmContext = mock(RMContext.class);
+ when(rmContext.getNodeLabelManager()).thenReturn(nlm);
+ Dispatcher disp = mock(Dispatcher.class);
+ when(rmContext.getDispatcher()).thenReturn(disp);
+ when(disp.getEventHandler()).thenReturn(mDisp);
+ when(cs.getRMContext()).thenReturn(rmContext);
+
super.setup();
policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java
index e7157b8..ea87d41 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java
@@ -22,18 +22,22 @@
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
import org.junit.Before;
@@ -47,15 +51,35 @@
import java.util.Set;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
public class TestCapacitySchedulerLazyPreemption
extends CapacitySchedulerPreemptionTestBase {
@Override
@Before
public void setUp() throws Exception {
- super.setUp();
+ conf = new YarnConfiguration();
+ conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ ResourceScheduler.class);
+ conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
+ conf.setClass(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
+ ProportionalCapacityPreemptionPolicy.class, SchedulingEditPolicy.class);
+ conf = TestUtils.getConfigurationWithMultipleQueues(this.conf);
+
+ // Set preemption related configurations
+ conf.setInt(YarnConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
+ 0);
conf.setBoolean(CapacitySchedulerConfiguration.LAZY_PREEMPTION_ENALBED,
true);
+ conf.setFloat(YarnConfiguration.TOTAL_PREEMPTION_PER_ROUND,
+ 1.0f);
+ conf.setFloat(
+ YarnConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR,
+ 1.0f);
+ mgr = new NullRMNodeLabelsManager();
+ mgr.init(this.conf);
+ clock = mock(Clock.class);
+ when(clock.getTime()).thenReturn(0L);
}
@Test (timeout = 60000)