diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
index e9de05227ec..add44a39103 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
@@ -55,12 +55,16 @@ public static ResourceOption newInstance(Resource resource,
* Get timeout for tolerant of resource over-commitment
* Note: negative value means no timeout so that allocated containers will
* keep running until the end even under resource over-commitment cases.
- * @return overCommitTimeout of the ResourceOption
+ * @return overCommitTimeout of the ResourceOption in milliseconds
*/
@Private
@Evolving
public abstract int getOverCommitTimeout();
-
+
+ /**
+ * Set the overcommit timeout.
+ * @param overCommitTimeout Timeout in milliseconds; a negative value means
+ * no timeout.
+ */
@Private
@Evolving
protected abstract void setOverCommitTimeout(int overCommitTimeout);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index c50950bbd9a..e82cd3cff15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -677,6 +677,11 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
if (capability != null) {
nodeHeartBeatResponse.setResource(capability);
}
+ // Check if we got an event (AdminService) that updated the resources
+ if (rmNode.isUpdatedCapability()) {
+ nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
+ rmNode.resetUpdatedCapability();
+ }
// 7. Send Container Queuing Limits back to the Node. This will be used by
// the node to truncate the number of Containers queued for execution.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
index c77d29c89ae..d3b515e8241 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
@@ -104,6 +104,17 @@
*/
public Resource getTotalCapability();
+ /**
+ * Whether the total capability of the node has been updated.
+ * @return If the capability has been updated.
+ */
+ boolean isUpdatedCapability();
+
+ /**
+ * Mark the capability update event as processed.
+ */
+ void resetUpdatedCapability();
+
/**
* the aggregated resource utilization of the containers.
* @return the aggregated resource utilization of the containers.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index d33ee44de4d..3b4ccc60524 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -125,6 +125,7 @@
/* Snapshot of total resources before receiving decommissioning command */
private volatile Resource originalTotalCapability;
private volatile Resource totalCapability;
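+ /** Whether the capability was updated through an event (e.g. AdminService)
+ * and has not been reported back to the NM yet. */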
+ private volatile boolean updatedCapability = false;
private final Node node;
private String healthReport;
@@ -455,6 +456,16 @@ public Resource getTotalCapability() {
return this.totalCapability;
}
+ @Override
+ public boolean isUpdatedCapability() {
+ return this.updatedCapability;
+ }
+
+ @Override
+ public void resetUpdatedCapability() {
+ this.updatedCapability = false;
+ }
+
@Override
public String getRackName() {
return node.getNetworkLocation();
@@ -813,11 +824,12 @@ private static void handleRunningAppOnNode(RMNodeImpl rmNode,
.handle(new RMAppRunningOnNodeEvent(appId, nodeId));
}
- private static void updateNodeResourceFromEvent(RMNodeImpl rmNode,
- RMNodeResourceUpdateEvent event){
- ResourceOption resourceOption = event.getResourceOption();
- // Set resource on RMNode
- rmNode.totalCapability = resourceOption.getResource();
+ private static void updateNodeResourceFromEvent(RMNodeImpl rmNode,
+ RMNodeResourceUpdateEvent event){
+ ResourceOption resourceOption = event.getResourceOption();
+ // Set resource on RMNode
+ rmNode.totalCapability = resourceOption.getResource();
+ rmNode.updatedCapability = true;
}
private static NodeHealthStatus updateRMNodeFromStatusEvents(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index a798b97af5f..16ab52d90ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -92,10 +92,9 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
-
-
-
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ReleaseContainerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
@@ -118,6 +117,8 @@
private static final Log LOG = LogFactory.getLog(AbstractYarnScheduler.class);
+ private static final Resource ZERO_RESOURCE = Resource.newInstance(0, 0);
+
protected final ClusterNodeTracker<N> nodeTracker =
new ClusterNodeTracker<>();
@@ -830,6 +831,7 @@ public void updateNodeResource(RMNode nm,
writeLock.lock();
SchedulerNode node = getSchedulerNode(nm.getNodeID());
Resource newResource = resourceOption.getResource();
+ final int timeout = resourceOption.getOverCommitTimeout();
Resource oldResource = node.getTotalResource();
if (!oldResource.equals(newResource)) {
// Notify NodeLabelsManager about this change
@@ -838,14 +840,20 @@ public void updateNodeResource(RMNode nm,
// Log resource change
LOG.info("Update resource on node: " + node.getNodeName() + " from: "
- + oldResource + ", to: " + newResource);
+ + oldResource + ", to: " + newResource + " in " + timeout + " ms");
nodeTracker.removeNode(nm.getNodeID());
// update resource to node
node.updateTotalResource(newResource);
+ node.setOvercommitTimeout(timeout);
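+ // a non-negative timeout asks the AMs to preempt containers right away;
+ // nodeUpdate() kills whatever is still over committed once it expires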
+ if (timeout >= 0) {
+ markContainersIfOvercommitted(
+ node, SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION);
+ }
nodeTracker.addNode((N) node);
} else{
// Log resource change
LOG.warn("Update resource on node: " + node.getNodeName()
@@ -1188,6 +1196,11 @@ protected void nodeUpdate(RMNode nm) {
updateNodeResourceUtilization(nm, schedulerNode);
}
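+ // kill containers if the node is still over committed after the timeout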
+ if (schedulerNode != null) {
+ markContainersIfOvercommitted(schedulerNode,
+ SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE);
+ }
+
// Now node data structures are up-to-date and ready for scheduling.
if(LOG.isDebugEnabled()) {
LOG.debug(
@@ -1197,6 +1210,42 @@ protected void nodeUpdate(RMNode nm) {
}
}
+ /**
+ * Check if the node is over committed and, if so, mark containers to free
+ * up resources: either for preemption (notifying the AM) or for killing.
+ * @param schedulerNode The node to check for over commitment.
+ * @param eventType Scheduler event: MARK_CONTAINER_FOR_PREEMPTION or
+ * MARK_CONTAINER_FOR_KILLABLE.
+ */
+ private void markContainersIfOvercommitted(
+ SchedulerNode schedulerNode, SchedulerEventType eventType) {
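+ // only the KILL path waits for the overcommit timeout to expire;
+ // preemption requests are sent to the AM immediately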
+ boolean checkTimeOut =
+ (eventType == SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE);
+ if (schedulerNode.isOvercommitted(checkTimeOut)) {
+ LOG.debug(schedulerNode.getNodeID() +
+ " is over committed, free up resources");
+ List<RMContainer> containers =
+ schedulerNode.getRunningContainersToKill();
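+ // copy the unallocated resources; a negative value means the node is
+ // over committed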
+ Resource unallocated = Resource.newInstance(
+ schedulerNode.getUnallocatedResource());
+ for (RMContainer container : containers) {
+ if (Resources.fitsIn(ZERO_RESOURCE, unallocated)) {
+ LOG.debug("Enough free resources " + unallocated);
+ break;
+ }
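+ // assume the resources are freed up once the container is preempted/killed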
+ Resources.addTo(unallocated, container.getAllocatedResource());
+
+ LOG.info("Send " + eventType + " for " + container.getContainerId() +
+ " to free up " + container.getAllocatedResource());
+ ApplicationAttemptId appId = container.getApplicationAttemptId();
+ ContainerPreemptEvent event =
+ new ContainerPreemptEvent(appId, container, eventType);
+ this.rmContext.getDispatcher().getEventHandler().handle(event);
+ }
+ }
+ }
+
@Override
public Resource getNormalizedResource(Resource requestedResource,
Resource maxResourceCapability) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index b35aeba83b3..78cb29b6ff6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.LinkedList;
import java.util.HashMap;
import java.util.List;
+import org.apache.hadoop.util.Time;
@@ -60,6 +61,8 @@
private static final Log LOG = LogFactory.getLog(SchedulerNode.class);
+ private static final Resource ZERO_RESOURCE = Resource.newInstance(0, 0);
+
private Resource unallocatedResource = Resource.newInstance(0, 0);
private Resource allocatedResource = Resource.newInstance(0, 0);
private Resource totalResource;
@@ -69,6 +72,8 @@
ResourceUtilization.newInstance(0, 0, 0f);
private volatile ResourceUtilization nodeUtilization =
ResourceUtilization.newInstance(0, 0, 0f);
+ /** Time stamp (in ms) when over committed resources time out; -1 if unset. */
+ private long overcommitTimeout = -1;
/* set of containers that are allocated containers */
private final Map<ContainerId, ContainerInfo> launchedContainers =
@@ -118,6 +123,40 @@ public synchronized void updateTotalResource(Resource resource){
this.allocatedResource);
}
+ /**
+ * Set the timeout for the node to stop over committing resources. Once it
+ * expires, the scheduler starts killing containers until the resources are
+ * no longer over committed. Setting it again overrides a previous timeout.
+ *
+ * @param timeout Timeout in milliseconds.
+ */
+ public synchronized void setOvercommitTimeout(long timeout) {
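+ // negative timeouts are ignored and keep any previously set deadline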
+ if (timeout >= 0) {
+ if (this.overcommitTimeout != -1) {
+ LOG.debug("The over commit timeout for " + getNodeID() +
+ " was already set to " + this.overcommitTimeout);
+ }
+ this.overcommitTimeout = Time.now() + timeout;
+ }
+ }
+
+ /**
+ * Check if the node is over committed. It checks whether the overcommit
+ * timeout has expired (when requested) and whether the unallocated
+ * resources are negative.
+ *
+ * @param checkTimeOut If we should check the timeout or just the resources.
+ * @return If the node is over committed.
+ */
+ public synchronized boolean isOvercommitted(boolean checkTimeOut) {
+ if (checkTimeOut) {
+ if (this.overcommitTimeout == -1 ||
+ Time.now() < this.overcommitTimeout) {
+ return false;
+ }
+ }
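+ // the node is over committed if the unallocated resources are negative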
+ return !Resources.fitsIn(ZERO_RESOURCE, this.unallocatedResource);
+ }
+
/**
* Get the ID of the node which contains both its hostname and port.
* @return The ID of the node.
@@ -371,6 +410,29 @@ public int getNumContainers() {
return result;
}
+ /**
+ * Get the containers running on the node ordered by which to kill first:
+ * opportunistic before guaranteed containers, and AM containers last.
+ * @return A copy of the running containers ordered by which to kill first.
+ */
+ public synchronized List<RMContainer> getRunningContainersToKill() {
+ List<RMContainer> result = new ArrayList<>();
+ for (ContainerInfo info : launchedContainers.values()) {
+ result.add(info.container);
+ }
+ Collections.sort(result, (c1, c2) -> {
+ // kill AM containers last
+ int cmp = Boolean.compare(c1.isAMContainer(), c2.isAMContainer());
+ if (cmp != 0) {
+ return cmp;
+ }
+ // kill OPPORTUNISTIC containers before GUARANTEED ones
+ cmp = c2.getExecutionType().compareTo(c1.getExecutionType());
+ if (cmp != 0) {
+ return cmp;
+ }
+ // among equals, kill the most recently started container first
+ return Long.compare(c2.getCreationTime(), c1.getCreationTime());
+ });
+ return result;
+ }
+
/**
* Get the container for the specified container ID.
* @param containerId The container ID
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
index c0af0413a0f..3b72ca1c0e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -190,6 +190,15 @@ public Resource getTotalCapability() {
return this.perNode;
}
+ @Override
+ public boolean isUpdatedCapability() {
+ return false;
+ }
+
+ @Override
+ public void resetUpdatedCapability() {
+ }
+
@Override
public String getRackName() {
return this.rackName;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
index ba409b1386b..e41b8b66ab6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
@@ -18,12 +18,14 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -1018,4 +1020,22 @@ public void testContainerRecoveredByNode() throws Exception {
System.out.println("Stopping testContainerRecoveredByNode");
}
}
+
+ @Test
+ public void testGetRunningContainersToKill() {
+ SchedulerNode node = mock(SchedulerNode.class);
+ assertEquals(Collections.emptyList(), node.getRunningContainersToKill());
+
+ RMContainer amContainer0 = mock(RMContainer.class);
+ when(amContainer0.isAMContainer()).thenReturn(true);
+ when(amContainer0.getExecutionType()).thenReturn(ExecutionType.GUARANTEED);
+ node.allocateContainer(amContainer0);
+
+ RMContainer oppContainer0 = mock(RMContainer.class);
+ when(oppContainer0.isAMContainer()).thenReturn(false);
+ when(oppContainer0.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC);
+ node.allocateContainer(oppContainer0);
+
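+ // the node is a mock, so this returns the default stubbed empty list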
+ assertEquals(Collections.emptyList(), node.getRunningContainersToKill());
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java
index 60e25ed83ac..92d866ed722 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerTestBase.java
@@ -27,7 +27,7 @@
import java.util.Set;
public class CapacitySchedulerTestBase {
- protected final int GB = 1024;
+ protected static final int GB = 1024;
protected static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
protected static final String B = CapacitySchedulerConfiguration.ROOT + ".b";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index aac7f15a5a5..0708aee187b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -41,12 +41,11 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import com.google.common.collect.Sets;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.net.NetworkTopology;
@@ -57,6 +56,7 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.LocalConfigurationProvider;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -68,6 +68,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -76,6 +77,10 @@
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.PreemptionContainer;
+import org.apache.hadoop.yarn.api.records.PreemptionContract;
+import org.apache.hadoop.yarn.api.records.PreemptionMessage;
+import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
@@ -178,9 +183,12 @@
import com.google.common.collect.ImmutableSet;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class TestCapacityScheduler extends CapacitySchedulerTestBase {
- private static final Log LOG = LogFactory.getLog(TestCapacityScheduler.class);
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestCapacityScheduler.class);
private final static ContainerUpdates NULL_UPDATE_REQUESTS =
new ContainerUpdates();
private ResourceManager resourceManager = null;
@@ -1309,113 +1317,190 @@ public void testAllocateReorder() throws Exception {
@Test
public void testResourceOverCommit() throws Exception {
- int waitCount;
Configuration conf = new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
MockRM rm = new MockRM(conf);
rm.start();
+ ResourceScheduler scheduler = rm.getResourceScheduler();
- MockNM nm1 = rm.registerNode("127.0.0.1:1234", 4 * GB);
- RMApp app1 = rm.submitApp(2048);
- // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
- nm1.nodeHeartbeat(true);
- RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
- MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
- am1.registerAppAttempt();
- SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
- nm1.getNodeId());
- // check node report, 2 GB used and 2 GB available
- Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
- Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemorySize());
+ MockNM nm = rm.registerNode("127.0.0.1:1234", 4 * GB);
+ NodeId nmId = nm.getNodeId();
+ RMApp app = rm.submitApp(2048);
+ // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm
+ nm.nodeHeartbeat(true);
+ RMAppAttempt attempt1 = app.getCurrentAppAttempt();
+ MockAM am = rm.sendAMLaunched(attempt1.getAppAttemptId());
+ am.registerAppAttempt();
+ assertMemory(scheduler, nmId, 2 * GB, 2 * GB);
- // add request for containers
- am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
- AllocateResponse alloc1Response = am1.schedule(); // send the request
+ // add request for 1 container of 2 GB
+ am.addRequests(new String[] {"127.0.0.1", "127.0.0.2"}, 2 * GB, 1, 1);
+ AllocateResponse alloc1Response = am.schedule(); // send the request
// kick the scheduler, 2 GB given to AM1, resource remaining 0
- nm1.nodeHeartbeat(true);
- while (alloc1Response.getAllocatedContainers().size() < 1) {
+ nm.nodeHeartbeat(true);
+ while (alloc1Response.getAllocatedContainers().isEmpty()) {
LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(100);
- alloc1Response = am1.schedule();
+ alloc1Response = am.schedule();
}
List<Container> allocated1 = alloc1Response.getAllocatedContainers();
- Assert.assertEquals(1, allocated1.size());
- Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize());
- Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
+ assertEquals(1, allocated1.size());
+ Container c1 = allocated1.get(0);
+ assertEquals(2 * GB, c1.getResource().getMemorySize());
+ assertEquals(nmId, c1.getNodeId());
- report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
// check node report, 4 GB used and 0 GB available
- Assert.assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
- Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize());
+ assertMemory(scheduler, nmId, 4 * GB, 0);
+ nm.nodeHeartbeat(true);
+ assertEquals(4 * GB, nm.getCapability().getMemorySize());
- // check container is assigned with 2 GB.
- Container c1 = allocated1.get(0);
- Assert.assertEquals(2 * GB, c1.getResource().getMemorySize());
-
- // update node resource to 2 GB, so resource is over-consumed.
- Map<NodeId, ResourceOption> nodeResourceMap =
- new HashMap<NodeId, ResourceOption>();
- nodeResourceMap.put(nm1.getNodeId(),
- ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1));
- UpdateNodeResourceRequest request =
- UpdateNodeResourceRequest.newInstance(nodeResourceMap);
- AdminService as = ((MockRM)rm).getAdminService();
- as.updateNodeResource(request);
-
- waitCount = 0;
- while (waitCount++ != 20) {
- report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- if (report_nm1.getAvailableResource().getMemorySize() != 0) {
- break;
- }
- LOG.info("Waiting for RMNodeResourceUpdateEvent to be handled... Tried "
- + waitCount + " times already..");
- Thread.sleep(1000);
- }
- // Now, the used resource is still 4 GB, and available resource is minus value.
- report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize());
- Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemorySize());
+ // update node resource to 2 GB, so resource is over-consumed
+ updateNodeResource(rm, nmId, 2 * GB, 1, -1);
- // Check container can complete successfully in case of resource over-commitment.
+ LOG.info("Waiting for RMNodeResourceUpdateEvent to be handled...");
+ GenericTestUtils.waitFor(() -> {
+ SchedulerNodeReport report = scheduler.getNodeReport(nmId);
+ return report.getAvailableResource().getMemorySize() != 0;
+ }, 100, 10 * 1000);
+
+ // the used resource should still be 4 GB and the available resource negative
+ assertMemory(scheduler, nmId, 4 * GB, -2 * GB);
+ nm.nodeHeartbeat(true);
+ assertEquals(2 * GB, nm.getCapability().getMemorySize());
+
+ // check container can complete successfully with resource over-commitment
ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
c1.getId(), ContainerState.COMPLETE, "", 0, c1.getResource());
- nm1.containerStatus(containerStatus);
- waitCount = 0;
- while (attempt1.getJustFinishedContainers().size() < 1
- && waitCount++ != 20) {
- LOG.info("Waiting for containers to be finished for app 1... Tried "
- + waitCount + " times already..");
+ nm.containerStatus(containerStatus);
+
+ LOG.info("Waiting for containers to be finished for app 1...");
+ GenericTestUtils.waitFor(
+ () -> attempt1.getJustFinishedContainers().size() == 1, 100, 2000);
+ assertEquals(1, am.schedule().getCompletedContainersStatuses().size());
+ assertMemory(scheduler, nmId, 2 * GB, 0);
+
+ // verify no NPE is triggered in schedule after resource is updated
+ am.addRequests(new String[] {"127.0.0.1", "127.0.0.2"}, 3 * GB, 1, 1);
+ AllocateResponse allocResponse2 = am.schedule();
+ assertTrue("Shouldn't have enough resource to allocate containers",
+ allocResponse2.getAllocatedContainers().isEmpty());
+ // try 10 times as scheduling is an async process
+ for (int i = 0; i < 10; i++) {
Thread.sleep(100);
+ allocResponse2 = am.schedule();
+ assertTrue("Shouldn't have enough resource to allocate containers",
+ allocResponse2.getAllocatedContainers().isEmpty());
}
- Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
- Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
- report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
- // As container return 2 GB back, the available resource becomes 0 again.
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());
-
- // Verify no NPE is trigger in schedule after resource is updated.
- am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1);
- alloc1Response = am1.schedule();
- Assert.assertEquals("Shouldn't have enough resource to allocate containers",
- 0, alloc1Response.getAllocatedContainers().size());
- int times = 0;
- // try 10 times as scheduling is async process.
- while (alloc1Response.getAllocatedContainers().size() < 1
- && times++ < 10) {
- LOG.info("Waiting for containers to be allocated for app 1... Tried "
- + times + " times already..");
+
+ // increase the resources again to 5 GB to schedule the 3GB container
+ updateNodeResource(rm, nmId, 5 * GB, 1, -1);
+ GenericTestUtils.waitFor(() -> {
+ SchedulerNodeReport report = scheduler.getNodeReport(nmId);
+ return report.getAvailableResource().getMemorySize() > 0;
+ }, 100, 5 * 1000);
+ assertMemory(scheduler, nmId, 2 * GB, 3 * GB);
+
+ // kick the scheduling and check it took effect
+ nm.nodeHeartbeat(true);
+ while (allocResponse2.getAllocatedContainers().isEmpty()) {
+ LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(100);
+ allocResponse2 = am.schedule();
}
- Assert.assertEquals("Shouldn't have enough resource to allocate containers",
- 0, alloc1Response.getAllocatedContainers().size());
+ assertEquals(1, allocResponse2.getAllocatedContainers().size());
+ Container c2 = allocResponse2.getAllocatedContainers().get(0);
+ assertEquals(3 * GB, c2.getResource().getMemorySize());
+ assertEquals(nmId, c2.getNodeId());
+ assertMemory(scheduler, nmId, 5 * GB, 0);
+
+ // reduce the resources and trigger a preempt request to the AM for c2
+ long t0 = Time.now();
+ updateNodeResource(rm, nmId, 3 * GB, 1, 2000);
+ GenericTestUtils.waitFor(() -> {
+ SchedulerNodeReport report = scheduler.getNodeReport(nmId);
+ if (report == null) {
+ return false;
+ }
+ Resource avail = report.getAvailableResource();
+ return avail.getMemorySize() < 0;
+ }, 200, 5 * 1000);
+ assertMemory(scheduler, nmId, 5 * GB, -2 * GB);
+
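+ // the AM should receive a preemption message for c2 in the next allocate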
+ AllocateResponse allocResponse4 = am.schedule();
+ PreemptionMessage preemptMsg = allocResponse4.getPreemptionMessage();
+ assertNotNull(preemptMsg);
+ Set<PreemptionContainer> preemptContainers =
+ preemptMsg.getContract().getContainers();
+ assertEquals(1, preemptContainers.size());
+ PreemptionContainer preemptContainer = preemptContainers.iterator().next();
+ assertEquals(c2.getId(), preemptContainer.getId());
+
+ // wait until the scheduler kills the container
+ GenericTestUtils.waitFor(() -> {
+ try {
+ nm.nodeHeartbeat(true); // trigger preemption in the NM
+ } catch (Exception e) {
+ LOG.error("Cannot heartbeat", e);
+ }
+ SchedulerNodeReport report = scheduler.getNodeReport(nmId);
+ return report.getAvailableResource().getMemorySize() > 0;
+ }, 200, 5 * 1000);
+ assertMemory(scheduler, nmId, 2 * GB, 1 * GB);
+
+ AllocateResponse allocResponse3 = am.schedule();
+ List<ContainerStatus> completedContainers =
+ allocResponse3.getCompletedContainersStatuses();
+ assertEquals(1, completedContainers.size());
+ ContainerStatus c2status = completedContainers.get(0);
+ assertEquals(c2.getId(), c2status.getContainerId());
+ assertEquals(ContainerState.COMPLETE, c2status.getState());
+ assertEquals(ContainerExitStatus.PREEMPTED, c2status.getExitStatus());
+ assertEquals("Container preempted by scheduler", c2status.getDiagnostics());
+
+ long timeToKill = Time.now() - t0;
+ assertTrue("Took too short to kill: " + timeToKill, timeToKill > 2000);
+ assertTrue("Took too long to kill: " + timeToKill, timeToKill < 2500);
+
rm.stop();
}
+ /**
+ * Update the resources on a Node Manager.
+ * @param rm Resource Manager to contact.
+ * @param nmId Identifier of the Node Manager.
+ * @param memory Memory in MB.
+ * @param vCores Number of virtual cores.
+ * @param overcommitTimeout Timeout for over commit.
+ * @throws Exception If the update cannot be completed.
+ */
+ private static void updateNodeResource(MockRM rm, NodeId nmId,
+ int memory, int vCores, int overcommitTimeout) throws Exception {
+ AdminService admin = rm.getAdminService();
+ ResourceOption resourceOption = ResourceOption.newInstance(
+ Resource.newInstance(memory, vCores), overcommitTimeout);
+ UpdateNodeResourceRequest req = UpdateNodeResourceRequest.newInstance(
+ Collections.singletonMap(nmId, resourceOption));
+ admin.updateNodeResource(req);
+ }
+
+ /**
+ * Check if a node report has the expected memory values.
+ * @param scheduler Scheduler with the data.
+ * @param nmId Identifier of the node to check.
+ * @param expectedUsed The expected used memory in MB.
+ * @param expectedAvailable The expected available memory in MB.
+ */
+ private void assertMemory(ResourceScheduler scheduler, NodeId nmId,
+ long expectedUsed, long expectedAvailable) {
+ SchedulerNodeReport nmReport = scheduler.getNodeReport(nmId);
+ assertEquals(expectedUsed, nmReport.getUsedResource().getMemorySize());
+ assertEquals(expectedAvailable,
+ nmReport.getAvailableResource().getMemorySize());
+ }
+
@Test
public void testGetAppsInQueue() throws Exception {
Application application_0 = new Application("user_0", "a1", resourceManager);