diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerRequest.java index e4f7a82..925a7979 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerRequest.java @@ -150,11 +150,13 @@ public int hashCode() { ContainerId cId = getContainerId(); ExecutionType execType = getExecutionType(); Resource capability = getCapability(); + ContainerUpdateType updateType = getContainerUpdateType(); result = prime * result + ((capability == null) ? 0 : capability.hashCode()); result = prime * result + ((cId == null) ? 0 : cId.hashCode()); result = prime * result + getContainerVersion(); result = prime * result + ((execType == null) ? 0 : execType.hashCode()); + result = prime * result + ((updateType== null) ? 
0 : updateType.hashCode()); return result; } @@ -208,6 +210,14 @@ public boolean equals(Object obj) { } else if (!execType.equals(other.getExecutionType())) { return false; } + ContainerUpdateType updateType = getContainerUpdateType(); + if (updateType == null) { + if (other.getContainerUpdateType() != null) { + return false; + } + } else if (!updateType.equals(other.getContainerUpdateType())) { + return false; + } return true; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java index 52155f5..37ae746 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java @@ -525,6 +525,9 @@ public abstract void unregisterApplicationMaster(FinalApplicationStatus appStatu public abstract void requestContainerResourceChange( Container container, Resource capability); + public abstract void requestContainerUpdate( + Container container, Resource capability, ExecutionType targetExectype); + /** * Release containers assigned by the Resource Manager. If the app cannot use * the container or wants to give up the container then it can release them. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 44fc1e0..adaf726 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -169,15 +169,16 @@ static boolean canFit(Resource arg0, Resource arg1) { protected Set pendingRelease = new TreeSet(); // change map holds container resource change requests between two allocate() // calls, and are cleared after each successful allocate() call. - protected final Map> change = - new HashMap<>(); + protected final Map> change = new HashMap<>(); // pendingChange map holds history of container resource change requests in // case AM needs to reregister with the ResourceManager. // Change requests are removed from this map if RM confirms the change // through allocate response, or if RM confirms that the container has been // completed. 
- protected final Map> - pendingChange = new HashMap<>(); + protected final Map> pendingChange = + new HashMap<>(); public AMRMClientImpl() { super(AMRMClientImpl.class.getName()); @@ -259,7 +260,7 @@ public AllocateResponse allocate(float progressIndicator) AllocateRequest allocateRequest = null; List blacklistToAdd = new ArrayList(); List blacklistToRemove = new ArrayList(); - Map> oldChange = + Map> oldChange = new HashMap<>(); try { synchronized (this) { @@ -374,14 +375,15 @@ public AllocateResponse allocate(float progressIndicator) // // Only insert entries from the cached oldChange map // that do not exist in the current change map: - for (Map.Entry> entry : + for (Map.Entry> entry : oldChange.entrySet()) { ContainerId oldContainerId = entry.getKey(); Container oldContainer = entry.getValue().getKey(); - Resource oldResource = entry.getValue().getValue(); + UpdateContainerRequest oldupdate = entry.getValue().getValue(); if (change.get(oldContainerId) == null) { change.put( - oldContainerId, new SimpleEntry<>(oldContainer, oldResource)); + oldContainerId, new SimpleEntry<>(oldContainer, oldupdate)); } } blacklistAdditions.addAll(blacklistToAdd); @@ -394,19 +396,30 @@ public AllocateResponse allocate(float progressIndicator) private List createUpdateList() { List updateList = new ArrayList<>(); - for (Map.Entry> entry : - change.entrySet()) { - Resource targetCapability = entry.getValue().getValue(); - Resource currCapability = entry.getValue().getKey().getResource(); - int version = entry.getValue().getKey().getVersion(); - ContainerUpdateType updateType = - ContainerUpdateType.INCREASE_RESOURCE; - if (Resources.fitsIn(targetCapability, currCapability)) { - updateType = ContainerUpdateType.DECREASE_RESOURCE; + for (Map.Entry> entry : change.entrySet()) { + Resource targetCapability = entry.getValue().getValue().getCapability(); + if (targetCapability != null) { + Resource currCapability = entry.getValue().getKey().getResource(); + int version = 
entry.getValue().getKey().getVersion(); + ContainerUpdateType updateType = + ContainerUpdateType.INCREASE_RESOURCE; + if (Resources.fitsIn(targetCapability, currCapability)) { + updateType = ContainerUpdateType.DECREASE_RESOURCE; + } + updateList.add( + UpdateContainerRequest.newInstance(version, entry.getKey(), + updateType, targetCapability, null)); + } else { + // TODO: Validate + ExecutionType targetExecType = + entry.getValue().getValue().getExecutionType(); + updateList.add( + UpdateContainerRequest.newInstance( + entry.getValue().getKey().getVersion(), entry.getKey(), + ContainerUpdateType.UPDATE_EXECUTION_TYPE, null, + targetExecType)); } - updateList.add( - UpdateContainerRequest.newInstance(version, entry.getKey(), - updateType, targetCapability, null)); } return updateList; } @@ -591,21 +604,42 @@ public synchronized void removeContainerRequest(T req) { } @Override - public synchronized void requestContainerResourceChange( - Container container, Resource capability) { - validateContainerResourceChangeRequest( - container.getId(), container.getResource(), capability); + public void requestContainerResourceChange(Container container, + Resource capability) { + requestContainerUpdate(container, UpdateContainerRequest.newInstance( + container.getVersion(), container.getId(), null, capability, null)); + } + + @Override + public void requestContainerUpdate(Container container, Resource capability, + ExecutionType targetExectype) { + LOG.info("Requesting Container update : " + + "container=" + container + ", " + + "targetCapability=" + capability + ", " + + "targetExecType=" + targetExectype); + requestContainerUpdate(container, UpdateContainerRequest.newInstance( + container.getVersion(), container.getId(), null, + capability, targetExectype)); + } + + private synchronized void requestContainerUpdate( + Container container, UpdateContainerRequest updateContainerRequest) { + if (updateContainerRequest.getCapability() != null) { + 
validateContainerResourceChangeRequest( + container.getId(), container.getResource(), + updateContainerRequest.getCapability()); + } if (change.get(container.getId()) == null) { change.put(container.getId(), - new SimpleEntry<>(container, capability)); + new SimpleEntry<>(container, updateContainerRequest)); } else { - change.get(container.getId()).setValue(capability); + change.get(container.getId()).setValue(updateContainerRequest); } if (pendingChange.get(container.getId()) == null) { pendingChange.put(container.getId(), - new SimpleEntry<>(container, capability)); + new SimpleEntry<>(container, updateContainerRequest)); } else { - pendingChange.get(container.getId()).setValue(capability); + pendingChange.get(container.getId()).setValue(updateContainerRequest); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java index 802c207..118cf36 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java @@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.Token; +import org.apache.hadoop.yarn.api.records.UpdatedContainer; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.api.AMRMClient; @@ -54,6 +55,9 @@ import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; + +import org.apache.hadoop.yarn.server.resourcemanager.scheduler + .AbstractYarnScheduler; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; import org.junit.After; @@ -66,13 +70,17 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.TreeSet; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; /** * Class that tests the allocation of OPPORTUNISTIC containers through the @@ -83,7 +91,6 @@ private static MiniYARNCluster yarnCluster = null; private static YarnClient yarnClient = null; private static List nodeReports = null; - private static ApplicationAttemptId attemptId = null; private static int nodeCount = 3; private static final int ROLLING_INTERVAL_SEC = 13; @@ -92,12 +99,22 @@ private static Resource capability; private static Priority priority; private static Priority priority2; + private static Priority priority3; + private static Priority priority4; private static String node; private static String rack; private static String[] nodes; private static String[] racks; private final static int DEFAULT_ITERATION = 3; + // Per test.. 
+ private ApplicationAttemptId attemptId = null; + private AMRMClientImpl amClient = null; + private long availMB; + private int availVCores; + private long allocMB; + private int allocVCores; + @BeforeClass public static void setup() throws Exception { // start minicluster @@ -106,7 +123,7 @@ public static void setup() throws Exception { YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, ROLLING_INTERVAL_SEC); conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, AM_EXPIRE_MS); - conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100); + conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1000); // set the minimum allocation so that resource decrease can go under 1024 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512); conf.setBoolean( @@ -129,7 +146,9 @@ public static void setup() throws Exception { priority = Priority.newInstance(1); priority2 = Priority.newInstance(2); - capability = Resource.newInstance(1024, 1); + priority3 = Priority.newInstance(3); + priority4 = Priority.newInstance(4); + capability = Resource.newInstance(512, 1); node = nodeReports.get(0).getNodeId().getHost(); rack = nodeReports.get(0).getRackName(); @@ -193,10 +212,35 @@ public void startApp() throws Exception { UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken()); appAttempt.getAMRMToken() .setService(ClientRMProxy.getAMRMTokenService(conf)); + + // start am rm client + amClient = (AMRMClientImpl)AMRMClient + .createAMRMClient(); + + //setting an instance NMTokenCache + amClient.setNMTokenCache(new NMTokenCache()); + //asserting we are not using the singleton instance cache + Assert.assertNotSame(NMTokenCache.getSingleton(), + amClient.getNMTokenCache()); + + amClient.init(conf); + amClient.start(); + + amClient.registerApplicationMaster("Host", 10000, ""); } @After public void cancelApp() throws YarnException, IOException { + try { + amClient + .unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, 
+ null); + } finally { + if (amClient != null && + amClient.getServiceState() == Service.STATE.STARTED) { + amClient.stop(); + } + } yarnClient.killApplication(attemptId.getApplicationId()); attemptId = null; } @@ -214,43 +258,224 @@ public static void tearDown() { } @Test(timeout = 60000) - public void testAMRMClient() throws YarnException, IOException { - AMRMClient amClient = null; - try { - // start am rm client - amClient = AMRMClient.createAMRMClient(); + public void testPromotionFromAcquired() throws YarnException, IOException { + // setup container request + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); - //setting an instance NMTokenCache - amClient.setNMTokenCache(new NMTokenCache()); - //asserting we are not using the singleton instance cache - Assert.assertNotSame(NMTokenCache.getSingleton(), - amClient.getNMTokenCache()); + amClient.addContainerRequest( + new AMRMClient.ContainerRequest(capability, null, null, priority2, 0, + true, null, + ExecutionTypeRequest.newInstance( + ExecutionType.OPPORTUNISTIC, true))); - amClient.init(conf); - amClient.start(); + int oppContainersRequestedAny = + amClient.getTable(0).get(priority2, ResourceRequest.ANY, + ExecutionType.OPPORTUNISTIC, capability).remoteRequest + .getNumContainers(); - amClient.registerApplicationMaster("Host", 10000, ""); + assertEquals(1, oppContainersRequestedAny); - testOpportunisticAllocation( - (AMRMClientImpl) amClient); + assertEquals(1, amClient.ask.size()); + assertEquals(0, amClient.release.size()); - testAllocation((AMRMClientImpl)amClient); + // RM should allocate container within 2 calls to allocate() + int allocatedContainerCount = 0; + Map allocatedOpportContainers = new HashMap<>(); + int iterationsLeft = 50; - amClient - .unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, - null); + amClient.getNMTokenCache().clearCache(); + Assert.assertEquals(0, + amClient.getNMTokenCache().numberOfTokensInCache()); + HashMap 
receivedNMTokens = new HashMap<>(); - } finally { - if (amClient != null && - amClient.getServiceState() == Service.STATE.STARTED) { - amClient.stop(); + updateMetrics("Before Opp Allocation"); + + while (allocatedContainerCount < oppContainersRequestedAny + && iterationsLeft-- > 0) { + AllocateResponse allocResponse = amClient.allocate(0.1f); + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + allocatedContainerCount += + allocResponse.getAllocatedContainers().size(); + for (Container container : allocResponse.getAllocatedContainers()) { + if (container.getExecutionType() == ExecutionType.OPPORTUNISTIC) { + allocatedOpportContainers.put(container.getId(), container); + removeCR(amClient, container); + } + } + + for (NMToken token : allocResponse.getNMTokens()) { + String nodeID = token.getNodeId().toString(); + receivedNMTokens.put(nodeID, token.getToken()); + } + + if (allocatedContainerCount < oppContainersRequestedAny) { + // sleep to let NM's heartbeat to RM and trigger allocations + sleep(100); + } + } + + assertEquals(oppContainersRequestedAny, allocatedContainerCount); + assertEquals(oppContainersRequestedAny, allocatedOpportContainers.size()); + + updateMetrics("After Opp Allocation / Before Promotion"); + + amClient.requestContainerUpdate( + allocatedOpportContainers.values().iterator().next(), + null, ExecutionType.GUARANTEED); + iterationsLeft = 120; + Map updatedContainers = new HashMap<>(); + // do a few iterations to ensure RM is not going to send new containers + while (iterationsLeft-- > 0 && updatedContainers.isEmpty()) { + // inform RM of rejection + AllocateResponse allocResponse = amClient.allocate(0.1f); + // RM did not send new containers because AM does not need any + if (allocResponse.getUpdatedContainers() != null) { + for (UpdatedContainer updatedContainer : allocResponse + .getUpdatedContainers()) { + System.out.println("Got update.."); + updatedContainers.put(updatedContainer.getContainer().getId(), + 
updatedContainer); + } + } + if (iterationsLeft > 0) { + // sleep to make sure NM's heartbeat + sleep(100); } } + + updateMetrics("After Promotion"); + + assertEquals(1, updatedContainers.size()); + for (ContainerId cId : allocatedOpportContainers.keySet()) { + Container orig = allocatedOpportContainers.get(cId); + UpdatedContainer updatedContainer = updatedContainers.get(cId); + assertNotNull(updatedContainer); + assertEquals(ExecutionType.GUARANTEED, + updatedContainer.getContainer().getExecutionType()); + assertEquals(orig.getResource(), + updatedContainer.getContainer().getResource()); + assertEquals(orig.getNodeId(), + updatedContainer.getContainer().getNodeId()); + assertEquals(orig.getVersion() + 1, + updatedContainer.getContainer().getVersion()); + } + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + amClient.ask.clear(); } - private void testAllocation( - final AMRMClientImpl amClient) - throws YarnException, IOException { + @Test(timeout = 60000) + public void testDemotionFromAcquired() throws YarnException, IOException { + // setup container request + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + amClient.addContainerRequest( + new AMRMClient.ContainerRequest(capability, null, null, priority3)); + + int guarContainersRequestedAny = amClient.getTable(0).get(priority3, + ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + .remoteRequest.getNumContainers(); + + assertEquals(1, guarContainersRequestedAny); + + assertEquals(1, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + // RM should allocate container within 2 calls to allocate() + int allocatedContainerCount = 0; + Map allocatedGuarContainers = new HashMap<>(); + int iterationsLeft = 50; + + amClient.getNMTokenCache().clearCache(); + Assert.assertEquals(0, + amClient.getNMTokenCache().numberOfTokensInCache()); + HashMap receivedNMTokens = new HashMap<>(); + + updateMetrics("Before Guar Allocation"); 
+ + while (allocatedContainerCount < guarContainersRequestedAny + && iterationsLeft-- > 0) { + AllocateResponse allocResponse = amClient.allocate(0.1f); + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + allocatedContainerCount += + allocResponse.getAllocatedContainers().size(); + for (Container container : allocResponse.getAllocatedContainers()) { + if (container.getExecutionType() == ExecutionType.GUARANTEED) { + allocatedGuarContainers.put(container.getId(), container); + removeCR(amClient, container); + } + } + + for (NMToken token : allocResponse.getNMTokens()) { + String nodeID = token.getNodeId().toString(); + receivedNMTokens.put(nodeID, token.getToken()); + } + + if (allocatedContainerCount < guarContainersRequestedAny) { + // sleep to let NM's heartbeat to RM and trigger allocations + sleep(100); + } + } + + assertEquals(guarContainersRequestedAny, allocatedContainerCount); + assertEquals(guarContainersRequestedAny, allocatedGuarContainers.size()); + + updateMetrics("After Guar Allocation / Before Demotion"); + + amClient.requestContainerUpdate( + allocatedGuarContainers.values().iterator().next(), + null, ExecutionType.OPPORTUNISTIC); + iterationsLeft = 120; + Map updatedContainers = new HashMap<>(); + // do a few iterations to ensure RM is not going to send new containers + while (iterationsLeft-- > 0 && updatedContainers.isEmpty()) { + // inform RM of rejection + AllocateResponse allocResponse = amClient.allocate(0.1f); + // RM did not send new containers because AM does not need any + if (allocResponse.getUpdatedContainers() != null) { + for (UpdatedContainer updatedContainer : allocResponse + .getUpdatedContainers()) { + System.out.println("Got update.."); + updatedContainers.put(updatedContainer.getContainer().getId(), + updatedContainer); + } + } + if (iterationsLeft > 0) { + // sleep to make sure NM's heartbeat + sleep(100); + } + } + + updateMetrics("After Demotion"); + + assertEquals(1, 
updatedContainers.size()); + for (ContainerId cId : allocatedGuarContainers.keySet()) { + Container orig = allocatedGuarContainers.get(cId); + UpdatedContainer updatedContainer = updatedContainers.get(cId); + assertNotNull(updatedContainer); + assertEquals(ExecutionType.OPPORTUNISTIC, + updatedContainer.getContainer().getExecutionType()); + assertEquals(orig.getResource(), + updatedContainer.getContainer().getResource()); + assertEquals(orig.getNodeId(), + updatedContainer.getContainer().getNodeId()); + assertEquals(orig.getVersion() + 1, + updatedContainer.getContainer().getVersion()); + } + assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + amClient.ask.clear(); + } + + @Test(timeout = 60000) + public void testMixedAllocationAndRelease() throws YarnException, + IOException { // setup container request assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); @@ -274,6 +499,28 @@ private void testAllocation( ExecutionTypeRequest.newInstance( ExecutionType.OPPORTUNISTIC, true))); + int containersRequestedNode = amClient.getTable(0).get(priority, + node, ExecutionType.GUARANTEED, capability).remoteRequest + .getNumContainers(); + int containersRequestedRack = amClient.getTable(0).get(priority, + rack, ExecutionType.GUARANTEED, capability).remoteRequest + .getNumContainers(); + int containersRequestedAny = amClient.getTable(0).get(priority, + ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + .remoteRequest.getNumContainers(); + int oppContainersRequestedAny = + amClient.getTable(0).get(priority2, ResourceRequest.ANY, + ExecutionType.OPPORTUNISTIC, capability).remoteRequest + .getNumContainers(); + + assertEquals(4, containersRequestedNode); + assertEquals(4, containersRequestedRack); + assertEquals(4, containersRequestedAny); + assertEquals(2, oppContainersRequestedAny); + + assertEquals(4, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + amClient.removeContainerRequest( new 
AMRMClient.ContainerRequest(capability, nodes, racks, priority)); amClient.removeContainerRequest( @@ -284,16 +531,16 @@ private void testAllocation( ExecutionTypeRequest.newInstance( ExecutionType.OPPORTUNISTIC, true))); - int containersRequestedNode = amClient.getTable(0).get(priority, + containersRequestedNode = amClient.getTable(0).get(priority, node, ExecutionType.GUARANTEED, capability).remoteRequest .getNumContainers(); - int containersRequestedRack = amClient.getTable(0).get(priority, + containersRequestedRack = amClient.getTable(0).get(priority, rack, ExecutionType.GUARANTEED, capability).remoteRequest .getNumContainers(); - int containersRequestedAny = amClient.getTable(0).get(priority, + containersRequestedAny = amClient.getTable(0).get(priority, ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) .remoteRequest.getNumContainers(); - int oppContainersRequestedAny = + oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, capability).remoteRequest .getNumContainers(); @@ -309,7 +556,7 @@ private void testAllocation( // RM should allocate container within 2 calls to allocate() int allocatedContainerCount = 0; int allocatedOpportContainerCount = 0; - int iterationsLeft = 10; + int iterationsLeft = 50; Set releases = new TreeSet<>(); amClient.getNMTokenCache().clearCache(); @@ -324,8 +571,8 @@ private void testAllocation( assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); - allocatedContainerCount += allocResponse.getAllocatedContainers() - .size(); + allocatedContainerCount += + allocResponse.getAllocatedContainers().size(); for (Container container : allocResponse.getAllocatedContainers()) { if (container.getExecutionType() == ExecutionType.OPPORTUNISTIC) { allocatedOpportContainerCount++; @@ -345,9 +592,9 @@ private void testAllocation( } } - assertEquals(allocatedContainerCount, - containersRequestedAny + oppContainersRequestedAny); - 
assertEquals(allocatedOpportContainerCount, oppContainersRequestedAny); + assertEquals(containersRequestedAny + oppContainersRequestedAny, + allocatedContainerCount); + assertEquals(oppContainersRequestedAny, allocatedOpportContainerCount); for (ContainerId rejectContainerId : releases) { amClient.releaseAssignedContainer(rejectContainerId); } @@ -395,26 +642,25 @@ private void testAllocation( /** * Tests allocation with requests comprising only opportunistic containers. */ - private void testOpportunisticAllocation( - final AMRMClientImpl amClient) - throws YarnException, IOException { + @Test(timeout = 60000) + public void testOpportunisticAllocation() throws YarnException, IOException { // setup container request assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); amClient.addContainerRequest( - new AMRMClient.ContainerRequest(capability, null, null, priority, 0, + new AMRMClient.ContainerRequest(capability, null, null, priority3, 0, true, null, ExecutionTypeRequest.newInstance( ExecutionType.OPPORTUNISTIC, true))); amClient.addContainerRequest( - new AMRMClient.ContainerRequest(capability, null, null, priority, 0, + new AMRMClient.ContainerRequest(capability, null, null, priority3, 0, true, null, ExecutionTypeRequest.newInstance( ExecutionType.OPPORTUNISTIC, true))); int oppContainersRequestedAny = - amClient.getTable(0).get(priority, ResourceRequest.ANY, + amClient.getTable(0).get(priority3, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, capability).remoteRequest .getNumContainers(); @@ -456,9 +702,44 @@ private void testOpportunisticAllocation( } } + assertEquals(oppContainersRequestedAny, allocatedContainerCount); assertEquals(1, receivedNMTokens.values().size()); } + private void removeCR(AMRMClientImpl amClient, + Container container) { + List> + matchingRequests = amClient.getMatchingRequests(container + .getPriority(), + ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, + container.getResource()); + Set toRemove = new 
HashSet<>(); + for (Collection rc : matchingRequests) { + for (AMRMClient.ContainerRequest cr : rc) { + toRemove.add(cr); + } + } + for (AMRMClient.ContainerRequest cr : toRemove) { + amClient.removeContainerRequest(cr); + } + } + + private void updateMetrics(String msg) { + AbstractYarnScheduler scheduler = + (AbstractYarnScheduler)yarnCluster.getResourceManager() + .getResourceScheduler(); + availMB = scheduler.getRootQueueMetrics().getAvailableMB(); + availVCores = scheduler.getRootQueueMetrics().getAvailableVirtualCores(); + allocMB = scheduler.getRootQueueMetrics().getAllocatedMB(); + allocVCores = scheduler.getRootQueueMetrics().getAllocatedVirtualCores(); + System.out.println("## METRICS (" + msg + ")==>"); + System.out.println(" : availMB=" + availMB + ", " + + "availVCores=" +availVCores + ", " + + "allocMB=" + allocMB + ", " + + "allocVCores=" + allocVCores + ", "); + System.out.println("<== ##"); + } + private void sleep(int sleepTime) { try { Thread.sleep(sleepTime); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java index 2d77671..3c1fcbd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerAllocator.java @@ -312,13 +312,21 @@ private Container buildContainer(long rmIdentifier, // before accepting an ask) Resource capability = normalizeCapability(appParams, rr); + return createContainer( + rmIdentifier, appParams.getContainerTokenExpiryInterval(), + 
SchedulerRequestKey.create(rr), userName, node, cId, capability); + } + + private Container createContainer(long rmIdentifier, long tokenExpiry, + SchedulerRequestKey schedulerKey, String userName, RemoteNode node, + ContainerId cId, Resource capability) { long currTime = System.currentTimeMillis(); ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier( cId, 0, node.getNodeId().toString(), userName, - capability, currTime + appParams.containerTokenExpiryInterval, + capability, currTime + tokenExpiry, tokenSecretManager.getCurrentKey().getKeyId(), rmIdentifier, - rr.getPriority(), currTime, + schedulerKey.getPriority(), currTime, null, CommonNodeLabelsManager.NO_LABEL, ContainerType.TASK, ExecutionType.OPPORTUNISTIC); byte[] pwd = @@ -327,9 +335,9 @@ private Container buildContainer(long rmIdentifier, containerTokenIdentifier); Container container = BuilderUtils.newContainer( cId, node.getNodeId(), node.getHttpAddress(), - capability, rr.getPriority(), containerToken, + capability, schedulerKey.getPriority(), containerToken, containerTokenIdentifier.getExecutionType(), - rr.getAllocationRequestId()); + schedulerKey.getAllocationRequestId()); return container; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java index 875e166..d1a7ec8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/OpportunisticContainerContext.java @@ -18,14 +18,15 @@ package org.apache.hadoop.yarn.server.scheduler; 
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ExecutionType; -import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RemoteNode; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,6 +40,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator.AllocationParams; import static org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator.ContainerIdGenerator; @@ -52,6 +54,11 @@ private static final Logger LOG = LoggerFactory .getLogger(OpportunisticContainerContext.class); + public static ContainerId UNDEFINED = + ContainerId.newContainerId( + ApplicationAttemptId.newInstance( + ApplicationId.newInstance(-1, -1), -1), -1); + private AllocationParams appParams = new AllocationParams(); private ContainerIdGenerator containerIdGenerator = @@ -69,6 +76,20 @@ private final TreeMap> outstandingOpReqs = new TreeMap<>(); + // Keep track of containers that are undergoing promotion + private final Map>>> outstandingPromotions = new HashMap<>(); + + // A promotion request is sent as just another request request, but it + // is tracked using an allocationRequestId, which is < 0. 
This will + // ensure that these requests are processed before normal requests + private final Map + outstandingPromotionKeys = new HashMap<>(); + + private final Set outstandingDemotions = new HashSet<>(); + + private AtomicLong promotionCounter = new AtomicLong(-1); + public AllocationParams getAppParams() { return appParams; } @@ -117,6 +138,101 @@ public void updateAllocationParams(Resource minResource, Resource maxResource, return outstandingOpReqs; } + public synchronized boolean isBeingPromoted(Container container) { + Map>> resourceMap = + outstandingPromotions.get(SchedulerRequestKey.extractFrom(container)); + if (resourceMap != null) { + Map> locationMap = + resourceMap.get(container.getResource()); + if (locationMap != null) { + Set containerIds = locationMap.get(container.getNodeId()); + if (containerIds != null && !containerIds.isEmpty()) { + return containerIds.contains(container.getId()); + } + } + } + return false; + } + + public synchronized boolean checkAndAddToOutstandingDemotions( + Container container) { + if (isBeingPromoted(container) + || outstandingDemotions.contains(container.getId())) { + return false; + } + outstandingDemotions.add(container.getId()); + return true; + } + + public SchedulerRequestKey checkAndAddToOutstandingPromotions( + Container container) { + SchedulerRequestKey schedulerKey = + SchedulerRequestKey.extractFrom(container); + Map>> resourceMap = + outstandingPromotions.get(schedulerKey); + if (resourceMap == null) { + resourceMap = new HashMap<>(); + outstandingPromotions.put(schedulerKey, resourceMap); + } + Map> locationMap = + resourceMap.get(container.getResource()); + if (locationMap == null) { + locationMap = new HashMap<>(); + resourceMap.put(container.getResource(), locationMap); + } + Set containerIds = locationMap.get(container.getNodeId()); + if (containerIds == null) { + containerIds = new HashSet<>(); + locationMap.put(container.getNodeId(), containerIds); + } + if (containerIds.contains(container.getId()) 
+ || outstandingDemotions.contains(container.getId())) { + return null; + } + containerIds.add(container.getId()); + SchedulerRequestKey promotionKey = new SchedulerRequestKey + (container.getPriority(), promotionCounter.decrementAndGet()); + outstandingPromotionKeys.put(promotionKey, schedulerKey); + return promotionKey; + } + + public ContainerId matchContainerToOutstandingPromotionReq( + Container container) { + ContainerId retVal = null; + SchedulerRequestKey promotionKey = + SchedulerRequestKey.extractFrom(container); + SchedulerRequestKey schedulerKey = + outstandingPromotionKeys.get(promotionKey); + Map>> resourceMap = + outstandingPromotions.get(schedulerKey); + if (resourceMap != null) { + Map> locationMap = + resourceMap.get(container.getResource()); + if (locationMap != null) { + Set containerIds = locationMap.get(container.getNodeId()); + if (containerIds != null && !containerIds.isEmpty()) { + retVal = containerIds.iterator().next(); + containerIds.remove(retVal); + outstandingPromotionKeys.remove(promotionKey); + if (containerIds.isEmpty()) { + locationMap.remove(container.getNodeId()); + } + } + if (locationMap.isEmpty()) { + resourceMap.remove(container.getResource()); + } + } + if (resourceMap.isEmpty()) { + outstandingPromotions.remove(schedulerKey); + } + } + // Allocation happened on NM on the same host, but not on the NM + // we need.. We need to signal the this container has to be released. + if (schedulerKey != null && retVal == null) { + return UNDEFINED; + } + return retVal; + } /** * Takes a list of ResourceRequests (asks), extracts the key information viz. 
* (Priority, ResourceName, Capability) and adds to the outstanding @@ -155,8 +271,9 @@ public void addToOutstandingReqs(List resourceAsks) { resourceRequest.getNumContainers() + request.getNumContainers()); } if (ResourceRequest.isAnyLocation(request.getResourceName())) { - LOG.info("# of outstandingOpReqs in ANY (at" + - "priority = "+ schedulerKey.getPriority() + LOG.info("# of outstandingOpReqs in ANY (at " + + "priority = " + schedulerKey.getPriority() + + ", allocationReqId = " + schedulerKey.getAllocationRequestId() + ", with capability = " + request.getCapability() + " ) : " + resourceRequest.getNumContainers()); } @@ -172,7 +289,8 @@ public void addToOutstandingReqs(List resourceAsks) { public void matchAllocationToOutstandingRequest(Resource capability, List allocatedContainers) { for (Container c : allocatedContainers) { - SchedulerRequestKey schedulerKey = SchedulerRequestKey.extractFrom(c); + SchedulerRequestKey schedulerKey = + SchedulerRequestKey.extractFrom(c); Map asks = outstandingOpReqs.get(schedulerKey); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java index 83409e6..e726e8f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java @@ -26,7 +26,14 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB; import 
org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerUpdateType; +import org.apache.hadoop.yarn.api.records.ExecutionType; +import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.UpdateContainerError; +import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocol; @@ -70,9 +77,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; - import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerAllocator; import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext; +import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils; import java.io.IOException; @@ -80,6 +88,9 @@ import java.util.ArrayList; import java.util.List; +import static org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils + .RECORD_FACTORY; + /** * The OpportunisticContainerAllocatorAMService is started instead of the * ApplicationMasterService if opportunistic scheduling is enabled for the YARN @@ -249,11 +260,166 @@ protected void allocate(ApplicationAttemptId appAttemptId, addToAllocatedContainers(allocateResponse, oppContainers); } + handleExecutionTypeUpdates(appAttempt, request, allocateResponse, + oppCtx, partitionedAsks.getGuaranteed()); + + + List promotedContainers = + 
appAttempt.pullContainersWithUpdatedExecType(); + addToUpdatedContainers(allocateResponse, + ContainerUpdateType.UPDATE_EXECUTION_TYPE, promotedContainers); + // Allocate GUARANTEED containers. request.setAskList(partitionedAsks.getGuaranteed()); + super.allocate(appAttemptId, request, allocateResponse); } + private void handleExecutionTypeUpdates( + SchedulerApplicationAttempt appAttempt, AllocateRequest request, + AllocateResponse allocateResponse, OpportunisticContainerContext oppCntxt, + List existingGuaranteedReqs) { + List promotionRequests = new ArrayList<>(); + List demotionRequests = new ArrayList<>(); + + List updateContainerErrors = RMServerUtils + .validateAndSplitUpdateExecutionTypeRequests(rmContext, + request, promotionRequests, demotionRequests); + + if (!promotionRequests.isEmpty()) { + LOG.info("Promotion Update requests : " + promotionRequests); + } + if (!demotionRequests.isEmpty()) { + LOG.info("Demotion Update requests : " + demotionRequests); + } + + List resourceReqsForPromotion = + createResourceReqsForPromotion(oppCntxt, promotionRequests, + updateContainerErrors); + + if (!resourceReqsForPromotion.isEmpty() && LOG.isDebugEnabled()) { + LOG.debug("Generated Resource Requests for promotion : " + + resourceReqsForPromotion); + } + + handleDemotionRequests(appAttempt, demotionRequests, updateContainerErrors); + addToUpdateContainerErrors(allocateResponse, updateContainerErrors); + existingGuaranteedReqs.addAll(resourceReqsForPromotion); + } + + private void handleDemotionRequests(SchedulerApplicationAttempt appAttempt, + List demotionRequests, + List updateContainerErrors) { + OpportunisticContainerContext oppCntxt = + appAttempt.getOpportunisticContainerContext(); + for (UpdateContainerRequest uReq : demotionRequests) { + RMContainer rmContainer = + rmContext.getScheduler().getRMContainer(uReq.getContainerId()); + if (rmContainer != null) { + if (oppCntxt.checkAndAddToOutstandingDemotions( + rmContainer.getContainer())) { + RMContainer 
demotedRMContainer = createDemotedRMContainer + (appAttempt, oppCntxt, rmContainer); + appAttempt.addToNewlyDemotedContainers( + uReq.getContainerId(), demotedRMContainer); + } else { + updateContainerErrors.add(UpdateContainerError.newInstance( + RMServerUtils.UPDATE_OUTSTANDING_ERROR, uReq)); + } + } else { + LOG.warn("Cannot demote non-existent (or completed) Container [" + + uReq.getContainerId() + "]"); + } + } + } + + private RMContainer createDemotedRMContainer( + SchedulerApplicationAttempt appAttempt, + OpportunisticContainerContext oppCntxt, + RMContainer rmContainer) { + SchedulerRequestKey sk = + SchedulerRequestKey.extractFrom(rmContainer.getContainer()); + Container demotedContainer = BuilderUtils.newContainer( + ContainerId.newContainerId(appAttempt.getApplicationAttemptId(), + oppCntxt.getContainerIdGenerator().generateContainerId()), + rmContainer.getContainer().getNodeId(), + rmContainer.getContainer().getNodeHttpAddress(), + rmContainer.getContainer().getResource(), + sk.getPriority(), null, ExecutionType.OPPORTUNISTIC, + sk.getAllocationRequestId()); + demotedContainer.setVersion(rmContainer.getContainer().getVersion()); + return createRmContainer(demotedContainer, false); + } + + public List createResourceReqsForPromotion( + OpportunisticContainerContext oppCntxt, + List updateContainerRequests, + List updateContainerErrors) { + List retList = new ArrayList<>(); + for (UpdateContainerRequest uReq : updateContainerRequests) { + RMContainer rmContainer = + rmContext.getScheduler().getRMContainer(uReq.getContainerId()); + // Check if this is a container update + // And not in the middle of a Demotion + if (rmContainer != null) { + // Check if this is an executionType change request + // If so, fix the rr to make it look like a normal rr + // with relaxLocality=false and numContainers=1 + SchedulerNode schedulerNode = rmContext.getScheduler() + .getSchedulerNode(rmContainer.getContainer().getNodeId()); + + // Add only if no outstanding promote 
requests exist. + SchedulerRequestKey schedulerKey = oppCntxt + .checkAndAddToOutstandingPromotions(rmContainer.getContainer()); + if (schedulerKey != null) { + long promotionReqId = schedulerKey.getAllocationRequestId(); + // Create a new Ask + retList.add( + createResourceReqForPromotion(promotionReqId, + RECORD_FACTORY.newRecordInstance(ResourceRequest.class), + rmContainer, + rmContainer.getContainer().getNodeId().getHost())); + + // TODO: The below are also required now.. Since the Schedulers + // actually update demand only for * requests. + + // Add rack local ask + retList.add( + createResourceReqForPromotion(promotionReqId, + RECORD_FACTORY.newRecordInstance(ResourceRequest.class), + rmContainer, schedulerNode.getRackName())); + + // Add ANY ask + retList.add( + createResourceReqForPromotion(promotionReqId, + RECORD_FACTORY.newRecordInstance(ResourceRequest.class), + rmContainer, ResourceRequest.ANY)); + } else { + updateContainerErrors.add(UpdateContainerError.newInstance( + RMServerUtils.UPDATE_OUTSTANDING_ERROR, uReq)); + } + } else { + LOG.warn("Cannot promote non-existent (or completed) Container [" + + uReq.getContainerId() + "]"); + } + } + return retList; + } + + private static ResourceRequest createResourceReqForPromotion(long allocReqId, + ResourceRequest rr, RMContainer rmContainer, String resourceName) { + rr.setResourceName(resourceName); + rr.setNumContainers(1); + rr.setRelaxLocality(false); + rr.setPriority(rmContainer.getContainer().getPriority()); + rr.setAllocationRequestId(allocReqId); + rr.setCapability(rmContainer.getContainer().getResource()); + rr.setNodeLabelExpression(rmContainer.getNodeLabelExpression()); + rr.setExecutionTypeRequest(ExecutionTypeRequest.newInstance( + ExecutionType.GUARANTEED, true)); + return rr; + } + @Override public RegisterDistributedSchedulingAMResponse registerApplicationMasterForDistributedScheduling( @@ -298,21 +464,28 @@ private void handleNewContainers(List allocContainers, boolean 
isRemotelyAllocated) { for (Container container : allocContainers) { // Create RMContainer - SchedulerApplicationAttempt appAttempt = - ((AbstractYarnScheduler) rmContext.getScheduler()) - .getCurrentAttemptForContainer(container.getId()); - RMContainer rmContainer = new RMContainerImpl(container, - appAttempt.getApplicationAttemptId(), container.getNodeId(), - appAttempt.getUser(), rmContext, isRemotelyAllocated); - appAttempt.addRMContainer(container.getId(), rmContainer); - ((AbstractYarnScheduler) rmContext.getScheduler()).getNode( - container.getNodeId()).allocateContainer(rmContainer); + RMContainer rmContainer = + createRmContainer(container, isRemotelyAllocated); rmContainer.handle( new RMContainerEvent(container.getId(), RMContainerEventType.ACQUIRED)); } } + private RMContainer createRmContainer( + Container container, boolean isRemotelyAllocated) { + SchedulerApplicationAttempt appAttempt = + ((AbstractYarnScheduler) rmContext.getScheduler()) + .getCurrentAttemptForContainer(container.getId()); + RMContainer rmContainer = new RMContainerImpl(container, + appAttempt.getApplicationAttemptId(), container.getNodeId(), + appAttempt.getUser(), rmContext, isRemotelyAllocated); + appAttempt.addRMContainer(container.getId(), rmContainer); + ((AbstractYarnScheduler) rmContext.getScheduler()).getNode( + container.getNodeId()).allocateContainer(rmContainer); + return rmContainer; + } + @Override public void handle(SchedulerEvent event) { switch (event.getType()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index a0cdf68..e0c7676 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -39,6 +39,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerUpdateType; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; @@ -80,7 +82,7 @@ */ public class RMServerUtils { - private static final String UPDATE_OUTSTANDING_ERROR = + public static final String UPDATE_OUTSTANDING_ERROR = "UPDATE_OUTSTANDING_ERROR"; private static final String INCORRECT_CONTAINER_VERSION_ERROR = "INCORRECT_CONTAINER_VERSION_ERROR"; @@ -123,6 +125,48 @@ } /** + * + * @param rmContext RM context + * @param request Allocate Request + * @param promoteExecTypeReqs Promotion requests + * @param demoteExecTypeReqs Demotion requests + * @return List of container update Errors + */ + public static List + validateAndSplitUpdateExecutionTypeRequests(RMContext rmContext, + AllocateRequest request, List promoteExecTypeReqs, + List demoteExecTypeReqs) { + List errors = new ArrayList<>(); + Set outstandingUpdate = new HashSet<>(); + for (UpdateContainerRequest updateReq : request.getUpdateRequests()) { + if (updateReq.getContainerUpdateType() == + ContainerUpdateType.UPDATE_EXECUTION_TYPE) { + RMContainer rmContainer = rmContext.getScheduler().getRMContainer( + updateReq.getContainerId()); + String msg = validateContainerIdAndVersion(outstandingUpdate, + updateReq, rmContainer); + if (msg == null) { + 
ExecutionType original = rmContainer.getExecutionType(); + ExecutionType target = updateReq.getExecutionType(); + if (target != original) { + if (target == ExecutionType.GUARANTEED && + original == ExecutionType.OPPORTUNISTIC) { + promoteExecTypeReqs.add(updateReq); + outstandingUpdate.add(updateReq.getContainerId()); + } else if (target == ExecutionType.OPPORTUNISTIC && + original == ExecutionType.GUARANTEED) { + demoteExecTypeReqs.add(updateReq); + outstandingUpdate.add(updateReq.getContainerId()); + } + } + } + checkAndcreateUpdateError(errors, updateReq, msg); + } + } + return errors; + } + + /** * Check if we have: * - Request for same containerId and different target resource * - If targetResources violates maximum/minimumAllocation @@ -131,7 +175,7 @@ * @param maximumAllocation Maximum Allocation * @param increaseResourceReqs Increase Resource Request * @param decreaseResourceReqs Decrease Resource Request - * @return List of container Errors + * @return List of container update Errors */ public static List validateAndSplitUpdateResourceRequests(RMContext rmContext, @@ -141,59 +185,76 @@ List errors = new ArrayList<>(); Set outstandingUpdate = new HashSet<>(); for (UpdateContainerRequest updateReq : request.getUpdateRequests()) { - RMContainer rmContainer = rmContext.getScheduler().getRMContainer( - updateReq.getContainerId()); - String msg = null; - if (rmContainer == null) { - msg = INVALID_CONTAINER_ID; - } - // Only allow updates if the requested version matches the current - // version - if (msg == null && updateReq.getContainerVersion() != - rmContainer.getContainer().getVersion()) { - msg = INCORRECT_CONTAINER_VERSION_ERROR + "|" - + updateReq.getContainerVersion() + "|" - + rmContainer.getContainer().getVersion(); - } - // No more than 1 container update per request. 
- if (msg == null && - outstandingUpdate.contains(updateReq.getContainerId())) { - msg = UPDATE_OUTSTANDING_ERROR; - } - if (msg == null) { - Resource original = rmContainer.getContainer().getResource(); - Resource target = updateReq.getCapability(); - if (Resources.fitsIn(target, original)) { - // This is a decrease request - if (validateIncreaseDecreaseRequest(rmContext, updateReq, - maximumAllocation, false)) { - decreaseResourceReqs.add(updateReq); - outstandingUpdate.add(updateReq.getContainerId()); + if (updateReq.getContainerUpdateType() != + ContainerUpdateType.UPDATE_EXECUTION_TYPE) { + RMContainer rmContainer = rmContext.getScheduler().getRMContainer( + updateReq.getContainerId()); + String msg = validateContainerIdAndVersion(outstandingUpdate, + updateReq, rmContainer); + if (msg == null) { + Resource original = rmContainer.getContainer().getResource(); + Resource target = updateReq.getCapability(); + if (Resources.fitsIn(target, original)) { + // This is a decrease request + if (validateIncreaseDecreaseRequest(rmContext, updateReq, + maximumAllocation, false)) { + decreaseResourceReqs.add(updateReq); + outstandingUpdate.add(updateReq.getContainerId()); + } else { + msg = RESOURCE_OUTSIDE_ALLOWED_RANGE; + } } else { - msg = RESOURCE_OUTSIDE_ALLOWED_RANGE; - } - } else { - // This is an increase request - if (validateIncreaseDecreaseRequest(rmContext, updateReq, - maximumAllocation, true)) { - increaseResourceReqs.add(updateReq); - outstandingUpdate.add(updateReq.getContainerId()); - } else { - msg = RESOURCE_OUTSIDE_ALLOWED_RANGE; + // This is an increase request + if (validateIncreaseDecreaseRequest(rmContext, updateReq, + maximumAllocation, true)) { + increaseResourceReqs.add(updateReq); + outstandingUpdate.add(updateReq.getContainerId()); + } else { + msg = RESOURCE_OUTSIDE_ALLOWED_RANGE; + } } } - } - if (msg != null) { - UpdateContainerError updateError = RECORD_FACTORY - .newRecordInstance(UpdateContainerError.class); - updateError.setReason(msg); 
- updateError.setUpdateContainerRequest(updateReq); - errors.add(updateError); + checkAndcreateUpdateError(errors, updateReq, msg); } } return errors; } + private static void checkAndcreateUpdateError( + List errors, UpdateContainerRequest updateReq, + String msg) { + if (msg != null) { + UpdateContainerError updateError = RECORD_FACTORY + .newRecordInstance(UpdateContainerError.class); + updateError.setReason(msg); + updateError.setUpdateContainerRequest(updateReq); + errors.add(updateError); + } + } + + private static String validateContainerIdAndVersion( + Set outstandingUpdate, UpdateContainerRequest updateReq, + RMContainer rmContainer) { + String msg = null; + if (rmContainer == null) { + msg = INVALID_CONTAINER_ID; + } + // Only allow updates if the requested version matches the current + // version + if (msg == null && updateReq.getContainerVersion() != + rmContainer.getContainer().getVersion()) { + msg = INCORRECT_CONTAINER_VERSION_ERROR + "|" + + updateReq.getContainerVersion() + "|" + + rmContainer.getContainer().getVersion(); + } + // No more than 1 container update per request. 
+ if (msg == null && + outstandingUpdate.contains(updateReq.getContainerId())) { + msg = UPDATE_OUTSTANDING_ERROR; + } + return msg; + } + /** * Utility method to validate a list resource requests, by insuring that the * requested memory/vcore is non-negative and not greater than max diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java index dbc6169..8824730 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java @@ -108,6 +108,8 @@ // Transitions from ACQUIRED state .addTransition(RMContainerState.ACQUIRED, RMContainerState.RUNNING, RMContainerEventType.LAUNCHED) + .addTransition(RMContainerState.ACQUIRED, RMContainerState.ACQUIRED, + RMContainerEventType.ACQUIRED) .addTransition(RMContainerState.ACQUIRED, RMContainerState.COMPLETED, RMContainerEventType.FINISHED, new FinishedTransition()) .addTransition(RMContainerState.ACQUIRED, RMContainerState.RELEASED, @@ -125,6 +127,8 @@ .addTransition(RMContainerState.RUNNING, RMContainerState.RELEASED, RMContainerEventType.RELEASED, new KillTransition()) .addTransition(RMContainerState.RUNNING, RMContainerState.RUNNING, + RMContainerEventType.ACQUIRED) + .addTransition(RMContainerState.RUNNING, RMContainerState.RUNNING, RMContainerEventType.RESERVED, new ContainerReservedTransition()) .addTransition(RMContainerState.RUNNING, RMContainerState.RUNNING, RMContainerEventType.CHANGE_RESOURCE, new 
ChangeResourceTransition()) @@ -163,13 +167,13 @@ private final WriteLock writeLock; private final ApplicationAttemptId appAttemptId; private final NodeId nodeId; - private final Container container; private final RMContext rmContext; private final EventHandler eventHandler; private final ContainerAllocationExpirer containerAllocationExpirer; private final String user; private final String nodeLabelExpression; + private Container container; private Resource reservedResource; private NodeId reservedNode; private SchedulerRequestKey reservedSchedulerKey; @@ -276,6 +280,10 @@ public Container getContainer() { return this.container; } + public synchronized void setContainer(Container container) { + this.container = container; + } + @Override public RMContainerState getState() { this.readLock.lock(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index e94d800..bee82db 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import 
org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerUpdateType; import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.NMToken; @@ -54,6 +56,7 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.UpdatedContainer; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.api.ContainerType; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; @@ -133,6 +136,9 @@ private AtomicLong firstContainerAllocatedTime = new AtomicLong(0); protected List newlyAllocatedContainers = new ArrayList<>(); + protected Map newlyPromotedContainers = new HashMap<>(); + protected Map newlyDemotedContainers = new HashMap<>(); + protected List tempContainerToKill = new ArrayList<>(); protected Map newlyDecreasedContainers = new HashMap<>(); protected Map newlyIncreasedContainers = new HashMap<>(); protected Set updatedNMTokens = new HashSet<>(); @@ -635,10 +641,10 @@ public Resource getCurrentConsumption() { } private Container updateContainerAndNMToken(RMContainer rmContainer, - boolean newContainer, boolean increasedContainer) { + ContainerUpdateType updateType) { Container container = rmContainer.getContainer(); ContainerType containerType = ContainerType.TASK; - if (!newContainer) { + if (updateType != null) { container.setVersion(container.getVersion() + 1); } // The working knowledge is that masterContainer for AM is null as it @@ -662,12 +668,14 @@ private Container updateContainerAndNMToken(RMContainer rmContainer, return null; } - if (newContainer) { + if (updateType == null || + ContainerUpdateType.UPDATE_EXECUTION_TYPE == updateType) { rmContainer.handle(new RMContainerEvent( rmContainer.getContainerId(), 
RMContainerEventType.ACQUIRED)); } else { rmContainer.handle(new RMContainerUpdatesAcquiredEvent( - rmContainer.getContainerId(), increasedContainer)); + rmContainer.getContainerId(), + ContainerUpdateType.INCREASE_RESOURCE == updateType)); } return container; } @@ -699,8 +707,8 @@ private void updateNMToken(Container container) { Iterator i = newlyAllocatedContainers.iterator(); while (i.hasNext()) { RMContainer rmContainer = i.next(); - Container updatedContainer = updateContainerAndNMToken(rmContainer, - true, false); + Container updatedContainer = + updateContainerAndNMToken(rmContainer, null); // Only add container to return list when it's not null. // updatedContainer could be null when generate token failed, it can be // caused by DNS resolving failed. @@ -713,9 +721,114 @@ private void updateNMToken(Container container) { } finally { writeLock.unlock(); } + } + public void addToNewlyDemotedContainers(ContainerId containerId, + RMContainer rmContainer) { + newlyDemotedContainers.put(containerId, rmContainer); } - + + protected synchronized void addToNewlyAllocatedContainers( + RMContainer rmContainer) { + if (oppContainerContext == null) { + newlyAllocatedContainers.add(rmContainer); + return; + } + ContainerId matchedContainerId = + oppContainerContext.matchContainerToOutstandingPromotionReq( + rmContainer.getContainer()); + if (matchedContainerId != null) { + if (OpportunisticContainerContext.UNDEFINED == matchedContainerId) { + // This is a spurious allocation (relaxLocality = false + // resulted in the Container being allocated on an NM on the same host + // but not on the NM running the container to be updated. Can + // happen if more than one NM exists on the same host.. usually + // occurs when using MiniYARNCluster to test). 
+ tempContainerToKill.add(rmContainer); + } else { + newlyPromotedContainers.put(matchedContainerId, rmContainer); + } + } else { + newlyAllocatedContainers.add(rmContainer); + } + } + + /** + * A container is promoted if its executionType is changed from + * OPPORTUNISTIC to GUARANTEED. It id demoted if the change is from + * GUARANTEED to OPPORTUNISTIC. + * @return Newly Promoted and Demoted containers + */ + public List pullContainersWithUpdatedExecType() { + List updatedContainers = new ArrayList<>(); + if (oppContainerContext == null) { + return updatedContainers; + } + try { + writeLock.lock(); + for (Map newlyUpdatedContainers : + Arrays.asList(newlyPromotedContainers, newlyDemotedContainers)) { + Iterator> i = + newlyUpdatedContainers.entrySet().iterator(); + while (i.hasNext()) { + Map.Entry entry = i.next(); + ContainerId matchedContainerId = entry.getKey(); + RMContainer rmContainer = entry.getValue(); + + // swap containers + RMContainer existingRMContainer = swapContainer( + rmContainer, matchedContainerId); + Container updatedContainer = updateContainerAndNMToken( + existingRMContainer, ContainerUpdateType.UPDATE_EXECUTION_TYPE); + updatedContainers.add(updatedContainer); + + // Mark container for release (set RRs to null, so RM does not think + // it is a recoverable container) + ((RMContainerImpl) rmContainer).setResourceRequests(null); + tempContainerToKill.add(rmContainer); + i.remove(); + } + } + // Release all temporary containers + Iterator tempIter = tempContainerToKill.iterator(); + while (tempIter.hasNext()) { + RMContainer c = tempIter.next(); + ((AbstractYarnScheduler) rmContext.getScheduler()).completedContainer(c, + SchedulerUtils.createAbnormalContainerStatus(c.getContainerId(), + SchedulerUtils.UPDATED_CONTAINER), + RMContainerEventType.KILL); + tempIter.remove(); + } + return updatedContainers; + } finally { + writeLock.unlock(); + } + } + + private RMContainer swapContainer(RMContainer rmContainer, ContainerId + matchedContainerId) { 
+ RMContainer existingRMContainer = + getRMContainer(matchedContainerId); + if (existingRMContainer != null) { + // Swap updated container with the existing container + Container updatedContainer = rmContainer.getContainer(); + ContainerId tempContainerId = updatedContainer.getId(); + + Container existingContainer = existingRMContainer.getContainer(); + ContainerId existingContainerId = existingContainer.getId(); + + updatedContainer.setId(existingContainerId); + updatedContainer.setVersion(existingContainer.getVersion()); + ((RMContainerImpl) existingRMContainer).setContainer( + updatedContainer); + + existingContainer.setId(tempContainerId); + ((RMContainerImpl) rmContainer).setContainer( + existingContainer); + } + return existingRMContainer; + } + private List<Container> pullNewlyUpdatedContainers( Map<ContainerId, RMContainer> updatedContainerMap, boolean increase) { try { @@ -728,7 +841,8 @@ private void updateNMToken(Container container) { while (i.hasNext()) { RMContainer rmContainer = i.next().getValue(); Container updatedContainer = updateContainerAndNMToken(rmContainer, - false, increase); + increase ?
ContainerUpdateType.INCREASE_RESOURCE : + ContainerUpdateType.DECREASE_RESOURCE); if (updatedContainer != null) { returnContainerList.add(updatedContainer); i.remove(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index 6f905b9..57e5f27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -57,6 +57,9 @@ public static final String RELEASED_CONTAINER = "Container released by application"; + + public static final String UPDATED_CONTAINER = + "Temporary container killed by application for ExecutionType update"; public static final String LOST_CONTAINER = "Container released on a *lost* node"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index b14bc20..604a92b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -554,7 +554,7 @@ public void apply(Resource cluster, // Update this application for the allocated container if (!allocation.isIncreasedAllocation()) { // Allocate a new container - newlyAllocatedContainers.add(rmContainer); + addToNewlyAllocatedContainers(rmContainer); liveContainers.put(containerId, rmContainer); // Deduct pending resource requests diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java index a9591a5..7ecf8aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java @@ -454,7 +454,7 @@ public RMContainer allocate(NodeType type, FSSchedulerNode node, ((RMContainerImpl) rmContainer).setQueueName(this.getQueueName()); // Add it to allContainers list. 
- newlyAllocatedContainers.add(rmContainer); + addToNewlyAllocatedContainers(rmContainer); liveContainers.put(container.getId(), rmContainer); // Update consumption and track allocations diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoAppAttempt.java index e60f70e..9d6fbb6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoAppAttempt.java @@ -76,7 +76,7 @@ public RMContainer allocate(NodeType type, FiCaSchedulerNode node, updateAMContainerDiagnostics(AMState.ASSIGNED, null); // Add it to allContainers list. 
- newlyAllocatedContainers.add(rmContainer); + addToNewlyAllocatedContainers(rmContainer); ContainerId containerId = container.getId(); liveContainers.put(containerId, rmContainer); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java index 73d9e5c..37b6655 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java @@ -148,9 +148,11 @@ public void testNodeRemovalDuringAllocate() throws Exception { // After removal of node 1, only 1 node will be applicable for scheduling. for (int i = 0; i < 10; i++) { try { - am1.allocate( + AllocateResponse allocate = am1.allocate( Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1), - "*", Resources.createResource(1 * GB), 2)), + "*", Resources.createResource(1 * GB), 2, true, null, + ExecutionTypeRequest.newInstance( + ExecutionType.OPPORTUNISTIC, false))), null); } catch (Exception e) { Assert.fail("Allocate request should be handled on node removal");