diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 6c51626..aff6652 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -1736,7 +1736,8 @@ public synchronized Allocation allocate(
       List containers = normalAlloc.getContainers();
       if(containers.size() > 0) {
         // allocate excess container
-        FiCaSchedulerApp application = super.getApplicationAttempt(applicationAttemptId);
+        FiCaSchedulerApp application =
+            super.getCurrentApplicationAttempt(applicationAttemptId);
         ContainerId containerId = BuilderUtils.newContainerId(application
             .getApplicationAttemptId(), application.getNewContainerId());
         Container excessC = mock(Container.class);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 052ec22..a8a17b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -286,7 +286,8 @@ public void allocate(ApplicationAttemptId appAttemptId,
     addToContainerUpdates(response, allocation,
         ((AbstractYarnScheduler)getScheduler())
-            .getApplicationAttempt(appAttemptId).pullUpdateContainerErrors());
+            .getCurrentApplicationAttempt(appAttemptId)
+            .pullUpdateContainerErrors());
 
     response.setNumClusterNodes(getScheduler().getNumClusterNodes());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index 4fc2916..e28a69e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -129,7 +129,7 @@ public void registerApplicationMaster(
       RegisterApplicationMasterRequest request,
       RegisterApplicationMasterResponse response) throws IOException {
     SchedulerApplicationAttempt appAttempt = ((AbstractYarnScheduler)
-        getScheduler()).getApplicationAttempt(applicationAttemptId);
+        getScheduler()).getCurrentApplicationAttempt(applicationAttemptId);
     if (appAttempt.getOpportunisticContainerContext() == null) {
       OpportunisticContainerContext opCtx = new OpportunisticContainerContext();
@@ -167,7 +167,7 @@ public void allocate(ApplicationAttemptId appAttemptId,
     // Allocate OPPORTUNISTIC containers.
     SchedulerApplicationAttempt appAttempt =
         ((AbstractYarnScheduler)rmContext.getScheduler())
-            .getApplicationAttempt(appAttemptId);
+            .getCurrentApplicationAttempt(appAttemptId);
     OpportunisticContainerContext oppCtx =
         appAttempt.getOpportunisticContainerContext();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
index c730a2d..398aca3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
@@ -289,9 +289,9 @@ private boolean preChecksForMovingReservedContainerToNode(
 
     // For normal requests
     FiCaSchedulerApp app =
-        preemptionContext.getScheduler().getApplicationAttempt(
+        preemptionContext.getScheduler().getCurrentApplicationAttempt(
             reservedContainer.getApplicationAttemptId());
-    if (!app.getAppSchedulingInfo().canDelayTo(
+    if (app == null || !app.getAppSchedulingInfo().canDelayTo(
         reservedContainer.getAllocatedSchedulerKey(), ResourceRequest.ANY)) {
       // This is a hard locality request
       return false;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index d748860..131b809 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -2287,7 +2287,7 @@ public void setRecoveredFinalState(RMAppAttemptState finalState) {
       AbstractYarnScheduler ayScheduler =
           (AbstractYarnScheduler) scheduler;
       SchedulerApplicationAttempt attempt =
-          ayScheduler.getApplicationAttempt(applicationAttemptId);
+          ayScheduler.getCurrentApplicationAttempt(applicationAttemptId);
       if (attempt != null) {
         return attempt.getBlacklistedNodes();
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 2c27017..8862808 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -334,8 +334,8 @@ protected void containerIncreasedOnNode(ContainerId containerId,
   }
 
-  // TODO: Rename it to getCurrentApplicationAttempt
-  public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) {
+  public T getCurrentApplicationAttempt(
+      ApplicationAttemptId applicationAttemptId) {
     SchedulerApplication app = applications.get(
         applicationAttemptId.getApplicationId());
     return app == null ? null : app.getCurrentAppAttempt();
@@ -344,7 +344,8 @@ public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) {
   @Override
   public SchedulerAppReport getSchedulerAppInfo(
       ApplicationAttemptId appAttemptId) {
-    SchedulerApplicationAttempt attempt = getApplicationAttempt(appAttemptId);
+    SchedulerApplicationAttempt attempt =
+        getCurrentApplicationAttempt(appAttemptId);
     if (attempt == null) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Request for appInfo of unknown attempt " + appAttemptId);
@@ -357,7 +358,8 @@ public SchedulerAppReport getSchedulerAppInfo(
   @Override
   public ApplicationResourceUsageReport getAppResourceUsageReport(
       ApplicationAttemptId appAttemptId) {
-    SchedulerApplicationAttempt attempt = getApplicationAttempt(appAttemptId);
+    SchedulerApplicationAttempt attempt =
+        getCurrentApplicationAttempt(appAttemptId);
     if (attempt == null) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Request for appInfo of unknown attempt " + appAttemptId);
@@ -368,7 +370,7 @@ public ApplicationResourceUsageReport getAppResourceUsageReport(
   }
 
   public T getCurrentAttemptForContainer(ContainerId containerId) {
-    return getApplicationAttempt(containerId.getApplicationAttemptId());
+    return getCurrentApplicationAttempt(containerId.getApplicationAttemptId());
   }
 
   @Override
@@ -807,7 +809,8 @@ protected void refreshMaximumAllocation(Resource newMaxAlloc) {
   @Override
   public List getPendingResourceRequestsForAttempt(
       ApplicationAttemptId attemptId) {
-    SchedulerApplicationAttempt attempt = getApplicationAttempt(attemptId);
+    SchedulerApplicationAttempt attempt =
+        getCurrentApplicationAttempt(attemptId);
     if (attempt != null) {
       return attempt.getAppSchedulingInfo().getAllResourceRequests();
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index fde84c4..e4a79d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -848,7 +848,8 @@ private void doneApplicationAttempt(
     LOG.info("Application Attempt " + applicationAttemptId + " is done."
         + " finalState=" + rmAppAttemptFinalState);
 
-    FiCaSchedulerApp attempt = getApplicationAttempt(applicationAttemptId);
+    FiCaSchedulerApp attempt =
+        getCurrentApplicationAttempt(applicationAttemptId);
     SchedulerApplication application = applications.get(
         applicationAttemptId.getApplicationId());
@@ -903,7 +904,8 @@ public Allocation allocate(ApplicationAttemptId applicationAttemptId,
     List ask, List release, List blacklistAdditions,
     List blacklistRemovals, ContainerUpdates updateRequests) {
-    FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
+    FiCaSchedulerApp application =
+        getCurrentApplicationAttempt(applicationAttemptId);
     if (application == null) {
       LOG.error("Calling allocate on removed or non existent application "
           + applicationAttemptId.getApplicationId());
@@ -1075,7 +1077,7 @@ private void updateLabelsOnNode(NodeId nodeId,
 
     // Update resources of these containers
     for (RMContainer rmContainer : node.getCopiedListOfRunningContainers()) {
-      FiCaSchedulerApp application = getApplicationAttempt(
+      FiCaSchedulerApp application = getCurrentApplicationAttempt(
           rmContainer.getApplicationAttemptId());
       if (null != application) {
         application.nodePartitionUpdated(rmContainer, oldPartition,
@@ -1681,9 +1683,9 @@ protected void completedContainerInternal(
   @Lock(Lock.NoLock.class)
   @VisibleForTesting
   @Override
-  public FiCaSchedulerApp getApplicationAttempt(
+  public FiCaSchedulerApp getCurrentApplicationAttempt(
       ApplicationAttemptId applicationAttemptId) {
-    return super.getApplicationAttempt(applicationAttemptId);
+    return super.getCurrentApplicationAttempt(applicationAttemptId);
   }
 
   @Lock(Lock.NoLock.class)
@@ -1725,7 +1727,7 @@ public void markContainerForPreemption(ApplicationAttemptId aid,
           + ": appAttempt:" + aid.toString() + " container: " + cont.toString());
     }
-    FiCaSchedulerApp app = getApplicationAttempt(aid);
+    FiCaSchedulerApp app = getCurrentApplicationAttempt(aid);
     if (app != null) {
       app.markContainerForPreemption(cont.getContainerId());
     }
@@ -2080,8 +2082,8 @@ public void preValidateMoveApplication(ApplicationId appId,
     // Validation check - ACLs, submission limits for user & queue
     String user = application.getUser();
     // Check active partition only when attempt is available
-    FiCaSchedulerApp appAttempt =
-        getApplicationAttempt(ApplicationAttemptId.newInstance(appId, 0));
+    FiCaSchedulerApp appAttempt = getCurrentApplicationAttempt(
+        ApplicationAttemptId.newInstance(appId, 0));
     if (null != appAttempt) {
       checkQueuePartition(appAttempt, dest);
     }
@@ -2295,7 +2297,7 @@ public ResourceUsage getClusterResourceUsage() {
       return null;
     }
 
-    FiCaSchedulerApp app = getApplicationAttempt(
+    FiCaSchedulerApp app = getCurrentApplicationAttempt(
         rmContainer.getApplicationAttemptId());
     if (null == app) { return null; }
@@ -2468,7 +2470,7 @@ public void tryCommit(Resource cluster, ResourceCommitRequest r) {
     }
 
     if (attemptId != null) {
-      FiCaSchedulerApp app = getApplicationAttempt(attemptId);
+      FiCaSchedulerApp app = getCurrentApplicationAttempt(attemptId);
       // Required sanity check for attemptId - when async-scheduling enabled,
       // proposal might be outdated if AM failover just finished
       // and proposal queue was not be consumed in time
@@ -2548,7 +2550,7 @@ public boolean moveReservedContainer(RMContainer toBeMovedContainer,
       return false;
     }
 
-    FiCaSchedulerApp app = getApplicationAttempt(
+    FiCaSchedulerApp app = getCurrentApplicationAttempt(
         toBeMovedContainer.getApplicationAttemptId());
     if (null == app) {
       if (LOG.isDebugEnabled()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java
index 9aeaec6..093a37e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerContext.java
@@ -18,17 +18,15 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
-import java.util.Comparator;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerHealth;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
@@ -64,7 +62,7 @@
 
   FiCaSchedulerNode getNode(NodeId nodeId);
 
-  FiCaSchedulerApp getApplicationAttempt(ApplicationAttemptId attemptId);
+  FiCaSchedulerApp getCurrentApplicationAttempt(ApplicationAttemptId attemptId);
 
   PreemptionManager getPreemptionManager();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 2e48000..58a07df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -1000,7 +1000,7 @@ private void killContainersToEnforceMaxQueueCapacity(String partition,
     while (Resources.greaterThan(resourceCalculator, partitionResource,
         queueUsage.getUsed(partition), maxResource)) {
       RMContainer toKillContainer = killableContainerIter.next();
-      FiCaSchedulerApp attempt = csContext.getApplicationAttempt(
+      FiCaSchedulerApp attempt = csContext.getCurrentApplicationAttempt(
           toKillContainer.getContainerId().getApplicationAttemptId());
       FiCaSchedulerNode node = csContext.getNode(
           toKillContainer.getAllocatedNode());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index c521250..a2bacaf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -655,7 +655,7 @@ private void removeApplicationAttempt(
       writeLock.lock();
       LOG.info("Application " + applicationAttemptId + " is done. finalState="
           + rmAppAttemptFinalState);
-      FSAppAttempt attempt = getApplicationAttempt(applicationAttemptId);
+      FSAppAttempt attempt = getCurrentApplicationAttempt(applicationAttemptId);
 
       if (attempt == null) {
         LOG.info(
@@ -1076,7 +1076,7 @@ void attemptScheduling(FSSchedulerNode node) {
   }
 
   public FSAppAttempt getSchedulerApp(ApplicationAttemptId appAttemptId) {
-    return super.getApplicationAttempt(appAttemptId);
+    return super.getCurrentApplicationAttempt(appAttemptId);
   }
 
   @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 94c7e16..5eaa5ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -328,7 +328,8 @@ public Allocation allocate(ApplicationAttemptId applicationAttemptId,
     List ask, List release, List blacklistAdditions,
     List blacklistRemovals, ContainerUpdates updateRequests) {
-    FifoAppAttempt application = getApplicationAttempt(applicationAttemptId);
+    FifoAppAttempt application =
+        getCurrentApplicationAttempt(applicationAttemptId);
     if (application == null) {
       LOG.error("Calling allocate on removed or non existent application "
           + applicationAttemptId.getApplicationId());
@@ -463,7 +464,7 @@ private synchronized void doneApplicationAttempt(
       ApplicationAttemptId applicationAttemptId,
       RMAppAttemptState rmAppAttemptFinalState, boolean keepContainers)
       throws IOException {
-    FifoAppAttempt attempt = getApplicationAttempt(applicationAttemptId);
+    FifoAppAttempt attempt = getCurrentApplicationAttempt(applicationAttemptId);
     SchedulerApplication application =
         applications.get(applicationAttemptId.getApplicationId());
     if (application == null || attempt == null) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
index 82a946e..8e54993 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppAttemptInfo.java
@@ -74,8 +74,8 @@ public AppAttemptInfo(ResourceManager rm, RMAppAttempt attempt, String user,
       if (rm.getResourceScheduler() instanceof AbstractYarnScheduler) {
         AbstractYarnScheduler ayScheduler =
             (AbstractYarnScheduler) rm.getResourceScheduler();
-        SchedulerApplicationAttempt sattempt =
-            ayScheduler.getApplicationAttempt(attempt.getAppAttemptId());
+        SchedulerApplicationAttempt sattempt = ayScheduler
+            .getCurrentApplicationAttempt(attempt.getAppAttemptId());
         if (sattempt != null) {
           blacklistedNodes =
               StringUtils.join(sattempt.getBlacklistedNodes(), ", ");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index f11939a..8330204 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -255,7 +255,7 @@ public AppInfo(ResourceManager rm, RMApp app, Boolean hasAccess,
         RMAppAttempt attempt = app.getCurrentAppAttempt();
         if (null != attempt) {
           FiCaSchedulerApp ficaAppAttempt = ((CapacityScheduler) scheduler)
-              .getApplicationAttempt(attempt.getAppAttemptId());
+              .getCurrentApplicationAttempt(attempt.getAppAttemptId());
           resourceInfo = null != ficaAppAttempt ?
               new ResourcesInfo(ficaAppAttempt.getSchedulingResourceUsage()) :
               null;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index e967807..dd7fb89 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -1104,7 +1104,7 @@ private static void waitForSchedulerAppAttemptAdded(
     rm.drainEventsImplicitly();
     // Wait for at most 5 sec
     while (null == ((AbstractYarnScheduler) rm.getResourceScheduler())
-        .getApplicationAttempt(attemptId) && tick < 50) {
+        .getCurrentApplicationAttempt(attemptId) && tick < 50) {
       Thread.sleep(100);
       if (tick % 10 == 0) {
         LOG.info("waiting for SchedulerApplicationAttempt="
@@ -1114,7 +1114,7 @@ private static void waitForSchedulerAppAttemptAdded(
     }
     Assert.assertNotNull("Timed out waiting for SchedulerApplicationAttempt="
         + attemptId + " to be added.", ((AbstractYarnScheduler)
-        rm.getResourceScheduler()).getApplicationAttempt(attemptId));
+        rm.getResourceScheduler()).getCurrentApplicationAttempt(attemptId));
   }
 
   public static MockAM launchAMWhenAsyncSchedulingEnabled(RMApp app, MockRM rm)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
index 9b9eb3c..d32eb1f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
@@ -307,7 +307,7 @@ public void testContainerPromoteAndDemoteBeforeContainerStart() throws Exception
 
     // Verify that the container is still in ACQUIRED state wrt the RM.
     RMContainer rmContainer = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(
+        .getCurrentApplicationAttempt(
             uc.getId().getApplicationAttemptId()).getRMContainer(uc.getId());
     Assert.assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());
@@ -360,7 +360,8 @@ public void testContainerPromoteAfterContainerStart() throws Exception {
         .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
 
     OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(attemptId).getOpportunisticContainerContext();
+        .getCurrentApplicationAttempt(attemptId)
+        .getOpportunisticContainerContext();
     // Send add and update node events to AM Service.
     amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
     amservice.handle(new NodeAddedSchedulerEvent(rmNode2));
@@ -400,7 +401,7 @@ public void testContainerPromoteAfterContainerStart() throws Exception {
 
     // Verify that container is actually running wrt the RM..
     RMContainer rmContainer = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(
+        .getCurrentApplicationAttempt(
             container.getId().getApplicationAttemptId()).getRMContainer(
                 container.getId());
     Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
@@ -446,7 +447,7 @@ public void testContainerPromoteAfterContainerStart() throws Exception {
 
     // Verify that the Container is still in RUNNING state wrt RM..
     rmContainer = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(
+        .getCurrentApplicationAttempt(
             uc.getId().getApplicationAttemptId()).getRMContainer(uc.getId());
     Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
@@ -485,7 +486,8 @@ public void testContainerPromoteAfterContainerComplete() throws Exception {
         .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
 
     OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(attemptId).getOpportunisticContainerContext();
+        .getCurrentApplicationAttempt(attemptId)
+        .getOpportunisticContainerContext();
     // Send add and update node events to AM Service.
     amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
     amservice.handle(new NodeAddedSchedulerEvent(rmNode2));
@@ -525,7 +527,7 @@ public void testContainerPromoteAfterContainerComplete() throws Exception {
 
     // Verify that container is actually running wrt the RM..
     RMContainer rmContainer = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(
+        .getCurrentApplicationAttempt(
             container.getId().getApplicationAttemptId()).getRMContainer(
                 container.getId());
     Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
@@ -539,7 +541,7 @@ public void testContainerPromoteAfterContainerComplete() throws Exception {
 
     // Verify that container has been removed..
     rmContainer = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(
+        .getCurrentApplicationAttempt(
             container.getId().getApplicationAttemptId()).getRMContainer(
                 container.getId());
     Assert.assertNull(rmContainer);
@@ -594,7 +596,7 @@ public void testContainerAutoUpdateContainer() throws Exception {
         .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
 
     OpportunisticContainerContext ctxt =
-        ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId)
+        ((CapacityScheduler) scheduler).getCurrentApplicationAttempt(attemptId)
             .getOpportunisticContainerContext();
     // Send add and update node events to AM Service.
     amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
@@ -620,7 +622,8 @@ public void testContainerAutoUpdateContainer() throws Exception {
 
     // Verify that container is actually running wrt the RM..
     RMContainer rmContainer = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(container.getId().getApplicationAttemptId())
+        .getCurrentApplicationAttempt(
+            container.getId().getApplicationAttemptId())
         .getRMContainer(container.getId());
     Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
@@ -757,7 +760,8 @@ public void testNodeRemovalDuringAllocate() throws Exception {
     ((RMNodeImpl) rmNode2)
         .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
     OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
-        .getApplicationAttempt(attemptId).getOpportunisticContainerContext();
+        .getCurrentApplicationAttempt(attemptId)
+        .getOpportunisticContainerContext();
     // Send add and update node events to AM Service.
     amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
     amservice.handle(new NodeAddedSchedulerEvent(rmNode2));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
index 39313d0..4649059 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
@@ -210,7 +210,8 @@ public void testNMTokenSentForNormalContainer() throws Exception {
     // Call getNewContainerId to increase container Id so that the AM container
     // Id doesn't equal to one.
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-    cs.getApplicationAttempt(attempt.getAppAttemptId()).getNewContainerId();
+    cs.getCurrentApplicationAttempt(attempt.getAppAttemptId())
+        .getNewContainerId();
 
     MockAM am = MockRM.launchAM(app, rm, nm1);
     // am container Id not equal to 1.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 2c37f44..11bcb21 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -1106,8 +1106,9 @@ public void testReleasedContainerNotRecovered() throws Exception {
       public Boolean get() {
         // release cache is cleaned up and previous running container is not
         // recovered
-        return scheduler.getApplicationAttempt(am1.getApplicationAttemptId())
-            .getPendingRelease().isEmpty()
+        return scheduler
+            .getCurrentApplicationAttempt(am1.getApplicationAttemptId())
+            .getPendingRelease().isEmpty()
             && scheduler.getRMContainer(runningContainer) == null;
       }
     }, 1000, 20000);
@@ -1133,12 +1134,12 @@ public static void waitForNumContainersToRecover(int num, MockRM rm,
     AbstractYarnScheduler scheduler =
         (AbstractYarnScheduler) rm.getResourceScheduler();
     SchedulerApplicationAttempt attempt =
-        scheduler.getApplicationAttempt(attemptId);
+        scheduler.getCurrentApplicationAttempt(attemptId);
     while (attempt == null) {
       System.out.println("Wait for scheduler attempt " + attemptId
           + " to be created");
       Thread.sleep(200);
-      attempt = scheduler.getApplicationAttempt(attemptId);
+      attempt = scheduler.getCurrentApplicationAttempt(attemptId);
     }
     while (attempt.getLiveContainers().size() < num) {
       System.out.println("Wait for " + num
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
index 60b9e4b..c031768 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
@@ -682,7 +682,7 @@ public void testResourceRequestRecoveryToTheRightAppAttempt()
 
     SchedulerApplicationAttempt firstSchedulerAppAttempt =
         ((AbstractYarnScheduler) rm
             .getResourceScheduler())
-            .getApplicationAttempt(applicationAttemptOneID);
+            .getCurrentApplicationAttempt(applicationAttemptOneID);
 
     // AM crashes, and a new app-attempt gets created
     node.nodeHeartbeat(applicationAttemptOneID, 1, ContainerState.COMPLETE);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index cdc67ed..36c35bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -779,7 +779,7 @@ public static void waitSchedulerApplicationAttemptStopped(
       AbstractYarnScheduler ys, ApplicationAttemptId attemptId)
       throws InterruptedException {
     SchedulerApplicationAttempt schedulerApp =
-        ys.getApplicationAttempt(attemptId);
+        ys.getCurrentApplicationAttempt(attemptId);
     if (null == schedulerApp) {
       return;
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index a526222..5072f79 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -1121,12 +1121,12 @@ public void testBlackListNodes() throws Exception {
     cs.allocate(appAttemptId, Collections.emptyList(),
         Collections.emptyList(), Collections.singletonList(host), null,
         NULL_UPDATE_REQUESTS);
-    Assert.assertTrue(cs.getApplicationAttempt(appAttemptId)
+    Assert.assertTrue(cs.getCurrentApplicationAttempt(appAttemptId)
         .isPlaceBlacklisted(host));
     cs.allocate(appAttemptId, Collections.emptyList(),
         Collections.emptyList(), null, Collections.singletonList(host),
         NULL_UPDATE_REQUESTS);
-    Assert.assertFalse(cs.getApplicationAttempt(appAttemptId)
+    Assert.assertFalse(cs.getCurrentApplicationAttempt(appAttemptId)
        .isPlaceBlacklisted(host));
     rm.stop();
   }
@@ -1701,7 +1701,7 @@ public void testRecoverRequestAfterPreemption() throws Exception {
       RMContainer rmContainer = cs.getRMContainer(containerId1);
       List requests = rmContainer.getResourceRequests();
-      FiCaSchedulerApp app = cs.getApplicationAttempt(am1
+      FiCaSchedulerApp app = cs.getCurrentApplicationAttempt(am1
           .getApplicationAttemptId());
       FiCaSchedulerNode node = cs.getNode(rmContainer.getAllocatedNode());
@@ -1776,7 +1776,7 @@ public void testMoveAppBasic() throws Exception {
     List appsInA1 = scheduler.getAppsInQueue("a1");
     assertEquals(1, appsInA1.size());
     String queue =
-        scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInA1.get(0)).getQueue()
             .getQueueName();
     Assert.assertEquals("a1", queue);
@@ -1801,7 +1801,7 @@ public void testMoveAppBasic() throws Exception {
     appsInB1 = scheduler.getAppsInQueue("b1");
     assertEquals(1, appsInB1.size());
     queue =
-        scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInB1.get(0)).getQueue()
            .getQueueName();
     Assert.assertEquals("b1", queue);
@@ -1838,7 +1838,7 @@ public void testMoveAppSameParent() throws Exception {
     List appsInA1 = scheduler.getAppsInQueue("a1");
     assertEquals(1, appsInA1.size());
     String queue =
-        scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInA1.get(0)).getQueue()
            .getQueueName();
     Assert.assertEquals("a1", queue);
@@ -1860,7 +1860,7 @@ public void testMoveAppSameParent() throws Exception {
     appsInA2 = scheduler.getAppsInQueue("a2");
     assertEquals(1, appsInA2.size());
     queue =
-        scheduler.getApplicationAttempt(appsInA2.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInA2.get(0)).getQueue()
            .getQueueName();
     Assert.assertEquals("a2", queue);
@@ -2358,7 +2358,7 @@ public void testMoveAllApps() throws Exception {
     assertTrue(appsInA.contains(appAttemptId));
     assertEquals(1, appsInA.size());
     String queue =
-        scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInA1.get(0)).getQueue()
            .getQueueName();
     Assert.assertEquals("a1", queue);
@@ -2380,7 +2380,7 @@ public void testMoveAllApps() throws Exception {
     appsInB1 = scheduler.getAppsInQueue("b1");
     assertEquals(1, appsInB1.size());
     queue =
-        scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInB1.get(0)).getQueue()
            .getQueueName();
     Assert.assertEquals("b1", queue);
@@ -2635,7 +2635,7 @@ public void testRemoveAttemptMoveAdded() throws Exception {
     when(rmContainer.getNodeLabelExpression())
        .thenReturn(RMNodeLabelsManager.NO_LABEL);
     when(rmContainer.getContainerId()).thenReturn(newContainerId);
-    sch.getApplicationAttempt(appAttemptId).getLiveContainersMap()
+    sch.getCurrentApplicationAttempt(appAttemptId).getLiveContainersMap()
        .put(newContainerId, rmContainer);
     QueueMetrics queueA1M = queueA1.getMetrics();
     queueA1M.incrPendingResources(rmContainer.getNodeLabelExpression(),
@@ -2736,7 +2736,7 @@ public void testKillAllAppsInQueue() throws Exception {
     assertTrue(appsInA.contains(appAttemptId));
     assertEquals(1, appsInA.size());
     String queue =
-        scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInA1.get(0)).getQueue()
            .getQueueName();
     Assert.assertEquals("a1", queue);
@@ -3265,7 +3265,7 @@ public void testSchedulerKeyGarbageCollection() throws Exception {
     Assert.assertEquals(0, allocatedContainers.size());
     Collection schedulerKeys =
-        ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId)
+        ((CapacityScheduler) scheduler).getCurrentApplicationAttempt(attemptId)
            .getAppSchedulingInfo().getSchedulerKeys();
     Assert.assertEquals(4, schedulerKeys.size());
@@ -3281,7 +3281,7 @@ public void testSchedulerKeyGarbageCollection() throws Exception {
     Assert.assertEquals(3, schedulerKeys.size());
 
     List resReqs =
-        ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId)
+        ((CapacityScheduler) scheduler).getCurrentApplicationAttempt(attemptId)
            .getAppSchedulingInfo().getAllResourceRequests();
 
     // Verify 1 outstanding schedulerKey is removed from the
@@ -3309,8 +3309,9 @@ public void testSchedulerKeyGarbageCollection() throws Exception {
     // Verify 1 outstanding schedulerKey is removed
     Assert.assertEquals(2, schedulerKeys.size());
 
-    resReqs = ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId)
-        .getAppSchedulingInfo().getAllResourceRequests();
+    resReqs =
+        ((CapacityScheduler) scheduler).getCurrentApplicationAttempt(attemptId)
+            .getAppSchedulingInfo().getAllResourceRequests();
 
     // Verify the map size is not increased due to 0 req
     Assert.assertEquals(2, resReqs.size());
@@ -3326,7 +3327,7 @@ public void testSchedulerKeyGarbageCollection() throws Exception {
         null);
 
     schedulerKeys =
-        ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId)
+        ((CapacityScheduler) scheduler).getCurrentApplicationAttempt(attemptId)
            .getAppSchedulingInfo().getSchedulerKeys();
 
     Thread.sleep(200);
@@ -3346,7 +3347,7 @@ public void testSchedulerKeyGarbageCollection() throws Exception {
     // Verify no more outstanding schedulerKeys..
     Assert.assertEquals(0, schedulerKeys.size());
     resReqs =
-        ((CapacityScheduler) scheduler).getApplicationAttempt(attemptId)
+        ((CapacityScheduler) scheduler).getCurrentApplicationAttempt(attemptId)
            .getAppSchedulingInfo().getAllResourceRequests();
     Assert.assertEquals(0, resReqs.size());
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index 0c3130d..797ff19 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -210,7 +210,7 @@ public void testCommitProposalForFailedAppAttempt()
         YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS, null, null, true, true);
     MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1);
     FiCaSchedulerApp schedulerApp =
-        scheduler.getApplicationAttempt(am.getApplicationAttemptId());
+        scheduler.getCurrentApplicationAttempt(am.getApplicationAttemptId());
 
     // allocate and launch 1 containers and running on nm2
     allocateAndLaunchContainers(am, nm2, rm, 1,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerDynamicBehavior.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerDynamicBehavior.java
index 9aba30c..5f43567 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerDynamicBehavior.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerDynamicBehavior.java
@@ -220,7 +220,7 @@ public void testMoveAppToPlanQueue() throws Exception {
     assertTrue(appsInA.isEmpty());
 
     String queue =
-        scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInB1.get(0)).getQueue()
            .getQueueName();
     Assert.assertEquals("b1", queue);
@@ -246,7 +246,7 @@ public void testMoveAppToPlanQueue() throws Exception {
     appsInDefQ = scheduler.getAppsInQueue(defQName);
     assertEquals(1, appsInDefQ.size());
     queue =
-        scheduler.getApplicationAttempt(appsInDefQ.get(0)).getQueue()
+        scheduler.getCurrentApplicationAttempt(appsInDefQ.get(0)).getQueue()
            .getQueueName();
     Assert.assertTrue(queue.equals(defQName));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java
index e7157b8..a9e689a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java
@@ -106,7 +106,7 @@ public void testSimplePreemption() throws Exception {
     }
 
     // App1 should have 7 containers now, and no available resource for cluster
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
         am1.getApplicationAttemptId());
     Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
@@ -143,7 +143,7 @@ public void testSimplePreemption() throws Exception {
     // Call CS.handle once to see if container preempted
     cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
 
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
         am2.getApplicationAttemptId());
 
     // App1 has 6 containers, and app2 has 2 containers
@@ -185,7 +185,7 @@ public void testPreemptionConsidersNodeLocalityDelay()
     }
 
     // App1 should have 7 containers now, and no available resource for cluster
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
         am1.getApplicationAttemptId());
     Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
@@ -225,7 +225,7 @@ public void testPreemptionConsidersNodeLocalityDelay()
     // Call CS.handle once to see if container preempted
     cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
 
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
         am2.getApplicationAttemptId());
 
     // App1 has 7 containers, and app2 has 1 containers (no container preempted)
@@ -276,7 +276,7 @@ public void testPreemptionConsidersHardNodeLocality()
     }
 
     // App1 should have 7 containers now, and no available resource for cluster
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
@@ -317,7 +317,7 @@ public void testPreemptionConsidersHardNodeLocality()
     // Call CS.handle once to see if container preempted
     cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
 
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
        am2.getApplicationAttemptId());
 
     // App1 has 7 containers, and app2 has 1 containers (no container preempted)
@@ -373,7 +373,7 @@ public void testPreemptionPolicyShouldRespectAlreadyMarkedKillableContainers()
     }
 
     // App1 should have 7 containers now, and no available resource for cluster
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
@@ -473,7 +473,7 @@ public void testPreemptionPolicyCleanupKillableContainersWhenNoPreemptionNeeded(
     }
 
     // App1 should have 7 containers now, and no available resource for cluster
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
@@ -562,7 +562,7 @@ public void testPreemptionConsidersUserLimit()
     }
 
     // App1 should have 7 containers now, and no available resource for cluster
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
@@ -598,7 +598,7 @@ public void testPreemptionConsidersUserLimit()
     // Call CS.handle once to see if container preempted
     cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
 
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
        am2.getApplicationAttemptId());
 
     // App1 has 7 containers, and app2 has 1 containers (nothing preempted)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
index b4ebd15..79b0b99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
@@ -303,7 +303,8 @@ public RMNodeLabelsManager createNodeLabelManager() {
     checkUsedCapacity(rm, "a", 1024, 8000);
 
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-    FiCaSchedulerApp app = cs.getApplicationAttempt(am1.getApplicationAttemptId());
+    FiCaSchedulerApp app =
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
 
     // change h1's label to z
     mgr.replaceLabelsOnNode(ImmutableMap.of(nm1.getNodeId(), toSet("z")));
@@ -557,9 +558,9 @@ public RMNodeLabelsManager createNodeLabelManager() {
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
     FiCaSchedulerApp application1 =
-        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
     FiCaSchedulerApp application2 =
-        cs.getApplicationAttempt(am2.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId());
 
     // change h1's label to z
     cs.handle(new NodeLabelsUpdateSchedulerEvent(ImmutableMap.of(nm1.getNodeId(),
@@ -680,7 +681,8 @@ public RMNodeLabelsManager createNodeLabelManager() {
     checkAMUsedResource(rm, "a", 1024, "x");
 
     CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
-    FiCaSchedulerApp app = cs.getApplicationAttempt(am1.getApplicationAttemptId());
+    FiCaSchedulerApp app =
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
 
     // change h1's label to z
     cs.handle(new NodeLabelsUpdateSchedulerEvent(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
index 9146373..470132c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
@@ -108,7 +108,7 @@ public void testSimpleSurgicalPreemption()
     }
 
     // App1 should have 33 containers now
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
         am1.getApplicationAttemptId());
     Assert.assertEquals(33, schedulerApp1.getLiveContainers().size());
     // 17 from n1 and 16 from n2
@@ -201,7 +201,7 @@ public void testSurgicalPreemptionWithAvailableResource()
     }
 
     // App1 should have 31 containers now
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(39, schedulerApp1.getLiveContainers().size());
     // 17 from n1 and 16 from n2
@@ -213,7 +213,7 @@ public void testSurgicalPreemptionWithAvailableResource()
     // Submit app2 to queue-c and asks for a 4G container for AM
     RMApp app2 = rm1.submitApp(4 * GB, "app", "user", null, "c");
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
         ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
 
     // Call editSchedule: containers are selected to be preemption candidate
@@ -303,7 +303,7 @@ public void testPriorityPreemptionWhenAllQueuesAreBelowGuaranteedCapacities()
     // App1 should have 7 containers now, so the abs-used-cap of b is
     // 7 / 40 = 17.5% < 20% (guaranteed)
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
     // 4 from n1 and 3 from n2
@@ -314,7 +314,7 @@ public void testPriorityPreemptionWhenAllQueuesAreBelowGuaranteedCapacities()
     // Submit app2 to queue-c and asks for a 1G container for AM
     RMApp app2 = rm1.submitApp(18 * GB, "app", "user", null, "c");
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
 
     while (cs.getNode(rmNode1.getNodeID()).getReservedContainer() == null) {
@@ -407,7 +407,7 @@ public void testPriorityPreemptionRequiresMoveReservation()
       cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
     }
 
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(3, schedulerApp1.getLiveContainers().size());
@@ -420,7 +420,7 @@ public void testPriorityPreemptionRequiresMoveReservation()
     // Submit app2 to queue-c and asks for a 2G container for AM, on n3
     RMApp app2 = rm1.submitApp(2 * GB, "app", "user", null, "c");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3);
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
 
     // Asks 1 * 9G container
@@ -529,7 +529,7 @@ public void testPriorityPreemptionOnlyTriggeredWhenDemandingQueueUnsatisfied()
     }
 
     // App1 should have 9 containers now, so the abs-used-cap of b is 9%
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(9, schedulerApp1.getLiveContainers().size());
     for (int i = 0; i < 9; i++) {
@@ -541,7 +541,7 @@ public void testPriorityPreemptionOnlyTriggeredWhenDemandingQueueUnsatisfied()
     // Launch AM in NM9
     RMApp app2 = rm1.submitApp(10 * GB, "app", "user", null, "c");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, mockNMs[9]);
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
 
     // Ask 10 * 10GB containers
@@ -659,7 +659,7 @@ public void testPriorityPreemptionFromHighestPriorityQueueAndOldestContainer()
     }
 
     // App1 should have 5 containers now, one for each node
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(5, schedulerApp1.getLiveContainers().size());
     for (int i = 0; i < 5; i++) {
@@ -670,7 +670,7 @@ public void testPriorityPreemptionFromHighestPriorityQueueAndOldestContainer()
     // Submit app2 to queue-a and asks for a 0.5G container for AM (on n0)
     RMApp app2 = rm1.submitApp(512, "app", "user", null, "a");
     MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, mockNMs[0]);
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
 
     // Ask 2 * 3.5GB containers
@@ -692,7 +692,7 @@ public void testPriorityPreemptionFromHighestPriorityQueueAndOldestContainer()
     // Submit app3 to queue-b and asks for a 0.5G container for AM (on n2)
     RMApp app3 = rm1.submitApp(512, "app", "user", null, "b");
     MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, mockNMs[2]);
-    FiCaSchedulerApp schedulerApp3 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp3 = cs.getCurrentApplicationAttempt(
        ApplicationAttemptId.newInstance(app3.getApplicationId(), 1));
 
     // Ask 2 * 3.5GB containers
@@ -862,7 +862,7 @@ public void testPreemptionForFragmentatedCluster() throws Exception {
     }
 
     // App1 should have 5 containers now
-    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(
        am1.getApplicationAttemptId());
     Assert.assertEquals(5, schedulerApp1.getLiveContainers().size());
@@ -880,7 +880,7 @@ public void testPreemptionForFragmentatedCluster() throws Exception {
     }
 
     // App2 should have 2 containers now
-    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+    FiCaSchedulerApp schedulerApp2 = cs.getCurrentApplicationAttempt(
        am2.getApplicationAttemptId());
     Assert.assertEquals(2, schedulerApp2.getLiveContainers().size());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index dd6b25b..33e4d94 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -381,9 +381,9 @@ public void testExcessReservationWillBeUnreserved() throws Exception {
     // App2 will get preference to be allocated on node1, and node1 will be all
     // used by App2.
     FiCaSchedulerApp schedulerApp1 =
-        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
     FiCaSchedulerApp schedulerApp2 =
-        cs.getApplicationAttempt(am2.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId());
 
     // Check if a 4G contaienr allocated for app1, and nothing allocated for app2
     Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
@@ -464,9 +464,9 @@ public void testAllocationForReservedContainer() throws Exception {
     // App2 will get preference to be allocated on node1, and node1 will be all
     // used by App2.
     FiCaSchedulerApp schedulerApp1 =
-        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
     FiCaSchedulerApp schedulerApp2 =
-        cs.getApplicationAttempt(am2.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId());
 
     // Check if a 4G container allocated for app1, and nothing allocated for app2
     Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
@@ -560,9 +560,9 @@ public void testReservedContainerMetricsOnDecommisionedNode() throws Exception {
     // App2 will get preference to be allocated on node1, and node1 will be all
     // used by App2.
     FiCaSchedulerApp schedulerApp1 =
-        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
     FiCaSchedulerApp schedulerApp2 =
-        cs.getApplicationAttempt(am2.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId());
 
     // Check if a 4G container allocated for app1, and nothing allocated for app2
     Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
@@ -621,7 +621,7 @@ public void testAssignMultipleOffswitchContainers() throws Exception {
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
 
     FiCaSchedulerApp schedulerApp1 =
-        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
 
     // App1 will get one container allocated (plus AM container
     Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
@@ -678,7 +678,7 @@ public void testContinuousReservationLookingWhenUsedEqualsMax() throws Exception
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
 
     FiCaSchedulerApp schedulerApp1 =
-        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
 
     // App1 will get 2 container allocated (plus AM container)
     Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
@@ -731,7 +731,7 @@ public void testPendingResourcesConsideringUserLimit() throws Exception {
     cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
 
     FiCaSchedulerApp schedulerApp1 =
-        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
 
     // App1 will get 1 container reserved
     Assert.assertEquals(1, schedulerApp1.getReservedContainers().size());
@@ -754,7 +754,7 @@ public void testPendingResourcesConsideringUserLimit() throws Exception {
         .getMemorySize());
 
     FiCaSchedulerApp schedulerApp2 =
-        cs.getApplicationAttempt(am2.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId());
     Assert.assertEquals(4 * GB,
         schedulerApp2.getAppAttemptResourceUsage().getUsed().getMemorySize());
     Assert.assertEquals(0 * GB,
@@ -821,11 +821,11 @@ public void testQueuePriorityOrdering() throws Exception {
     RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
 
     FiCaSchedulerApp schedulerApp1 =
-        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId());
     FiCaSchedulerApp schedulerApp2 =
-        cs.getApplicationAttempt(am2.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId());
     FiCaSchedulerApp schedulerApp3 =
-        cs.getApplicationAttempt(am3.getApplicationAttemptId());
+        cs.getCurrentApplicationAttempt(am3.getApplicationAttemptId());
 
     // container will be allocated to am1
     // App1 will get 2 container allocated (plus AM container)
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java index 740ef33..9eab442 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java @@ -523,7 +523,7 @@ public RMNodeLabelsManager createNodeLabelManager() { "h1"); // Check if a 4G container allocated for app1, and 4G is reserved - FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1 + FiCaSchedulerApp schedulerApp1 = cs.getCurrentApplicationAttempt(am1 .getApplicationAttemptId()); Assert.assertEquals(2, schedulerApp1.getLiveContainers().size()); Assert.assertTrue(schedulerApp1.getReservedContainers().size() > 0); @@ -550,7 +550,7 @@ public RMNodeLabelsManager createNodeLabelManager() { private void checkPendingResource(MockRM rm, int priority, ApplicationAttemptId attemptId, int memory) { CapacityScheduler cs = (CapacityScheduler) rm.getRMContext().getScheduler(); - FiCaSchedulerApp app = cs.getApplicationAttempt(attemptId); + FiCaSchedulerApp app = cs.getCurrentApplicationAttempt(attemptId); PendingAsk ask = app.getAppSchedulingInfo().getPendingAsk( TestUtils.toSchedulerKey(priority), "*"); @@ -610,7 +610,7 @@ public RMNodeLabelsManager createNodeLabelManager() { CapacityScheduler cs = (CapacityScheduler) rm1.getRMContext().getScheduler(); FiCaSchedulerApp app = - cs.getApplicationAttempt(am1.getApplicationAttemptId()); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId()); checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y"); checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "y"); checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4, @@ -715,8 +715,10 @@ public RMNodeLabelsManager createNodeLabelManager() { // App2 will get preference to be allocated on node1, and node1 will be all // used by App2. 
- FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId()); - FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(am2.getApplicationAttemptId()); + FiCaSchedulerApp schedulerApp1 = + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId()); + FiCaSchedulerApp schedulerApp2 = + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId()); // app1 get nothing in nm1 (partition=y) checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(), schedulerApp1); checkNumOfContainersInAnAppOnGivenNode(9, nm2.getNodeId(), schedulerApp1); @@ -1615,7 +1617,7 @@ public RMNodeLabelsManager createNodeLabelManager() { am2.allocate("*", 1 * GB, 1, new ArrayList(), "x"); doNMHeartbeat(rm, nm1.getNodeId(), 1); checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(), - cs.getApplicationAttempt(am2.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId())); // Test case 2 // Do another allocation, a will go first since it has 0 use_capacity(x) and @@ -1623,7 +1625,7 @@ public RMNodeLabelsManager createNodeLabelManager() { am2.allocate("*", 1 * GB, 1, new ArrayList(), "x"); doNMHeartbeat(rm, nm1.getNodeId(), 1); checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); // Test case 3 // Just like above, when doing non-exclusive allocation, b will go first as well. @@ -1631,17 +1633,17 @@ public RMNodeLabelsManager createNodeLabelManager() { am2.allocate("*", 1 * GB, 1, new ArrayList(), ""); doNMHeartbeat(rm, nm1.getNodeId(), 2); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am2.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); // Test case 4 // After b allocated, we should be able to allocate non-exlusive container in a doNMHeartbeat(rm, nm1.getNodeId(), 2); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am2.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); // Test case 5 // b/c/d asks non-exclusive container together, b will go first irrelated to @@ -1651,50 +1653,50 @@ public RMNodeLabelsManager createNodeLabelManager() { am4.allocate("*", 1 * GB, 2, new ArrayList(), ""); doNMHeartbeat(rm, nm1.getNodeId(), 2); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(), - cs.getApplicationAttempt(am2.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(), - cs.getApplicationAttempt(am3.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am3.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(), - cs.getApplicationAttempt(am4.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am4.getApplicationAttemptId())); // Test case 6 // After b 
allocated, c will go first by lexicographic order doNMHeartbeat(rm, nm1.getNodeId(), 1); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(), - cs.getApplicationAttempt(am2.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(), - cs.getApplicationAttempt(am3.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am3.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(0, nm1.getNodeId(), - cs.getApplicationAttempt(am4.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am4.getApplicationAttemptId())); // Test case 7 // After c allocated, d will go first because it has less used_capacity(x) // than c doNMHeartbeat(rm, nm1.getNodeId(), 1); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(), - cs.getApplicationAttempt(am2.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(), - cs.getApplicationAttempt(am3.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am3.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(), - cs.getApplicationAttempt(am4.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am4.getApplicationAttemptId())); // Test case 8 // After d allocated, c will go first, c/d has same use_capacity(x), so compare c/d's lexicographic order doNMHeartbeat(rm, nm1.getNodeId(), 1); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(), - cs.getApplicationAttempt(am2.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am3.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am3.getApplicationAttemptId())); checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(), - cs.getApplicationAttempt(am4.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am4.getApplicationAttemptId())); } @@ -1768,7 +1770,7 @@ public RMNodeLabelsManager createNodeLabelManager() { am2.allocate("*", 1 * GB, 1, new ArrayList(), "x"); doNMHeartbeat(rm, nm1.getNodeId(), 1); checkNumOfContainersInAnAppOnGivenNode(2, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); } @Test @@ -1852,7 +1854,7 @@ public RMNodeLabelsManager createNodeLabelManager() { am1.allocate("*", 4 * GB, 2, new ArrayList(), "x"); doNMHeartbeat(rm, nm1.getNodeId(), 10); checkNumOfContainersInAnAppOnGivenNode(3, nm1.getNodeId(), - cs.getApplicationAttempt(am1.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am1.getApplicationAttemptId())); // Try to launch app2 in a2, asked 2GB, should success RMApp app2 = rm.submitApp(2 * GB, "app", "user", null, "a2", "x"); @@ -1864,7 +1866,7 @@ public RMNodeLabelsManager createNodeLabelManager() { doNMHeartbeat(rm, 
nm1.getNodeId(), 10); checkNumOfContainersInAnAppOnGivenNode(1, nm1.getNodeId(), - cs.getApplicationAttempt(am2.getApplicationAttemptId())); + cs.getCurrentApplicationAttempt(am2.getApplicationAttemptId())); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 4bc5127..109b869 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -446,7 +446,7 @@ public FiCaSchedulerNode getNode(NodeId nodeId) { } @Override - public FiCaSchedulerApp getApplicationAttempt( + public FiCaSchedulerApp getCurrentApplicationAttempt( ApplicationAttemptId applicationAttemptId) { return apps.get(applicationAttemptId); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java index 36ee68e..75af77b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java @@ -91,12 +91,12 @@ public static void waitForNumContainersToRecover(int num, MockRM rm, AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm.getResourceScheduler(); SchedulerApplicationAttempt attempt = - scheduler.getApplicationAttempt(attemptId); + scheduler.getCurrentApplicationAttempt(attemptId); while (attempt == null) { System.out.println("Wait for scheduler attempt " + attemptId + " to be created"); Thread.sleep(200); - attempt = scheduler.getApplicationAttempt(attemptId); + attempt = scheduler.getCurrentApplicationAttempt(attemptId); } while (attempt.getLiveContainers().size() < num) { System.out.println("Wait for " + num diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java index 9efa83d..3591f22 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java @@ -356,7 +356,7 @@ public void testFairSchedulerContinuousSchedulingInitTime() throws Exception { scheduler.addApplication(id11.getApplicationId(), "root.queue1", "user1", false); scheduler.addApplicationAttempt(id11, false, false); - fsAppAttempt = scheduler.getApplicationAttempt(id11); + fsAppAttempt = scheduler.getCurrentApplicationAttempt(id11); String hostName = "127.0.0.1"; RMNode node1 = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java index 78fadef..0e47d85 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java @@ -212,7 +212,7 @@ public FSAppAttempt getSchedulerApp(ApplicationAttemptId return null ; } @Override - public FSAppAttempt getApplicationAttempt(ApplicationAttemptId + public FSAppAttempt getCurrentApplicationAttempt(ApplicationAttemptId applicationAttemptId) { return null; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java index 1cbdec3..c2e98bb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java @@ -1506,7 +1506,7 @@ public void verifyAppsXML(NodeList nodes, RMApp app, boolean hasResourceReq) (Element) resourceRequests.getElementsByTagName("capability").item(0); ResourceRequest rr = ((AbstractYarnScheduler)rm.getRMContext().getScheduler()) - .getApplicationAttempt( + .getCurrentApplicationAttempt( app.getCurrentAppAttempt().getAppAttemptId()) .getAppSchedulingInfo().getAllResourceRequests().get(0); verifyResourceRequestsGeneric(rr, @@ -1659,7 +1659,7 @@ public void verifyResourceRequests(JSONArray resourceRequest, RMApp app) JSONObject requestInfo = resourceRequest.getJSONObject(0); ResourceRequest rr = ((AbstractYarnScheduler) rm.getRMContext().getScheduler()) - .getApplicationAttempt( + .getCurrentApplicationAttempt( app.getCurrentAppAttempt().getAppAttemptId()) .getAppSchedulingInfo().getAllResourceRequests().get(0); verifyResourceRequestsGeneric(rr,
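
Note (illustration only, not part of the patch): after the rename, callers look up the attempt via getCurrentApplicationAttempt, which can return null before the scheduler has registered the attempt; the waitForNumContainersToRecover hunk above polls for exactly that reason. A minimal Java sketch of the same guard, with an assumed helper name (waitForSchedulerAttempt) and the raw AbstractYarnScheduler handle these tests already use:

    // Hypothetical helper (assumed name, for illustration): poll until the
    // scheduler has registered the attempt, since getCurrentApplicationAttempt
    // may return null before the attempt is created.
    private static SchedulerApplicationAttempt waitForSchedulerAttempt(
        AbstractYarnScheduler scheduler, ApplicationAttemptId attemptId)
        throws InterruptedException {
      SchedulerApplicationAttempt attempt =
          scheduler.getCurrentApplicationAttempt(attemptId);
      while (attempt == null) {
        Thread.sleep(200); // same back-off as the test helper above
        attempt = scheduler.getCurrentApplicationAttempt(attemptId);
      }
      return attempt;
    }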