diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index ab94175..f046680 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -531,7 +531,7 @@ public AllocateResponse allocate(AllocateRequest request) Resource used = BuilderUtils.newResource(0, 0); int numContainers = 0; if (schedulerNodeReport != null) { - used = schedulerNodeReport.getUsedResource(); + used = schedulerNodeReport.getAllocatedResource(); numContainers = schedulerNodeReport.getNumContainers(); } NodeId nodeId = rmNode.getNodeID(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index ba1edf9..cae6b07 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -1002,7 +1002,7 @@ private NodeReport createNodeReports(RMNode rmNode) { Resource used = BuilderUtils.newResource(0, 0); int 
numContainers = 0; if (schedulerNodeReport != null) { - used = schedulerNodeReport.getUsedResource(); + used = schedulerNodeReport.getAllocatedResource(); numContainers = schedulerNodeReport.getNumContainers(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java index ef4a0d4..b1be2d4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java @@ -94,9 +94,9 @@ public String getLiveNodeManagers() { ni.getNodeManagerVersion()); if(report != null) { info.put("NumContainers", report.getNumContainers()); - info.put("UsedMemoryMB", report.getUsedResource().getMemory()); + info.put("UsedMemoryMB", report.getAllocatedResource().getMemory()); info.put("AvailableMemoryMB", - report.getAvailableResource().getMemory()); + report.getUnallocatedResource().getMemory()); } nodesInfo.add(info); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index 03edd40..38ba1a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -54,9 +54,9 @@ private static final Log LOG = LogFactory.getLog(SchedulerNode.class); - private Resource availableResource = Resource.newInstance(0, 0); - private Resource usedResource = Resource.newInstance(0, 0); - private Resource totalResourceCapability; + private Resource unallocatedResource = Resource.newInstance(0, 0); + private Resource allocatedResource = Resource.newInstance(0, 0); + private Resource totalResource; private RMContainer reservedContainer; private volatile int numContainers; private volatile ResourceUtilization containersUtilization = @@ -65,20 +65,20 @@ ResourceUtilization.newInstance(0, 0, 0f); - /* set of containers that are allocated containers */ + /** Set of containers that are allocated containers. */ private final Map launchedContainers = - new HashMap(); + new HashMap<>(); private final RMNode rmNode; private final String nodeName; - + private volatile Set labels = null; - + public SchedulerNode(RMNode node, boolean usePortForNodeName, Set labels) { this.rmNode = node; - this.availableResource = Resources.clone(node.getTotalCapability()); - this.totalResourceCapability = Resources.clone(node.getTotalCapability()); + this.unallocatedResource = Resources.clone(node.getTotalCapability()); + this.totalResource = Resources.clone(node.getTotalCapability()); if (usePortForNodeName) { nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort(); } else { @@ -97,23 +97,26 @@ public RMNode getRMNode() { /** * Set total resources on the node. - * @param resource total resources on the node. + * @param resource Total resources on the node. 
*/ public synchronized void setTotalResource(Resource resource){ - this.totalResourceCapability = resource; - this.availableResource = Resources.subtract(totalResourceCapability, - this.usedResource); + this.totalResource = resource; + this.unallocatedResource = Resources.subtract(totalResource, + this.allocatedResource); } /** * Get the ID of the node which contains both its hostname and port. - * - * @return the ID of the node + * @return The ID of the node. */ public NodeId getNodeID() { return this.rmNode.getNodeID(); } + /** + * Get HTTP address for the node. + * @return HTTP address for the node. + */ public String getHttpAddress() { return this.rmNode.getHttpAddress(); } @@ -126,8 +129,7 @@ public String getHttpAddress() { * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} constant. * The main usecase of this is Yarn minicluster to be able to differentiate * node manager instances by their port number. - * - * @return name of the node for scheduling matching decisions. + * @return Name of the node for scheduling matching decisions. */ public String getNodeName() { return nodeName; @@ -135,7 +137,6 @@ public String getNodeName() { /** * Get rackname. - * * @return rackname */ public String getRackName() { @@ -145,13 +146,11 @@ public String getRackName() { /** * The Scheduler has allocated containers on this node to the given * application. 
- * - * @param rmContainer - * allocated container + * @param rmContainer Allocated container */ public synchronized void allocateContainer(RMContainer rmContainer) { Container container = rmContainer.getContainer(); - deductAvailableResource(container.getResource()); + deductUnallocatedResource(container.getResource()); ++numContainers; launchedContainers.put(container.getId(), rmContainer); @@ -159,27 +158,35 @@ public synchronized void allocateContainer(RMContainer rmContainer) { LOG.info("Assigned container " + container.getId() + " of capacity " + container.getResource() + " on host " + rmNode.getNodeAddress() + ", which has " + numContainers + " containers, " - + getUsedResource() + " used and " + getAvailableResource() + + getAllocatedResource() + " used and " + getUnallocatedResource() + " available after allocation"); } - + + /** + * Change the resources allocated for a container. + * @param containerId Identifier of the container to change. + * @param deltaResource Change in the resource allocation. + * @param increase True if the change is an increase of allocation. + */ private synchronized void changeContainerResource(ContainerId containerId, Resource deltaResource, boolean increase) { if (increase) { - deductAvailableResource(deltaResource); + deductUnallocatedResource(deltaResource); } else { - addAvailableResource(deltaResource); + addUnallocatedResource(deltaResource); } LOG.info((increase ? "Increased" : "Decreased") + " container " + containerId + " of capacity " + deltaResource + " on host " + rmNode.getNodeAddress() + ", which has " + numContainers - + " containers, " + getUsedResource() + " used and " - + getAvailableResource() + " available after allocation"); + + " containers, " + getAllocatedResource() + " used and " + + getUnallocatedResource() + " available after allocation"); } /** - * The Scheduler increased container + * Increase the resources allocated to a container. + * @param containerId Identifier of the container to change. 
+ * @param deltaResource Increase of resource allocation. */ public synchronized void increaseContainer(ContainerId containerId, Resource deltaResource) { @@ -187,7 +194,9 @@ public synchronized void increaseContainer(ContainerId containerId, } /** - * The Scheduler decreased container + * Decrease the resources allocated to a container. + * @param containerId Identifier of the container to change. + * @param deltaResource Decrease of resource allocation. */ public synchronized void decreaseContainer(ContainerId containerId, Resource deltaResource) { @@ -195,32 +204,33 @@ public synchronized void decreaseContainer(ContainerId containerId, } /** - * Get available resources on the node. - * - * @return available resources on the node + * Get unallocated resources on the node. + * @return Unallocated resources on the node */ - public synchronized Resource getAvailableResource() { - return this.availableResource; + public synchronized Resource getUnallocatedResource() { + return this.unallocatedResource; } /** - * Get used resources on the node. - * - * @return used resources on the node + * Get allocated resources on the node. + * @return Allocated resources on the node */ - public synchronized Resource getUsedResource() { - return this.usedResource; + public synchronized Resource getAllocatedResource() { + return this.allocatedResource; } /** * Get total resources on the node. - * - * @return total resources on the node. + * @return Total resources on the node. */ public synchronized Resource getTotalResource() { - return this.totalResourceCapability; + return this.totalResource; } + /** + * Check if a container is launched by this node. + * @return If the container is launched by the node. 
+ */ public synchronized boolean isValidContainer(ContainerId containerId) { if (launchedContainers.containsKey(containerId)) { return true; @@ -228,16 +238,18 @@ public synchronized boolean isValidContainer(ContainerId containerId) { return false; } + /** + * Update the resources of the node when releasing a container. + * @param container Container to release. + */ private synchronized void updateResource(Container container) { - addAvailableResource(container.getResource()); + addUnallocatedResource(container.getResource()); --numContainers; } /** * Release an allocated container on this node. - * - * @param container - * container to be released + * @param container Container to be released. */ public synchronized void releaseContainer(Container container) { if (!isValidContainer(container.getId())) { @@ -245,7 +257,7 @@ public synchronized void releaseContainer(Container container) { return; } - /* remove the containers from the nodemanger */ + // Remove the containers from the nodemanager if (null != launchedContainers.remove(container.getId())) { updateResource(container); } @@ -253,81 +265,118 @@ LOG.info("Released container " + container.getId() + " of capacity " + container.getResource() + " on host " + rmNode.getNodeAddress() + ", which currently has " + numContainers + " containers, " - + getUsedResource() + " used and " + getAvailableResource() + + getAllocatedResource() + " used and " + getUnallocatedResource() + " available" + ", release resources=" + true); } - private synchronized void addAvailableResource(Resource resource) { + /** + * Add unallocated resources to the node. This is used when unallocating a + * container. + * @param resource Resources to add. 
+ */ + private synchronized void addUnallocatedResource(Resource resource) { if (resource == null) { LOG.error("Invalid resource addition of null resource for " + rmNode.getNodeAddress()); return; } - Resources.addTo(availableResource, resource); - Resources.subtractFrom(usedResource, resource); + Resources.addTo(unallocatedResource, resource); + Resources.subtractFrom(allocatedResource, resource); } - private synchronized void deductAvailableResource(Resource resource) { + /** + * Deduct unallocated resources from the node. This is used when allocating a + * container. + * @param resource Resources to deduct. + */ + private synchronized void deductUnallocatedResource(Resource resource) { if (resource == null) { LOG.error("Invalid deduction of null resource for " + rmNode.getNodeAddress()); return; } - Resources.subtractFrom(availableResource, resource); - Resources.addTo(usedResource, resource); + Resources.subtractFrom(unallocatedResource, resource); + Resources.addTo(allocatedResource, resource); } /** * Reserve container for the attempt on this node. + * @param attempt Application attempt asking for the reservation. + * @param priority Priority of the reservation. + * @param container Container reserving resources for. */ public abstract void reserveResource(SchedulerApplicationAttempt attempt, Priority priority, RMContainer container); /** * Unreserve resources on this node. + * @param attempt Application attempt that had done the reservation. */ public abstract void unreserveResource(SchedulerApplicationAttempt attempt); @Override public String toString() { return "host: " + rmNode.getNodeAddress() + " #containers=" - + getNumContainers() + " available=" + getAvailableResource() - + " used=" + getUsedResource(); + + getNumContainers() + " available=" + getUnallocatedResource() + + " used=" + getAllocatedResource(); } /** * Get number of active containers on the node. 
- * - * @return number of active containers on the node + * @return Number of active containers on the node. */ public int getNumContainers() { return numContainers; } + /** + * Get the running containers in the node. + * @return List of running containers in the node. + */ public synchronized List getRunningContainers() { return new ArrayList(launchedContainers.values()); } + /** + * Get the reserved container in the node. + * @return Reserved container in the node. + */ public synchronized RMContainer getReservedContainer() { return reservedContainer; } + /** + * Set the reserved container in the node. + * @param reservedContainer Reserved container in the node. + */ protected synchronized void setReservedContainer(RMContainer reservedContainer) { this.reservedContainer = reservedContainer; } + /** + * Recover a container. + * @param rmContainer Container to recover. + */ public synchronized void recoverContainer(RMContainer rmContainer) { if (rmContainer.getState().equals(RMContainerState.COMPLETED)) { return; } allocateContainer(rmContainer); } - + + /** + * Get the labels for the node. + * @return Set of labels for the node. + */ public Set getLabels() { return labels; } - + + /** + * Update the labels for the node. + * @param labels Set of labels for the node. + */ public void updateLabels(Set labels) { this.labels = labels; } @@ -336,6 +385,7 @@ public void updateLabels(Set labels) { * Get partition of which the node belongs to, if node-labels of this node is * empty or null, it belongs to NO_LABEL partition. And since we only support * one partition for each node (YARN-2694), first label will be its partition. + * @return Partition for the node. 
*/ public String getPartition() { if (this.labels == null || this.labels.isEmpty()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java index b1f6c64..6bbc283 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNodeReport.java @@ -28,28 +28,28 @@ @Private @Stable public class SchedulerNodeReport { - private final Resource used; - private final Resource avail; + private final Resource allocated; + private final Resource unallocated; private final int num; public SchedulerNodeReport(SchedulerNode node) { - this.used = node.getUsedResource(); - this.avail = node.getAvailableResource(); + this.allocated = node.getAllocatedResource(); + this.unallocated = node.getUnallocatedResource(); this.num = node.getNumContainers(); } /** - * @return the amount of resources currently used by the node. + * @return the amount of resources currently allocated by the node. 
*/ - public Resource getUsedResource() { - return used; + public Resource getAllocatedResource() { + return allocated; } /** - * @return the amount of resources currently available on the node + * @return the amount of resources currently unallocated on the node */ - public Resource getAvailableResource() { - return avail; + public Resource getUnallocatedResource() { + return unallocated; } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 7f844a0..acc2782 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -1096,7 +1096,7 @@ private synchronized void nodeUpdate(RMNode nm) { .handle( new RMNodeResourceUpdateEvent(nm.getNodeID(), ResourceOption .newInstance(getSchedulerNode(nm.getNodeID()) - .getUsedResource(), 0))); + .getAllocatedResource(), 0))); } schedulerHealth.updateSchedulerReleaseDetails(lastNodeUpdateTime, releaseResources); @@ -1109,8 +1109,8 @@ private synchronized void nodeUpdate(RMNode nm) { // Now node data structures are upto date and ready for scheduling. 
if(LOG.isDebugEnabled()) { - LOG.debug("Node being looked for scheduling " + nm - + " availableResource: " + node.getAvailableResource()); + LOG.debug("Node being looked for scheduling " + nm + + " availableResource: " + node.getUnallocatedResource()); } } @@ -1254,11 +1254,11 @@ protected synchronized void allocateContainersToNode(FiCaSchedulerNode node) { // Try to schedule more if there are no reservations to fulfill if (node.getReservedContainer() == null) { - if (calculator.computeAvailableContainers(node.getAvailableResource(), + if (calculator.computeAvailableContainers(node.getUnallocatedResource(), minimumAllocation) > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Trying to schedule on node: " + node.getNodeName() + - ", available: " + node.getAvailableResource()); + ", available: " + node.getUnallocatedResource()); } assignment = root.assignContainers( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index a7d8796..7cf5565 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -496,7 +496,7 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, private boolean canAssign(Resource clusterResource, FiCaSchedulerNode node) { return (node.getReservedContainer() == null) && Resources.greaterThanOrEqual(resourceCalculator, clusterResource, - node.getAvailableResource(), minimumAllocation); + 
node.getUnallocatedResource(), minimumAllocation); } private ResourceLimits getResourceLimitsOfChild(CSQueue child, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/IncreaseContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/IncreaseContainerAllocator.java index 16cf6d3..25e5824 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/IncreaseContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/IncreaseContainerAllocator.java @@ -112,7 +112,7 @@ private CSAssignment allocateIncreaseRequestFromReservedContainer( SchedulerNode node, Resource cluster, SchedContainerChangeRequest increaseRequest) { if (Resources.fitsIn(rc, cluster, increaseRequest.getDeltaCapacity(), - node.getAvailableResource())) { + node.getUnallocatedResource())) { // OK, we can allocate this increase request // Unreserve it first application.unreserve(increaseRequest.getPriority(), @@ -141,7 +141,7 @@ private CSAssignment allocateIncreaseRequestFromReservedContainer( private CSAssignment allocateIncreaseRequest(FiCaSchedulerNode node, Resource cluster, SchedContainerChangeRequest increaseRequest) { if (Resources.fitsIn(rc, cluster, increaseRequest.getDeltaCapacity(), - node.getAvailableResource())) { + node.getUnallocatedResource())) { // Notify node node.increaseContainer(increaseRequest.getContainerId(), increaseRequest.getDeltaCapacity()); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index 820cccd..e168edf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -423,7 +423,7 @@ private ContainerAllocation assignContainer(Resource clusterResource, } Resource capability = request.getCapability(); - Resource available = node.getAvailableResource(); + Resource available = node.getUnallocatedResource(); Resource totalResource = node.getTotalResource(); if (!Resources.lessThanOrEqual(rc, clusterResource, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index 730743c..4d563cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -589,7 +589,7 @@ public void updateNodeInfoForAMDiagnostics(FiCaSchedulerNode node) { diagnosticMessageBldr.append(", Total resource : "); diagnosticMessageBldr.append(node.getTotalResource()); diagnosticMessageBldr.append(", Available resource : "); - diagnosticMessageBldr.append(node.getAvailableResource()); + diagnosticMessageBldr.append(node.getUnallocatedResource()); diagnosticMessageBldr.append(" )."); updateAMContainerDiagnostics(AMState.ACTIVATED, diagnosticMessageBldr.toString()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java index 7e0a693..f1cefad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java @@ -191,7 +191,7 @@ private void subtractResourcesOnBlacklistedNodes( SchedulerNode node = scheduler.getSchedulerNode(nodeId); if (node != null) { Resources.subtractFrom(availableResources, - node.getAvailableResource()); + node.getUnallocatedResource()); } } if (availableResources.getMemory() < 0) { @@ -613,7 +613,7 @@ private Resource assignContainer( Resource capability = request.getCapability(); // How much does the node have? 
- Resource available = node.getAvailableResource(); + Resource available = node.getUnallocatedResource(); Container container = null; if (reserved) { @@ -840,7 +840,7 @@ public boolean assignReservedContainer(FSSchedulerNode node) { // Note that we have an assumption here that // there's only one container size per priority. if (Resources.fitsIn(node.getReservedContainer().getReservedResource(), - node.getAvailableResource())) { + node.getUnallocatedResource())) { assignContainer(node, true); } return true; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 1dbcda2..2801bee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -1069,7 +1069,7 @@ private synchronized void nodeUpdate(RMNode nm) { .handle( new RMNodeResourceUpdateEvent(nm.getNodeID(), ResourceOption .newInstance(getSchedulerNode(nm.getNodeID()) - .getUsedResource(), 0))); + .getAllocatedResource(), 0))); } if (continuousSchedulingEnabled) { @@ -1105,7 +1105,7 @@ void continuousSchedulingAttempt() throws InterruptedException { FSSchedulerNode node = getFSSchedulerNode(nodeId); try { if (node != null && Resources.fitsIn(minimumAllocation, - node.getAvailableResource())) { + node.getUnallocatedResource())) { attemptScheduling(node); } } catch (Throwable ex) { @@ -1137,8 +1137,8 @@ public int compare(NodeId n1, NodeId n2) { return -1; } return 
RESOURCE_CALCULATOR.compare(clusterResource, - nodes.get(n2).getAvailableResource(), - nodes.get(n1).getAvailableResource()); + nodes.get(n2).getUnallocatedResource(), + nodes.get(n1).getUnallocatedResource()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index a0e14111..147c3f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -527,7 +527,7 @@ private void assignContainers(FiCaSchedulerNode node) { // Done if (Resources.lessThan(resourceCalculator, clusterResource, - node.getAvailableResource(), minimumAllocation)) { + node.getUnallocatedResource(), minimumAllocation)) { break; } } @@ -682,13 +682,9 @@ private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application " request=" + request + " type=" + type); Resource capability = request.getCapability(); - int availableContainers = - node.getAvailableResource().getMemory() / capability.getMemory(); // TODO: A buggy - // application - // with this - // zero would - // crash the - // scheduler. + // TODO: A buggy application with this zero would crash the scheduler. 
+ int availableContainers = node.getUnallocatedResource().getMemory() / + capability.getMemory(); int assignedContainers = Math.min(assignableContainers, availableContainers); @@ -760,7 +756,7 @@ private synchronized void nodeUpdate(RMNode rmNode) { .handle( new RMNodeResourceUpdateEvent(rmNode.getNodeID(), ResourceOption .newInstance(getSchedulerNode(rmNode.getNodeID()) - .getUsedResource(), 0))); + .getAllocatedResource(), 0))); } if (rmContext.isWorkPreservingRecoveryEnabled() @@ -769,14 +765,14 @@ private synchronized void nodeUpdate(RMNode rmNode) { } if (Resources.greaterThanOrEqual(resourceCalculator, clusterResource, - node.getAvailableResource(),minimumAllocation)) { + node.getUnallocatedResource(), minimumAllocation)) { LOG.debug("Node heartbeat " + rmNode.getNodeID() + - " available resource = " + node.getAvailableResource()); + " available resource = " + node.getUnallocatedResource()); assignContainers(node); LOG.debug("Node after allocation " + rmNode.getNodeID() + " resource = " - + node.getAvailableResource()); + + node.getUnallocatedResource()); } updateAvailableResourcesMetrics(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java index 1099baf..9f3c7e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java @@ -57,8 +57,10 @@ _("Minimum Queue Memory Capacity:" , Integer.toString(sinfo.getMinQueueMemoryCapacity())). 
_("Maximum Queue Memory Capacity:" , Integer.toString(sinfo.getMaxQueueMemoryCapacity())). _("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())). - _("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())). - _("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())). + _("Used Node Capacity:", + Integer.toString(sinfo.getAllocatedNodeCapacity())). + _("Available Node Capacity:", + Integer.toString(sinfo.getUnallocatedNodeCapacity())). _("Total Node Capacity:" , Integer.toString(sinfo.getTotalNodeCapacity())). _("Number of Node Containers:" , Integer.toString(sinfo.getNumContainers())); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index 9603468..2163fa9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -127,8 +127,8 @@ protected void render(Block html) { } } NodeInfo info = new NodeInfo(ni, sched); - int usedMemory = (int) info.getUsedMemory(); - int availableMemory = (int) info.getAvailableMemory(); + int usedMemory = (int) info.getAllocatedMemory(); + int availableMemory = (int) info.getUnallocatedMemory(); TR>> row = tbody.tr().td(StringUtils.join(",", info.getNodeLabels())) .td(info.getRack()).td(info.getState()).td(info.getNodeId()); @@ -143,11 +143,11 @@ protected void render(Block html) { .td(info.getHealthReport()) .td(String.valueOf(info.getNumContainers())).td().br() .$title(String.valueOf(usedMemory))._() - 
._(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().td().br() - .$title(String.valueOf(availableMemory))._() + ._(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._() + .td().br().$title(String.valueOf(availableMemory))._() ._(StringUtils.byteDesc(availableMemory * BYTES_IN_MB))._() - .td(String.valueOf(info.getUsedVirtualCores())) - .td(String.valueOf(info.getAvailableVirtualCores())) + .td(String.valueOf(info.getAllocatedVirtualCores())) + .td(String.valueOf(info.getUnallocatedVirtualCores())) .td(ni.getNodeManagerVersion())._(); } tbody._()._(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java index bd940d1..e4d45bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java @@ -43,8 +43,8 @@ protected int minQueueMemoryCapacity; protected int maxQueueMemoryCapacity; protected int numNodes; - protected int usedNodeCapacity; - protected int availNodeCapacity; + protected int allocatedNodeCapacity; + protected int unallocatedNodeCapacity; protected int totalNodeCapacity; protected int numContainers; @@ -72,15 +72,16 @@ public FifoSchedulerInfo(final ResourceManager rm) { this.qstate = qInfo.getQueueState(); this.numNodes = rmContext.getRMNodes().size(); - this.usedNodeCapacity = 0; - this.availNodeCapacity = 0; + this.allocatedNodeCapacity = 0; + this.unallocatedNodeCapacity = 0; this.totalNodeCapacity = 0; this.numContainers = 
0; for (RMNode ni : rmContext.getRMNodes().values()) { SchedulerNodeReport report = fs.getNodeReport(ni.getNodeID()); - this.usedNodeCapacity += report.getUsedResource().getMemory(); - this.availNodeCapacity += report.getAvailableResource().getMemory(); + this.allocatedNodeCapacity += report.getAllocatedResource().getMemory(); + this.unallocatedNodeCapacity += + report.getUnallocatedResource().getMemory(); this.totalNodeCapacity += ni.getTotalCapability().getMemory(); this.numContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers(); } @@ -90,12 +91,12 @@ public int getNumNodes() { return this.numNodes; } - public int getUsedNodeCapacity() { - return this.usedNodeCapacity; + public int getAllocatedNodeCapacity() { + return this.allocatedNodeCapacity; } - public int getAvailNodeCapacity() { - return this.availNodeCapacity; + public int getUnallocatedNodeCapacity() { + return this.unallocatedNodeCapacity; } public int getTotalNodeCapacity() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java index 0f877f8..f7973eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java @@ -45,10 +45,10 @@ protected String version; protected String healthReport; protected int numContainers; - protected long usedMemoryMB; - protected long availMemoryMB; - protected long usedVirtualCores; - protected long availableVirtualCores; + protected long allocatedMemoryMB; + protected 
long unallocatedMemoryMB; + protected long allocatedVirtualCores; + protected long unallocatedVirtualCores; protected ArrayList nodeLabels = new ArrayList(); protected ResourceUtilizationInfo resourceUtilization; @@ -59,14 +59,16 @@ public NodeInfo(RMNode ni, ResourceScheduler sched) { NodeId id = ni.getNodeID(); SchedulerNodeReport report = sched.getNodeReport(id); this.numContainers = 0; - this.usedMemoryMB = 0; - this.availMemoryMB = 0; + this.allocatedMemoryMB = 0; + this.unallocatedMemoryMB = 0; if (report != null) { this.numContainers = report.getNumContainers(); - this.usedMemoryMB = report.getUsedResource().getMemory(); - this.availMemoryMB = report.getAvailableResource().getMemory(); - this.usedVirtualCores = report.getUsedResource().getVirtualCores(); - this.availableVirtualCores = report.getAvailableResource().getVirtualCores(); + this.allocatedMemoryMB = report.getAllocatedResource().getMemory(); + this.unallocatedMemoryMB = report.getUnallocatedResource().getMemory(); + this.allocatedVirtualCores = + report.getAllocatedResource().getVirtualCores(); + this.unallocatedVirtualCores = + report.getUnallocatedResource().getVirtualCores(); } this.id = id.toString(); this.rack = ni.getRackName(); @@ -124,20 +126,20 @@ public int getNumContainers() { return this.numContainers; } - public long getUsedMemory() { - return this.usedMemoryMB; + public long getAllocatedMemory() { + return this.allocatedMemoryMB; } - public long getAvailableMemory() { - return this.availMemoryMB; + public long getUnallocatedMemory() { + return this.unallocatedMemoryMB; } - public long getUsedVirtualCores() { - return this.usedVirtualCores; + public long getAllocatedVirtualCores() { + return this.allocatedVirtualCores; } - public long getAvailableVirtualCores() { - return this.availableVirtualCores; + public long getUnallocatedVirtualCores() { + return this.unallocatedVirtualCores; } public ArrayList getNodeLabels() { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java index b4ebf92..3ee5dbb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java @@ -215,10 +215,10 @@ synchronized public void checkResourceUsage() { LOG.info("Checking resource usage for " + containerManagerAddress); Assert.assertEquals(available.getMemory(), resourceManager.getResourceScheduler().getNodeReport( - this.nodeId).getAvailableResource().getMemory()); + this.nodeId).getUnallocatedResource().getMemory()); Assert.assertEquals(used.getMemory(), resourceManager.getResourceScheduler().getNodeReport( - this.nodeId).getUsedResource().getMemory()); + this.nodeId).getAllocatedResource().getMemory()); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index f0aa470..b32dfbf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -213,11 +213,11 @@ public void testSchedulerRecovery() throws Exception { assertTrue( "SchedulerNode#toString is not in expected format", schedulerNode1 - .toString().contains(schedulerNode1.getAvailableResource().toString())); + .toString().contains(schedulerNode1.getUnallocatedResource().toString())); assertTrue( "SchedulerNode#toString is not in expected format", schedulerNode1 - .toString().contains(schedulerNode1.getUsedResource().toString())); + .toString().contains(schedulerNode1.getAllocatedResource().toString())); // ********* check scheduler node state.******* // 2 running containers. @@ -234,8 +234,8 @@ public void testSchedulerRecovery() throws Exception { assertEquals(2, schedulerNode1.getNumContainers()); assertEquals(Resources.subtract(nmResource, usedResources), - schedulerNode1.getAvailableResource()); - assertEquals(usedResources, schedulerNode1.getUsedResource()); + schedulerNode1.getUnallocatedResource()); + assertEquals(usedResources, schedulerNode1.getAllocatedResource()); Resource availableResources = Resources.subtract(nmResource, usedResources); // ***** check queue state based on the underlying scheduler ******** @@ -379,8 +379,8 @@ public void testDynamicQueueRecovery() throws Exception { assertEquals(2, schedulerNode1.getNumContainers()); assertEquals(Resources.subtract(nmResource, usedResources), - schedulerNode1.getAvailableResource()); - assertEquals(usedResources, schedulerNode1.getUsedResource()); + schedulerNode1.getUnallocatedResource()); + assertEquals(usedResources, schedulerNode1.getAllocatedResource()); Resource availableResources = Resources.subtract(nmResource, usedResources); // 6. Verify the scheduler state like attempt info. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java index 05c897f..8411a4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java @@ -268,12 +268,12 @@ public void testUpdateMaxAllocationUsesTotal() throws IOException { SchedulerNode mockNode1 = mock(SchedulerNode.class); when(mockNode1.getNodeID()).thenReturn(NodeId.newInstance("foo", 8080)); - when(mockNode1.getAvailableResource()).thenReturn(emptyResource); + when(mockNode1.getUnallocatedResource()).thenReturn(emptyResource); when(mockNode1.getTotalResource()).thenReturn(fullResource1); SchedulerNode mockNode2 = mock(SchedulerNode.class); when(mockNode1.getNodeID()).thenReturn(NodeId.newInstance("bar", 8081)); - when(mockNode2.getAvailableResource()).thenReturn(emptyResource); + when(mockNode2.getUnallocatedResource()).thenReturn(emptyResource); when(mockNode2.getTotalResource()).thenReturn(fullResource2); verifyMaximumResourceCapability(configuredMaximumResource, scheduler); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java index 1569a12..58dd59a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java @@ -176,8 +176,8 @@ public void testApplicationPriorityAllocation() throws Exception { // check node report, 15 GB used (1 AM and 7 containers) and 1 GB available SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( nm1.getNodeId()); - Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(15 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(1 * GB, report_nm1.getUnallocatedResource().getMemory()); // Submit the second app App2 with priority 8 (Higher than App1) Priority appPriority2 = Priority.newInstance(8); @@ -189,8 +189,8 @@ public void testApplicationPriorityAllocation() throws Exception { // check node report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(16 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(0 * GB, report_nm1.getUnallocatedResource().getMemory()); // get scheduler CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); @@ -210,8 +210,8 @@ public void testApplicationPriorityAllocation() throws Exception 
{ // check node report, 12 GB used and 4 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(12 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(4 * GB, report_nm1.getUnallocatedResource().getMemory()); // send updated request for App1 am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList()); @@ -226,8 +226,8 @@ public void testApplicationPriorityAllocation() throws Exception { // check node report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(16 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(0 * GB, report_nm1.getUnallocatedResource().getMemory()); rm.stop(); } @@ -261,8 +261,8 @@ public void testPriorityWithPendingApplications() throws Exception { // check node report, 8 GB used (1 AM and 7 containers) and 0 GB available SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( nm1.getNodeId()); - Assert.assertEquals(8 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(8 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(0 * GB, report_nm1.getUnallocatedResource().getMemory()); // Submit the second app App2 with priority 7 Priority appPriority2 = Priority.newInstance(7); @@ -287,8 +287,8 @@ public void testPriorityWithPendingApplications() throws Exception { // check node report, 1 GB used and 7 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(1 * GB, 
report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(7 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(1 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(7 * GB, report_nm1.getUnallocatedResource().getMemory()); rm.stop(); } @@ -482,8 +482,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority() // check node report, 15 GB used (1 AM and 7 containers) and 1 GB available SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( nm1.getNodeId()); - Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(15 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(1 * GB, report_nm1.getUnallocatedResource().getMemory()); // Submit the second app App2 with priority 8 (Higher than App1) Priority appPriority2 = Priority.newInstance(8); @@ -495,8 +495,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority() // check node report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(16 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(0 * GB, report_nm1.getUnallocatedResource().getMemory()); // get scheduler CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); @@ -518,8 +518,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority() // check node report, 12 GB used and 4 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(12 * GB, 
report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(4 * GB, report_nm1.getUnallocatedResource().getMemory()); // add request for containers App1 am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList()); @@ -531,8 +531,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority() Assert.assertEquals(2, allocated2.size()); // check node report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(16 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(0 * GB, report_nm1.getUnallocatedResource().getMemory()); // kill 1 more counter = 0; @@ -548,8 +548,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority() // check node report, 14 GB used and 2 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(14 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(14 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getUnallocatedResource().getMemory()); // Change the priority of App1 to 3 (lowest) Priority appPriority3 = Priority.newInstance(3); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index bd2c4fe..1f896ff 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -837,8 +837,8 @@ public void testResourceOverCommit() throws Exception { SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( nm1.getNodeId()); // check node report, 2 GB used and 2 GB available - Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getUnallocatedResource().getMemory()); // add request for containers am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1); @@ -859,8 +859,8 @@ public void testResourceOverCommit() throws Exception { report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); // check node report, 4 GB used and 0 GB available - Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory()); - Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(0, report_nm1.getUnallocatedResource().getMemory()); + Assert.assertEquals(4 * GB, report_nm1.getAllocatedResource().getMemory()); // check container is assigned with 2 GB. Container c1 = allocated1.get(0); @@ -878,8 +878,8 @@ public void testResourceOverCommit() throws Exception { // Now, the used resource is still 4 GB, and available resource is minus value. 
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(4 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(-2 * GB, report_nm1.getUnallocatedResource().getMemory()); // Check container can complete successfully in case of resource over-commitment. ContainerStatus containerStatus = BuilderUtils.newContainerStatus( @@ -895,9 +895,9 @@ public void testResourceOverCommit() throws Exception { Assert.assertEquals(1, attempt1.getJustFinishedContainers().size()); Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getAllocatedResource().getMemory()); // As container return 2 GB back, the available resource becomes 0 again. - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(0 * GB, report_nm1.getUnallocatedResource().getMemory()); // Verify no NPE is trigger in schedule after resource is updated. 
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1); @@ -2190,8 +2190,8 @@ public void testAppReservationWithDominantResourceCalculator() throws Exception rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); // check node report - Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(1 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(9 * GB, report_nm1.getUnallocatedResource().getMemory()); // add request for containers am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 1 * GB, 1, 1); @@ -3431,7 +3431,7 @@ public void handle(Event event) { Assert.assertEquals(1 * GB, nm_0.getUsed().getMemory()); Resource usedResource = resourceManager.getResourceScheduler() - .getSchedulerNode(nm_0.getNodeId()).getUsedResource(); + .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource(); Assert.assertEquals(usedResource.getMemory(), 1 * GB); Assert.assertEquals(usedResource.getVirtualCores(), 1); // Check total resource of scheduler node is also changed to 1 GB 1 core @@ -3443,7 +3443,7 @@ public void handle(Event event) { // Check the available resource is 0/0 Resource availableResource = resourceManager.getResourceScheduler() - .getSchedulerNode(nm_0.getNodeId()).getAvailableResource(); + .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource(); Assert.assertEquals(availableResource.getMemory(), 0); Assert.assertEquals(availableResource.getVirtualCores(), 0); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java index 88c7c13..5169337 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java @@ -147,9 +147,9 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { any(ResourceLimits.class), any(SchedulingMode.class)); // Mock the node's resource availability - Resource available = node.getAvailableResource(); + Resource available = node.getUnallocatedResource(); doReturn(Resources.subtractFrom(available, allocatedResource)). - when(node).getAvailableResource(); + when(node).getUnallocatedResource(); } return new CSAssignment(allocatedResource, type); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java index b5b2222..1f22a06 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -388,7 +388,7 @@ public void testExcessReservationWillBeUnreserved() throws Exception { // NM1 has available resource = 2G (8G - 2 * 1G - 4G) Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) - .getAvailableResource().getMemory()); + .getUnallocatedResource().getMemory()); 
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Usage of queue = 4G + 2 * 1G + 4G (reserved) Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage() @@ -401,7 +401,7 @@ public void testExcessReservationWillBeUnreserved() throws Exception { // App2's reservation will be cancelled Assert.assertTrue(schedulerApp2.getReservedContainers().size() == 0); Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) - .getAvailableResource().getMemory()); + .getUnallocatedResource().getMemory()); Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); Assert.assertEquals(6 * GB, cs.getRootQueue().getQueueResourceUsage() .getUsed().getMemory()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java index 9e29842..f04748d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java @@ -1121,6 +1121,6 @@ private void verifyAvailableResourceOfSchedulerNode(MockRM rm, NodeId nodeId, CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); SchedulerNode node = cs.getNode(nodeId); Assert - .assertEquals(expectedMemory, node.getAvailableResource().getMemory()); + .assertEquals(expectedMemory, node.getUnallocatedResource().getMemory()); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java index d7ac0b2..645086d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java @@ -429,7 +429,7 @@ private void verifyAvailableResourceOfSchedulerNode(MockRM rm, NodeId nodeId, CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); SchedulerNode node = cs.getNode(nodeId); Assert - .assertEquals(expectedMemory, node.getAvailableResource().getMemory()); + .assertEquals(expectedMemory, node.getUnallocatedResource().getMemory()); } private Container getContainer( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 42dcd6d..69b0813 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -1311,7 +1311,7 @@ public void testReservation() throws Exception { assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(2*GB, node_0.getUsedResource().getMemory()); + assertEquals(2*GB, node_0.getAllocatedResource().getMemory()); assertEquals(4*GB, a.getMetrics().getReservedMB()); assertEquals(2*GB, a.getMetrics().getAllocatedMB()); @@ -1328,7 +1328,7 @@ public void testReservation() throws Exception { assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(1*GB, node_0.getUsedResource().getMemory()); + assertEquals(1*GB, node_0.getAllocatedResource().getMemory()); assertEquals(4*GB, a.getMetrics().getReservedMB()); assertEquals(1*GB, a.getMetrics().getAllocatedMB()); @@ -1345,7 +1345,7 @@ public void testReservation() throws Exception { assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(4*GB, node_0.getUsedResource().getMemory()); + assertEquals(4*GB, node_0.getAllocatedResource().getMemory()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(4*GB, a.getMetrics().getAllocatedMB()); } @@ -1434,7 +1434,7 @@ public void testReservationExchange() throws Exception { assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(2*GB, node_0.getUsedResource().getMemory()); + 
assertEquals(2*GB, node_0.getAllocatedResource().getMemory()); // Now free 1 container from app_0 i.e. 1G, and re-reserve it RMContainer rmContainer = app_0.getLiveContainers().iterator().next(); @@ -1449,7 +1449,7 @@ public void testReservationExchange() throws Exception { assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(1*GB, node_0.getUsedResource().getMemory()); + assertEquals(1*GB, node_0.getAllocatedResource().getMemory()); assertEquals(1, app_1.getReReservations(priority)); // Re-reserve @@ -1459,7 +1459,7 @@ public void testReservationExchange() throws Exception { assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(1*GB, node_0.getUsedResource().getMemory()); + assertEquals(1*GB, node_0.getAllocatedResource().getMemory()); assertEquals(2, app_1.getReReservations(priority)); // Try to schedule on node_1 now, should *move* the reservation @@ -1469,7 +1469,7 @@ public void testReservationExchange() throws Exception { assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(4*GB, node_1.getUsedResource().getMemory()); + assertEquals(4*GB, node_1.getAllocatedResource().getMemory()); // Doesn't change yet... 
only when reservation is cancelled or a different // container is reserved assertEquals(2, app_1.getReReservations(priority)); @@ -1487,7 +1487,7 @@ public void testReservationExchange() throws Exception { assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(0*GB, node_0.getUsedResource().getMemory()); + assertEquals(0*GB, node_0.getAllocatedResource().getMemory()); } private void verifyContainerAllocated(CSAssignment assignment, NodeType nodeType) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index 4a815f5..f73baa4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -158,9 +158,9 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { any(ResourceLimits.class), any(SchedulingMode.class)); // Mock the node's resource availability - Resource available = node.getAvailableResource(); + Resource available = node.getUnallocatedResource(); doReturn(Resources.subtractFrom(available, allocatedResource)). 
- when(node).getAvailableResource(); + when(node).getUnallocatedResource(); } return new CSAssignment(allocatedResource, type); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java index 9b920d0..9047138 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java @@ -263,9 +263,9 @@ public void testReservation() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(22 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, @@ -275,9 +275,9 @@ public void testReservation() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(19 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); - 
assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, @@ -289,9 +289,9 @@ public void testReservation() throws Exception { assertEquals(16 * GB, a.getMetrics().getAvailableMB()); assertEquals(16 * GB, app_0.getHeadroom().getMemory()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) @@ -305,9 +305,9 @@ public void testReservation() throws Exception { assertEquals(11 * GB, app_0.getHeadroom().getMemory()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() .getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // assign reducer to node 2 @@ -321,9 +321,9 @@ public void testReservation() throws Exception { assertEquals(6 * GB, app_0.getHeadroom().getMemory()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() 
.getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(5 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); assertEquals(1, app_0.getTotalRequiredResources(priorityReduce)); // node_1 heartbeat and unreserves from node_0 in order to allocate @@ -337,9 +337,9 @@ public void testReservation() throws Exception { assertEquals(6 * GB, a.getMetrics().getAvailableMB()); assertEquals(6 * GB, app_0.getHeadroom().getMemory()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(8 * GB, node_1.getUsedResource().getMemory()); - assertEquals(5 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(8 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); assertEquals(0, app_0.getTotalRequiredResources(priorityReduce)); } @@ -425,9 +425,9 @@ public void testReservationLimitOtherUsers() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(22 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); @@ -437,9 
+437,9 @@ public void testReservationLimitOtherUsers() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(4 * GB, a.getMetrics().getAllocatedMB()); assertEquals(20 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(2 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // Add a few requests to each app app_0.updateResourceRequests(Collections.singletonList(TestUtils @@ -458,9 +458,9 @@ public void testReservationLimitOtherUsers() throws Exception { assertEquals(8 * GB, a.getMetrics().getReservedMB()); assertEquals(4 * GB, a.getMetrics().getAllocatedMB()); assertEquals(12 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(2 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // next assignment is beyond user limit for user_0 but it should assign to // app_1 for user_1 @@ -472,9 +472,9 @@ public void testReservationLimitOtherUsers() throws Exception { assertEquals(8 * GB, a.getMetrics().getReservedMB()); assertEquals(6 * GB, a.getMetrics().getAllocatedMB()); assertEquals(10 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(4 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(4 * GB, 
node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); } @Test @@ -559,9 +559,9 @@ public void testReservationNoContinueLook() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(22 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, @@ -571,9 +571,9 @@ public void testReservationNoContinueLook() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(19 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, @@ -585,9 +585,9 @@ public void testReservationNoContinueLook() throws Exception { assertEquals(16 * GB, a.getMetrics().getAvailableMB()); assertEquals(16 * GB, app_0.getHeadroom().getMemory()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + 
assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) @@ -601,9 +601,9 @@ public void testReservationNoContinueLook() throws Exception { assertEquals(11 * GB, app_0.getHeadroom().getMemory()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() .getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // assign reducer to node 2 @@ -617,9 +617,9 @@ public void testReservationNoContinueLook() throws Exception { assertEquals(6 * GB, app_0.getHeadroom().getMemory()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() .getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(5 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); assertEquals(1, app_0.getTotalRequiredResources(priorityReduce)); // node_1 heartbeat and won't unreserve from node_0, potentially stuck @@ -634,9 +634,9 @@ public void testReservationNoContinueLook() throws Exception { assertEquals(6 * GB, app_0.getHeadroom().getMemory()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() 
.getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(5 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); assertEquals(1, app_0.getTotalRequiredResources(priorityReduce)); } @@ -718,8 +718,8 @@ public void testAssignContainersNeedToUnreserve() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(14 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, @@ -729,8 +729,8 @@ public void testAssignContainersNeedToUnreserve() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, @@ -742,8 +742,8 @@ public void testAssignContainersNeedToUnreserve() throws Exception { assertEquals(8 * GB, a.getMetrics().getAvailableMB()); assertEquals(8 * GB, app_0.getHeadroom().getMemory()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, 
node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) @@ -757,8 +757,8 @@ public void testAssignContainersNeedToUnreserve() throws Exception { assertEquals(3 * GB, app_0.getHeadroom().getMemory()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() .getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // could allocate but told need to unreserve first @@ -771,8 +771,8 @@ public void testAssignContainersNeedToUnreserve() throws Exception { assertEquals(3 * GB, a.getMetrics().getAvailableMB()); assertEquals(3 * GB, app_0.getHeadroom().getMemory()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(8 * GB, node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(8 * GB, node_1.getAllocatedResource().getMemory()); assertEquals(1, app_0.getTotalRequiredResources(priorityReduce)); } @@ -981,8 +981,8 @@ public void testAssignToQueue() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(14 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); // Only 1 map - 
simulating reduce a.assignContainers(clusterResource, node_0, @@ -992,8 +992,8 @@ public void testAssignToQueue() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, @@ -1004,8 +1004,8 @@ public void testAssignToQueue() throws Exception { assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(8 * GB, a.getMetrics().getAvailableMB()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); // now add in reservations and make sure it continues if config set // allocate to queue so that the potential new capacity is greater then @@ -1018,8 +1018,8 @@ public void testAssignToQueue() throws Exception { assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); assertEquals(3 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); ResourceLimits limits = new ResourceLimits(Resources.createResource(13 * GB)); @@ -1155,8 +1155,8 @@ public void testAssignToUser() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * 
GB, a.getMetrics().getAllocatedMB()); assertEquals(14 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, @@ -1166,8 +1166,8 @@ public void testAssignToUser() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, @@ -1178,8 +1178,8 @@ public void testAssignToUser() throws Exception { assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(8 * GB, a.getMetrics().getAvailableMB()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); // now add in reservations and make sure it continues if config set // allocate to queue so that the potential new capacity is greater then @@ -1194,8 +1194,8 @@ public void testAssignToUser() throws Exception { assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); assertEquals(3 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, 
node_1.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); // not over the limit Resource limit = Resources.createResource(14 * GB, 0); @@ -1307,9 +1307,9 @@ public void testReservationsNoneAvailable() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(22 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, @@ -1319,9 +1319,9 @@ public void testReservationsNoneAvailable() throws Exception { assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(19 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(0 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, @@ -1332,9 +1332,9 @@ public void testReservationsNoneAvailable() throws Exception { assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(16 * GB, a.getMetrics().getAvailableMB()); assertEquals(16 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, 
node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // try to assign reducer (5G on node 0), but tell it's resource limits < // used (8G) + required (5G). It will not reserved since it has to unreserve @@ -1349,9 +1349,9 @@ public void testReservationsNoneAvailable() throws Exception { assertEquals(16 * GB, a.getMetrics().getAvailableMB()); // app_0's headroom = limit (10G) - used (8G) = 2G assertEquals(2 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // try to assign reducer (5G on node 0), but tell it's resource limits < // used (8G) + required (5G). 
It will not reserved since it has to unreserve @@ -1365,9 +1365,9 @@ public void testReservationsNoneAvailable() throws Exception { assertEquals(16 * GB, a.getMetrics().getAvailableMB()); // app_0's headroom = limit (10G) - used (8G) = 2G assertEquals(2 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(0 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); // let it assign 5G to node_2 a.assignContainers(clusterResource, node_2, @@ -1378,9 +1378,9 @@ public void testReservationsNoneAvailable() throws Exception { assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); assertEquals(11 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(5 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); // reserve 8G node_0 a.assignContainers(clusterResource, node_0, @@ -1391,9 +1391,9 @@ public void testReservationsNoneAvailable() throws Exception { assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); assertEquals(3 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(5 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, 
node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); // try to assign (8G on node 2). No room to allocate, // continued to try due to having reservation above, @@ -1406,8 +1406,8 @@ public void testReservationsNoneAvailable() throws Exception { assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); assertEquals(3 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getUsedResource().getMemory()); - assertEquals(3 * GB, node_1.getUsedResource().getMemory()); - assertEquals(5 * GB, node_2.getUsedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 1786069..2694957 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -195,7 +195,7 @@ public static FiCaSchedulerNode getMockNode( when(rmNode.getRackName()).thenReturn(rack); FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode, false)); - LOG.info("node = " + host + " avail=" + node.getAvailableResource()); + LOG.info("node = " + host + " avail=" + node.getUnallocatedResource()); 
when(node.getNodeID()).thenReturn(nodeId); return node; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java index d29d92c..e733b1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java @@ -318,7 +318,7 @@ public void testHeadroomWithBlackListedNodes() { .when(spyApp).isWaitingForAMContainer(); assertTrue(spyApp.isBlacklisted(n1.getNodeName())); assertFalse(spyApp.isBlacklisted(n2.getNodeName())); - assertEquals(n2.getAvailableResource(), spyApp.getHeadroom()); + assertEquals(n2.getUnallocatedResource(), spyApp.getHeadroom()); blacklistAdditions.clear(); blacklistAdditions.add(n2.getNodeName()); @@ -326,7 +326,7 @@ public void testHeadroomWithBlackListedNodes() { app.updateBlacklist(blacklistAdditions, blacklistRemovals); assertFalse(spyApp.isBlacklisted(n1.getNodeName())); assertTrue(spyApp.isBlacklisted(n2.getNodeName())); - assertEquals(n1.getAvailableResource(), spyApp.getHeadroom()); + assertEquals(n1.getUnallocatedResource(), spyApp.getHeadroom()); blacklistAdditions.clear(); blacklistRemovals.clear(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index a15e8d1..8d7c22e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -4433,7 +4433,7 @@ public void handle(Event event) { // Assert.assertEquals(1 * GB, nm_0.getUsed().getMemory()); Resource usedResource = resourceManager.getResourceScheduler() - .getSchedulerNode(nm_0.getNodeId()).getUsedResource(); + .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource(); Assert.assertEquals(usedResource.getMemory(), 0); Assert.assertEquals(usedResource.getVirtualCores(), 0); // Check total resource of scheduler node is also changed to 0 GB 0 core @@ -4445,7 +4445,7 @@ public void handle(Event event) { // Check the available resource is 0/0 Resource availableResource = resourceManager.getResourceScheduler() - .getSchedulerNode(nm_0.getNodeId()).getAvailableResource(); + .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource(); Assert.assertEquals(availableResource.getMemory(), 0); Assert.assertEquals(availableResource.getVirtualCores(), 0); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 86a017e..13af0c1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -348,7 +348,7 @@ public void testUpdateResourceOnNode() throws Exception { assertEquals(schedulerNodes.get(node0.getNodeID()).getTotalResource() .getMemory(), 1024); assertEquals(schedulerNodes.get(node0.getNodeID()). - getAvailableResource().getMemory(), 1024); + getUnallocatedResource().getMemory(), 1024); QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false); Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f); @@ -704,7 +704,7 @@ public void testFifoScheduling() throws Exception { am1.registerAppAttempt(); SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getAllocatedResource().getMemory()); RMApp app2 = rm.submitApp(2048); // kick the scheduling, 2GB given to AM, remaining 2 GB on nm2 @@ -714,7 +714,7 @@ public void testFifoScheduling() throws Exception { am2.registerAppAttempt(); SchedulerNodeReport report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId()); - Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm2.getAllocatedResource().getMemory()); // add request for containers am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1); @@ -750,11 +750,11 @@ public void testFifoScheduling() throws Exception { report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId()); - Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory()); - 
Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemory()); + Assert.assertEquals(0, report_nm1.getUnallocatedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm2.getUnallocatedResource().getMemory()); - Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory()); + Assert.assertEquals(6 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm2.getAllocatedResource().getMemory()); Container c1 = allocated1.get(0); Assert.assertEquals(GB, c1.getResource().getMemory()); @@ -772,7 +772,7 @@ public void testFifoScheduling() throws Exception { Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses() .size()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(5 * GB, report_nm1.getAllocatedResource().getMemory()); rm.stop(); } @@ -829,7 +829,7 @@ private void testMinimumAllocation(YarnConfiguration conf, int testAlloc) int checkAlloc = conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB); - Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(checkAlloc, report_nm1.getAllocatedResource().getMemory()); rm.stop(); } @@ -1109,8 +1109,8 @@ public void testResourceOverCommit() throws Exception { SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); // check node report, 2 GB used and 2 GB available - Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getUnallocatedResource().getMemory()); // add request for containers 
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1); @@ -1131,8 +1131,8 @@ public void testResourceOverCommit() throws Exception { report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); // check node report, 4 GB used and 0 GB available - Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory()); - Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(0, report_nm1.getUnallocatedResource().getMemory()); + Assert.assertEquals(4 * GB, report_nm1.getAllocatedResource().getMemory()); // check container is assigned with 2 GB. Container c1 = allocated1.get(0); @@ -1150,8 +1150,8 @@ public void testResourceOverCommit() throws Exception { // Now, the used resource is still 4 GB, and available resource is minus // value. report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(4 * GB, report_nm1.getAllocatedResource().getMemory()); + Assert.assertEquals(-2 * GB, report_nm1.getUnallocatedResource().getMemory()); // Check container can complete successfully in case of resource // over-commitment. @@ -1169,9 +1169,9 @@ public void testResourceOverCommit() throws Exception { Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses() .size()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getAllocatedResource().getMemory()); // As container return 2 GB back, the available resource becomes 0 again. 
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(0 * GB, report_nm1.getUnallocatedResource().getMemory()); rm.stop(); } @@ -1247,7 +1247,7 @@ public void handle(Event event) { // Assert.assertEquals(1 * GB, nm_0.getUsed().getMemory()); Resource usedResource = resourceManager.getResourceScheduler() - .getSchedulerNode(nm_0.getNodeId()).getUsedResource(); + .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource(); Assert.assertEquals(usedResource.getMemory(), 1 * GB); Assert.assertEquals(usedResource.getVirtualCores(), 1); // Check total resource of scheduler node is also changed to 1 GB 1 core @@ -1259,7 +1259,7 @@ public void handle(Event event) { // Check the available resource is 0/0 Resource availableResource = resourceManager.getResourceScheduler() - .getSchedulerNode(nm_0.getNodeId()).getAvailableResource(); + .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource(); Assert.assertEquals(availableResource.getMemory(), 0); Assert.assertEquals(availableResource.getVirtualCores(), 0); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java index 3fd1fd5..07d2ada 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java @@ -780,13 +780,13 @@ public void verifyNodeInfoGeneric(MockNM nm, String state, String rack, assertEquals("numContainers doesn't match: " + numContainers, 
report.getNumContainers(), numContainers); assertEquals("usedMemoryMB doesn't match: " + usedMemoryMB, report - .getUsedResource().getMemory(), usedMemoryMB); + .getAllocatedResource().getMemory(), usedMemoryMB); assertEquals("availMemoryMB doesn't match: " + availMemoryMB, report - .getAvailableResource().getMemory(), availMemoryMB); + .getUnallocatedResource().getMemory(), availMemoryMB); assertEquals("usedVirtualCores doesn't match: " + usedVirtualCores, report - .getUsedResource().getVirtualCores(), usedVirtualCores); + .getAllocatedResource().getVirtualCores(), usedVirtualCores); assertEquals("availVirtualCores doesn't match: " + availVirtualCores, report - .getAvailableResource().getVirtualCores(), availVirtualCores); + .getUnallocatedResource().getVirtualCores(), availVirtualCores); } }