From 3a2742c2f402af112ce93ab7d6177c245fc7a78d Mon Sep 17 00:00:00 2001 From: wjlei Date: Wed, 10 May 2017 19:31:01 +0800 Subject: [PATCH 1/3] ports support --- .../hadoop/yarn/sls/nodemanager/NodeInfo.java | 24 ++++++ .../hadoop/yarn/sls/scheduler/RMNodeWrapper.java | 20 +++++ .../apache/hadoop/yarn/api/records/Resource.java | 15 ++++ .../apache/hadoop/yarn/conf/YarnConfiguration.java | 14 ++++ .../yarn/api/records/impl/pb/ResourcePBImpl.java | 16 +++- .../util/resource/DefaultResourceCalculator.java | 5 ++ .../util/resource/DominantResourceCalculator.java | 26 ++++++- .../yarn/util/resource/ResourceCalculator.java | 3 + .../hadoop/yarn/util/resource/Resources.java | 88 +++++++++++++++++++++- .../hadoop/yarn/api/BasePBImplRecordsTest.java | 22 ++++++ .../apache/hadoop/yarn/api/TestPBImplRecords.java | 10 +++ .../server/nodemanager/NodeStatusUpdaterImpl.java | 52 ++++++++++++- .../resourcemanager/ResourceTrackerService.java | 37 ++++++++- .../yarn/server/resourcemanager/rmnode/RMNode.java | 28 +++++++ .../server/resourcemanager/rmnode/RMNodeImpl.java | 26 +++++++ .../resourcemanager/scheduler/SchedulerNode.java | 53 ++++++++++--- .../scheduler/capacity/LeafQueue.java | 8 +- .../allocator/RegularContainerAllocator.java | 31 ++++++-- .../yarn/server/resourcemanager/MockNodes.java | 18 +++++ .../scheduler/capacity/TestUtils.java | 28 +++++++ 20 files changed, 497 insertions(+), 27 deletions(-) diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java index 8962aba..5a7c6c9 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java @@ -225,6 +225,30 @@ public Resource getPhysicalResource() { } } + @Override + public void setLocalUsedPortsSnapshot(ValueRanges ports) { + } + + @Override + public ValueRanges getAvailablePorts() { + return null; + } + + @Override + public void setAvailablePorts(ValueRanges ports) { + } + + @Override + public ValueRanges getContainerAllocatedPorts() { + return null; + } + + @Override + public void setContainerAllocatedPorts(ValueRanges ports) { + } + } + + public static RMNode newNodeInfo(String rackName, String hostName, final Resource resource, int port) { final NodeId nodeId = newNodeID(hostName, port); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java index d7b159c..a59d9ef 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java @@ -212,4 +212,24 @@ public Integer getDecommissioningTimeout() { public Resource getPhysicalResource() { return null; } + + @Override + public ValueRanges getAvailablePorts() { + return node.getAvailablePorts(); + } + + @Override + public void setAvailablePorts(ValueRanges ports) { + node.setAvailablePorts(ports); + } + + @Override + public ValueRanges getContainerAllocatedPorts() { + return node.getContainerAllocatedPorts(); + } + + @Override + public void setContainerAllocatedPorts(ValueRanges ports) { + node.setContainerAllocatedPorts(ports); + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 0d31b6f..7d850b0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -167,6 +167,21 @@ public boolean equals(Object obj) { return true; } + public boolean equalsWithPorts(Object obj) { + if (!this.equals(obj)) { + return false; + } else { + Resource other = (Resource) obj; + ValueRanges lPorts = this.getPorts(); + ValueRanges rPorts = other.getPorts(); + if (lPorts == null) { + return rPorts == null; + } else { + return lPorts.equals(rPorts); + } + } + } + @Override public String toString() { return ""; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 82274fe..5a48372 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1140,6 +1140,20 @@ public static boolean isAclEnabled(Configuration conf) { public static final String NM_PCORES_VCORES_MULTIPLIER = NM_PREFIX + "resource.pcores-vcores-multiplier"; public static final float DEFAULT_NM_PCORES_VCORES_MULTIPLIER = 1.0f; + /** + * Rounds of updating ports. This parameter is circle controller for updating + * local allocated ports info, since the ports info is big. We can control the + * update frequency to have balance with cluster scale and ports info's + * accuracy + */ + public static final String NM_PORTS_UPDATE_ROUNDS = NM_PREFIX + + "resource.ports-update-rounds"; + public static final int DEFAULT_NM_PORTS_UPDATE_ROUNDS = 10; + + /** Whether to enable ports collection */ + public static final String PORTS_AS_RESOURCE_ENABLE = YARN_PREFIX + + "ports_as_resource.enable"; + public static final boolean DEFAULT_PORTS_AS_RESOURCE_ENABLE = false; /** Percentage of overall CPU which can be allocated for containers. 
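[Illustrative sketch, not part of the patch] The two configuration keys added above resolve to yarn.ports_as_resource.enable and yarn.nodemanager.resource.ports-update-rounds. A minimal example of reading them with the standard Hadoop Configuration API:

    Configuration conf = new YarnConfiguration();
    // Whether the NM reports ports and the RM schedules against them
    // (yarn.ports_as_resource.enable, default false).
    boolean portsEnabled = conf.getBoolean(
        YarnConfiguration.PORTS_AS_RESOURCE_ENABLE,
        YarnConfiguration.DEFAULT_PORTS_AS_RESOURCE_ENABLE);
    // Heartbeat rounds between refreshes of the locally-used ports snapshot
    // (yarn.nodemanager.resource.ports-update-rounds, default 10).
    int updateRounds = conf.getInt(
        YarnConfiguration.NM_PORTS_UPDATE_ROUNDS,
        YarnConfiguration.DEFAULT_NM_PORTS_UPDATE_ROUNDS);
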
*/ public static final String NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java index 6686696..6468f0e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java @@ -31,7 +31,6 @@ ResourceProto proto = ResourceProto.getDefaultInstance(); ResourceProto.Builder builder = null; boolean viaProto = false; - public ResourcePBImpl() { builder = ResourceProto.newBuilder(); } @@ -47,6 +46,21 @@ public ResourceProto getProto() { return proto; } + private synchronized void mergeLocalToBuilder() { + if (this.ports != null) { + builder.setPorts(convertToProtoFormat(this.ports)); + } + } + + private synchronized void mergeLocalToProto() { + if (viaProto){ + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + private void maybeInitBuilder() { if (viaProto || builder == null) { builder = ResourceProto.newBuilder(proto); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java index ef7229c..f0255db 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java @@ -36,6 +36,11 @@ public int compare(Resource unused, Resource lhs, Resource rhs, return Long.compare(lhs.getMemorySize(), rhs.getMemorySize()); } + public int compareWithPorts(Resource clusterResource, Resource lhs, + Resource rhs) { + return compare(clusterResource, lhs, rhs); + } + @Override public long computeAvailableContainers(Resource available, Resource required) { // Only consider memory diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index 032aa02..0e1ee9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -93,6 +93,24 @@ public int compare(Resource clusterResource, Resource lhs, Resource rhs, return 0; } + public int compareWithPorts(Resource clusterResource, Resource lhs, + Resource rhs) { + int diff = compare(clusterResource, lhs, rhs); + + if (diff == 0) { + ValueRanges lPorts = lhs.getPorts(); + ValueRanges rPorts = rhs.getPorts(); + if(lPorts == null){ + diff = rPorts == null ? 0 : 1; + } else if (rPorts == null) { + diff = -1; + } else { + diff = lPorts.compareTo(rPorts); + } + } + return diff; + } + /** * Use 'dominant' for now since we only have 2 resources - gives us a slight * performance boost. 
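[Illustrative sketch, not part of the patch] The tie-break added in compareWithPorts above only applies when memory and vcores compare equal; clusterResource below stands in for any cluster total, and Resource.newInstance(memory, vcores, ports) is the three-argument form this patch introduces:

    ResourceCalculator rc = new DominantResourceCalculator();
    // Same memory and vcores, different port ranges.
    Resource a = Resource.newInstance(1024, 1,
        ValueRanges.iniFromExpression("[8000-8001]"));
    Resource b = Resource.newInstance(1024, 1,
        ValueRanges.iniFromExpression("[8000-8005]"));
    // compare() reports a tie; compareWithPorts() falls back to
    // ValueRanges.compareTo, and [8000-8001] fits inside [8000-8005], so a < b.
    int cmp = rc.compareWithPorts(clusterResource, a, b);  // negative
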
@@ -117,8 +135,14 @@ protected float getResourceAsValue( @Override public long computeAvailableContainers(Resource available, Resource required) { + if (required.getPorts() != null && required.getPorts().getRangesCount()>0) { + // required ports resource, so we can not allocate more than one container + return Math.min( + Math.min(available.getMemorySize() / required.getMemorySize(), + available.getVirtualCores() / required.getVirtualCores()), 1); + } return Math.min( - available.getMemorySize() / required.getMemorySize(), + available.getMemorySize() / required.getMemorySize(), available.getVirtualCores() / required.getVirtualCores()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java index a2f85b3..633cea3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java @@ -58,6 +58,9 @@ public abstract int compare( public int compare(Resource clusterResource, Resource lhs, Resource rhs) { return compare(clusterResource, lhs, rhs, false); } + public abstract int compareWithPorts(Resource clusterResource, Resource lhs, + Resource rhs); + public static int divideAndCeil(int a, int b) { if (b == 0) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java index 7020300..8d6d6ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java @@ -161,25 +161,67 @@ public static Resource unbounded() { public static Resource clone(Resource res) { return createResource(res.getMemorySize(), res.getVirtualCores()); } + + public static Resource cloneWithPorts(Resource res) { + return createResource(res.getMemory(), res.getVirtualCores(), + res.getPorts()); + } public static Resource addTo(Resource lhs, Resource rhs) { - lhs.setMemorySize(lhs.getMemorySize() + rhs.getMemorySize()); + return addTo(lhs, rhs, true); + } + + public static Resource addToWithPorts(Resource lhs, Resource rhs) { + return addTo(lhs, rhs, false); + } + + public static Resource addTo(Resource lhs, Resource rhs, boolean ignorePorts) { + lhs.setMemory(lhs.getMemory() + rhs.getMemory()); lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores()); + if (!ignorePorts) { + if (lhs.getPorts() != null) { + lhs.setPorts(lhs.getPorts().addSelf(rhs.getPorts())); + } else { + lhs.setPorts(rhs.getPorts()); + } + } return lhs; } public static Resource add(Resource lhs, Resource rhs) { return addTo(clone(lhs), rhs); } + + public static Resource addWithPorts(Resource lhs, Resource rhs) { + return addToWithPorts(cloneWithPorts(lhs), rhs); + } public static Resource subtractFrom(Resource lhs, Resource rhs) { - lhs.setMemorySize(lhs.getMemorySize() - rhs.getMemorySize()); + return subtractFrom(lhs, rhs, true); + } + + public static Resource subtractFromWithPorts(Resource lhs, Resource rhs) { + return subtractFrom(lhs, rhs, false); + } + + public 
static Resource subtractFrom(Resource lhs, Resource rhs, + boolean ignorePorts) { + lhs.setMemory(lhs.getMemory() - rhs.getMemory()); lhs.setVirtualCores(lhs.getVirtualCores() - rhs.getVirtualCores()); + if (!ignorePorts) { + if (lhs.getPorts() != null) { + lhs.setPorts(lhs.getPorts().minusSelf(rhs.getPorts())); + } + } return lhs; } public static Resource subtract(Resource lhs, Resource rhs) { - return subtractFrom(clone(lhs), rhs); + return subtractFrom(clone(lhs), rhs , true); + } + + public static Resource subtractWithPorts(Resource lhs, Resource rhs) { + return subtractFrom(cloneWithPorts(lhs), rhs , false); } /** @@ -333,6 +375,46 @@ public static Resource max( return resourceCalculator.compare(clusterResource, lhs, rhs) >= 0 ? lhs : rhs; } + public static boolean equalsWithPorts(Resource lhs, Resource rhs) { + return lhs.equalsWithPorts(rhs); + } + + public static boolean lessThanWithPorts( + ResourceCalculator resourceCalculator, + Resource clusterResource, Resource lhs, Resource rhs) { + return (resourceCalculator.compareWithPorts(clusterResource, lhs, rhs) < 0); + } + + public static boolean lessThanOrEqualWithPorts( + ResourceCalculator resourceCalculator, + Resource clusterResource, Resource lhs, Resource rhs) { + return (resourceCalculator.compareWithPorts(clusterResource, lhs, rhs) <= 0); + } + + public static boolean greaterThanWithPorts( + ResourceCalculator resourceCalculator, + Resource clusterResource, Resource lhs, Resource rhs) { + return resourceCalculator.compareWithPorts(clusterResource, lhs, rhs) > 0; + } + + public static boolean greaterThanOrEqualWithPorts( + ResourceCalculator resourceCalculator, Resource clusterResource, + Resource lhs, Resource rhs) { + return resourceCalculator.compareWithPorts(clusterResource, lhs, rhs) >= 0; + } + + public static Resource minWithPorts(ResourceCalculator resourceCalculator, + Resource clusterResource, Resource lhs, Resource rhs) { + return resourceCalculator.compareWithPorts(clusterResource, lhs, rhs) <= 0 ? lhs + : rhs; + } + + public static Resource maxWithPorts(ResourceCalculator resourceCalculator, + Resource clusterResource, Resource lhs, Resource rhs) { + return resourceCalculator.compareWithPorts(clusterResource, lhs, rhs) >= 0 ? 
lhs + : rhs; + } + public static boolean fitsIn(Resource smaller, Resource bigger) { return smaller.getMemorySize() <= bigger.getMemorySize() && smaller.getVirtualCores() <= bigger.getVirtualCores(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java index 82170b3..bdd09de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -22,6 +22,11 @@ import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.yarn.api.records.ExecutionTimeEstimate; +import org.apache.hadoop.yarn.api.records.NodeLabel; +import org.apache.hadoop.yarn.api.records.QueueStatistics; +import org.apache.hadoop.yarn.api.records.ValueRange; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.junit.Assert; import java.lang.reflect.*; @@ -103,6 +108,23 @@ private static Object genTypeValue(Type type) { ret = map; } } + if (type.equals(ValueRanges.class)) { + ret = ValueRanges.newInstance(); + } + if (type.equals(ValueRange.class)) { + ret = ValueRange.newInstance(0, 0); + } + if (type.equals(ExecutionTimeEstimate.class)) { + ret = ExecutionTimeEstimate.newInstance(); + } + if (type.equals(NodeLabel.class)) { + ret = NodeLabel.newInstance("test"); + } + if (type.equals(QueueStatistics.class)) { + ret = + QueueStatistics.newInstance(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0); + } if (ret == null) { throw new IllegalArgumentException("type " + type + " is not supported"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index b62b4ee..46743fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -1147,4 +1147,14 @@ public void testExecutionTypeRequestPBImpl() throws Exception { validatePBImplRecord(ExecutionTypeRequestPBImpl.class, ExecutionTypeRequestProto.class); } + + @Test + public void testCheckForValueRangePBImpl() throws Exception { + validatePBImplRecord(ValueRangePBImpl.class, ValueRangeProto.class); + } + + @Test + public void testCheckForValueRangesPBImpl() throws Exception { + validatePBImplRecord(ValueRangesPBImpl.class, ValueRangesProto.class); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 00073d8..68968bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -149,6 +150,12 @@ private NMNodeLabelsHandler nodeLabelsHandler; private final NodeLabelsProvider nodeLabelsProvider; + /** + * this parameter is circle controller for updating local allocated ports + * info, since the ports info is big. we can control the update frequency to + * have balance with cluster scale and ports info's accuracy + */ + private int numOfRoundsToUpdatePorts; public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) { @@ -184,7 +191,22 @@ protected void serviceInit(Configuration conf) throws Exception { LOG.info("Nodemanager resources: memory set to " + memoryMb + "MB."); LOG.info("Nodemanager resources: vcores set to " + virtualCores + "."); - this.totalResource = Resource.newInstance(memoryMb, virtualCores); + numOfRoundsToUpdatePorts = + conf.getInt(YarnConfiguration.NM_PORTS_UPDATE_ROUNDS, + YarnConfiguration.DEFAULT_NM_PORTS_UPDATE_ROUNDS); + + enablePortsAsResource = + conf.getBoolean(YarnConfiguration.PORTS_AS_RESOURCE_ENABLE, + YarnConfiguration.DEFAULT_PORTS_AS_RESOURCE_ENABLE); + + ValueRanges ports = null; + if (enablePortsAsResource) { + ports = + ValueRanges.iniFromExpression(conf.get(YarnConfiguration.NM_PORTS, + YarnConfiguration.DEFAULT_NM_PORTS)); + } + + this.totalResource = Resource.newInstance(memoryMb, virtualCores, ports); metrics.addResource(totalResource); // Get actual node physical resources @@ -352,10 +374,21 @@ protected void registerWithRM() // during RM recovery synchronized (this.context) { List containerReports = getNMContainerStatuses(); + Set nodeLabels = null; + if (hasNodeLabelsProvider) { + nodeLabels = nodeLabelsProvider.getNodeLabels(); + nodeLabels = + (null == nodeLabels) ? 
CommonNodeLabelsManager.EMPTY_NODELABEL_SET + : nodeLabels; + } + ValueRanges ports = null; + if (enablePortsAsResource) { + ports = new PortsInfo().GetAllocatedPorts(); + } RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance(nodeId, httpPort, totalResource, nodeManagerVersionId, containerReports, getRunningApplications(), - nodeLabels, physicalResource); + nodeLabels, physicalResource, ports); if (containerReports != null) { LOG.info("Registering with RM using containers :" + containerReports); } @@ -473,7 +506,7 @@ protected NodeStatus getNodeStatus(int responseId) throws IOException { NodeStatus nodeStatus = NodeStatus.newInstance(nodeId, responseId, containersStatuses, createKeepAliveApplicationList(), nodeHealthStatus, - containersUtilization, nodeUtilization, increasedContainers); + containersUtilization, nodeUtilization, increasedContainers, ports); nodeStatus.setOpportunisticContainersStatus( getOpportunisticContainersStatus()); @@ -760,7 +793,6 @@ public long getRMIdentifier() { } protected void startStatusUpdater() { - statusUpdaterRunnable = new StatusUpdaterRunnable(); statusUpdater = new Thread(statusUpdaterRunnable, "Node Status Updater"); @@ -1032,9 +1064,21 @@ public void run() { // Send heartbeat try { NodeHeartbeatResponse response = null; + ValueRanges lastUpdatePorts = null; + int rounds = 0; Set nodeLabelsForHeartbeat = nodeLabelsHandler.getNodeLabelsForHeartbeat(); NodeStatus nodeStatus = getNodeStatus(lastHeartbeatID); + if (enablePortsAsResource) { + if (rounds++ >= numOfRoundsToUpdatePorts) { + ValueRanges ports = new PortsInfo().GetAllocatedPorts(); + if (lastUpdatePorts == null || !lastUpdatePorts.equals(ports)) { + nodeStatus.setLocalUsedPortsSnapshot(ports); + lastUpdatePorts = ports; + } + rounds = 0; + } + } NodeHeartbeatRequest request = NodeHeartbeatRequest.newInstance(nodeStatus, NodeStatusUpdaterImpl.this.context diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index 40bd610..c6aaa76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -353,6 +353,10 @@ public RegisterNodeManagerResponse registerNodeManager( response.setNodeAction(NodeAction.SHUTDOWN); return response; } + // reset illegal resource report + if (!this.enablePortsAsResource) { + capability.setPorts(null); + } // check if node's capacity is load from dynamic-resources.xml String nid = nodeId.toString(); @@ -387,8 +391,20 @@ public RegisterNodeManagerResponse registerNodeManager( response.setNMTokenMasterKey(nmTokenSecretManager .getCurrentKey()); + ValueRanges localUsedPorts = null; + if (this.enablePortsAsResource) { + localUsedPorts = request.getLocalUsedPortsSnapshot(); + } + RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, - resolve(host), capability, nodeManagerVersion, physicalResource); + resolve(host), capability, nodeManagerVersion, physicalResource, localUsedPorts); + if (this.enablePortsAsResource) { + 
rmNode.setAvailablePorts( + getAvailablePorts( + rmNode.getTotalCapability().getPorts(), + rmNode.getContainerAllocatedPorts(), + rmNode.getLocalUsedPortsSnapshot())); + } RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode); if (oldNode == null) { @@ -608,9 +624,18 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) nodeHeartBeatResponse.setContainerQueuingLimit( this.rmContext.getNodeManagerQueueLimitCalculator() .createContainerQueuingLimit()); - } + ValueRanges availablePorts = null; + if (rmNode.getTotalCapability().getPorts() != null) { + availablePorts = + getAvailablePorts(rmNode.getTotalCapability().getPorts(), + rmNode.getContainerAllocatedPorts(), + rmNode.getLocalUsedPortsSnapshot()); + } + rmNode.setAvailablePorts(availablePorts); + } return nodeHeartBeatResponse; } + } private void setAppCollectorsMapToResponse( List runningApps, NodeHeartbeatResponse response) { @@ -632,6 +657,14 @@ private void setAppCollectorsMapToResponse( response.setAppCollectorsMap(liveAppCollectorsMap); } + private static ValueRanges getAvailablePorts(ValueRanges total, + ValueRanges allocated, ValueRanges localUsed) { + if (total == null) { + return null; + } + return total.minusSelf(allocated).minusSelf(localUsed); + } + private void updateAppCollectorsMap(NodeHeartbeatRequest request) { Map registeredCollectorsMap = request.getRegisteredCollectors(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index 86f8679..b48567d4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -187,4 +187,32 @@ public void updateNodeHeartbeatResponseForContainersDecreasing( * @return the decommissioning timeout in second. */ Integer getDecommissioningTimeout(); + + /** + * Get available ports. + * + * @return ports range. + */ + public ValueRanges getAvailablePorts(); + + /** + * update {@link ValueRanges} available ports. + * + * @param use {@link ValueRanges} to update + */ + public void setAvailablePorts(ValueRanges ports); + + /** + * Get container allocated ports. + * + * @return ports range. + */ + public ValueRanges getContainerAllocatedPorts(); + + /** + * update {@link ValueRanges} container allocated ports. 
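[Illustrative sketch, not part of the patch] The availability computation in getAvailablePorts above subtracts both the scheduler-allocated ports and the locally-used snapshot from the node's total; with invented values:

    // Ports the node advertises in its total capability.
    ValueRanges total = ValueRanges.iniFromExpression("[8000-8010]");
    // Ports already assigned to containers by the scheduler.
    ValueRanges allocated = ValueRanges.iniFromExpression("[8000-8002]");
    // Ports observed in use by local processes in the NM snapshot.
    ValueRanges localUsed = ValueRanges.iniFromExpression("8005");
    // Mirrors getAvailablePorts(total, allocated, localUsed):
    ValueRanges available = total.minusSelf(allocated).minusSelf(localUsed);
    // available is now [8003-8004],[8006-8010]
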
+ * + * @param use {@link ValueRanges} to update + */ + public void setContainerAllocatedPorts(ValueRanges ports); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 1f121f8..85f591d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -140,6 +140,11 @@ private OpportunisticContainersStatus opportunisticContainersStatus; private final ContainerAllocationExpirer containerAllocationExpirer; + /** Port ranges used in the host. */ + private ValueRanges localUsedPortsSnapshot = null; + private ValueRanges containerAllocatedPorts = null; + private ValueRanges availabelPorts = null; + /* set of containers that have just launched */ private final Set launchedContainers = new HashSet(); @@ -1474,6 +1479,27 @@ private void handleLogAggregationStatus( } @Override + public ValueRanges getAvailablePorts() { + return availabelPorts; + } + + @Override + public void setAvailablePorts(ValueRanges ports) { + this.availabelPorts = ports; + } + + @Override + public ValueRanges getContainerAllocatedPorts() { + return containerAllocatedPorts; + } + + @Override + public void setContainerAllocatedPorts(ValueRanges ports) { + this.containerAllocatedPorts = ports; + } + + } + @Override public List pullNewlyIncreasedContainers() { try { writeLock.lock(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index 272537c..43bfa22 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -19,13 +19,12 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import java.util.ArrayList; -import java.util.LinkedList; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; -import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -36,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; @@ -45,6 
+45,7 @@ import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.util.resource.Resources; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableSet; @@ -79,8 +80,14 @@ public SchedulerNode(RMNode node, boolean usePortForNodeName, Set labels) { this.rmNode = node; - this.unallocatedResource = Resources.clone(node.getTotalCapability()); - this.totalResource = Resources.clone(node.getTotalCapability()); + Resource capacity = node.getTotalCapability(); + if (capacity != null && capacity.getPorts() != null) { + this.unallocatedResource = Resources.cloneWithPorts(capacity); + this.totalResource = Resources.cloneWithPorts(capacity); + } else { + this.unallocatedResource = Resources.clone(capacity); + this.totalResource = Resources.clone(capacity); + } if (usePortForNodeName) { nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort(); } else { @@ -188,6 +195,10 @@ public synchronized Resource getUnallocatedResource() { return this.unallocatedResource; } + public synchronized ValueRanges getAvailablePorts() { + return this.rmNode.getAvailablePorts(); + } + /** * Get allocated resources on the node. * @return Allocated resources on the node @@ -266,7 +277,16 @@ public synchronized void containerStarted(ContainerId containerId) { info.launchedOnNode = true; } } - + + private ValueRanges calculateAvailablePorts() { + if (rmNode.getTotalCapability().getPorts() == null) { + return null; + } + return rmNode.getTotalCapability().getPorts() + .minusSelf(rmNode.getContainerAllocatedPorts()) + .minusSelf(rmNode.getLocalUsedPortsSnapshot()); + } + /** * Add unallocated resources to the node. This is used when unallocating a * container. @@ -278,8 +298,16 @@ private synchronized void addUnallocatedResource(Resource resource) { + rmNode.getNodeAddress()); return; } - Resources.addTo(unallocatedResource, resource); - Resources.subtractFrom(allocatedResource, resource); + + if (resource.getPorts() != null) { + Resources.addToWithPorts(unallocatedResource, resource); + Resources.subtractFromWithPorts(allocatedResource, resource); + rmNode.setContainerAllocatedPorts(allocated.getPorts()); + rmNode.setAvailablePorts(calculateAvailablePorts()); + } else { + Resources.addTo(unallocatedResource, resource); + Resources.subtractFrom(allocatedResource, resource); + } } /** @@ -294,8 +322,15 @@ public synchronized void deductUnallocatedResource(Resource resource) { + rmNode.getNodeAddress()); return; } - Resources.subtractFrom(unallocatedResource, resource); - Resources.addTo(allocatedResource, resource); + if (resource.getPorts() != null) { + Resources.subtractFromWithPorts(unallocatedResource, resource); + Resources.addToWithPorts(allocatedResource, resource); + rmNode.setContainerAllocatedPorts(allocated.getPorts()); + rmNode.setAvailablePorts(calculateAvailablePorts()); + } else { + Resources.subtractFrom(unallocatedResource, resource); + Resources.addTo(allocatedResource, resource); + } } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 9059ef0..3522b06 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; @@ -97,6 +98,7 @@ private volatile int nodeLocalityDelay; private volatile int rackLocalityAdditionalDelay; private volatile boolean rackLocalityFullReset; + private boolean enablePortsAsResource; Map applicationAttemptMap = new ConcurrentHashMap<>(); @@ -143,6 +145,10 @@ public LeafQueue(CapacitySchedulerContext cs, // One time initialization is enough since it is static ordering policy this.pendingOrderingPolicy = new FifoOrderingPolicyForPendingApps(); + + this.enablePortsAsResource = + conf.getBoolean(YarnConfiguration.PORTS_AS_RESOURCE_ENABLE, + YarnConfiguration.DEFAULT_PORTS_AS_RESOURCE_ENABLE); if(LOG.isDebugEnabled()) { LOG.debug("LeafQueue:" + " name=" + queueName @@ -1005,7 +1011,7 @@ public CSAssignment assignContainers(Resource clusterResource, ActivityDiagnosticConstant.QUEUE_DO_NOT_NEED_MORE_RESOURCE); return CSAssignment.NULL_ASSIGNMENT; } - + for (Iterator assignmentIterator = orderingPolicy.getAssignmentIterator(); assignmentIterator.hasNext(); ) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index f753d31..b512215 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -30,30 +30,28 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; - import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils; -import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; - -import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityDiagnosticConstant; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState; - import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSAMContainerLaunchDiagnosticsConstants; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSAssignment; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SchedulingPlacementSet; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk; +import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; @@ -484,6 +482,19 @@ private ContainerAllocation assignContainersOnNode(Resource clusterResource, ActivityDiagnosticConstant.PRIORITY_SKIPPED); return ContainerAllocation.PRIORITY_SKIPPED; } + + private boolean validatePortsAvailable(ValueRanges availablePorts, + ValueRanges requiredPorts) { + if (availablePorts == null || requiredPorts == null) { + // no ports request + return true; + } + if (requiredPorts.isLessOrEqual(availablePorts)) { + return true; + } else { + return false; + } + } private ContainerAllocation assignContainer(Resource clusterResource, FiCaSchedulerNode node, SchedulerRequestKey schedulerKey, @@ -516,6 +527,14 @@ private ContainerAllocation assignContainer(Resource clusterResource, boolean shouldAllocOrReserveNewContainer = shouldAllocOrReserveNewContainer( schedulerKey, capability); + + if (enablePortsAsResource + && !validatePortsAvailable( + node.getAvailablePorts(), capability.getPorts())) { + LOG.info("no available ports, current available:" + + node.getAvailablePorts() + ", required:" + capability.getPorts()); + return new CSAssignment(Resources.none(), type); + } // Can we allocate a container on this node? 
long availableContainers = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 91170d1..d6caa84 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -284,6 +284,24 @@ public Integer getDecommissioningTimeout() { public Resource getPhysicalResource() { return this.physicalResource; } + + @Override + public ValueRanges getAvailablePorts() { + return null; + } + + @Override + public void setAvailablePorts(ValueRanges ports) { + } + + @Override + public ValueRanges getContainerAllocatedPorts() { + return null; + } + + @Override + public void setContainerAllocatedPorts(ValueRanges ports) { + } }; private static RMNode buildRMNode(int rack, final Resource perNode, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 4bc5127..0c45ee1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; @@ -65,6 +66,7 @@ import org.apache.hadoop.yarn.util.resource.Resources; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import org.apache.hadoop.net.Node; import com.google.common.collect.Sets; import org.apache.hadoop.yarn.event.Event; @@ -225,6 +227,32 @@ public static FiCaSchedulerNode getMockNode(String host, String rack, return node; } + public static FiCaSchedulerNode getMockNodeForPortsCaculate(String host, + String rack, int port, int mem, int vCores, ValueRanges ports, + Configuration conf) { + NodeId nodeId = mock(NodeId.class); + when(nodeId.getHost()).thenReturn(host); + when(nodeId.getPort()).thenReturn(port); + RMContext rmContext = mock(RMContext.class); + when(rmContext.getYarnConfiguration()).thenReturn(conf); + Node mockNode = mock(Node.class); + when(mockNode.getNetworkLocation()).thenReturn(rack); + RMNode rmNode = + new RMNodeImpl( + nodeId, + rmContext, + host, + 0, + 0, + 
mockNode, + Resources.createResource(mem, vCores, ports), + "", + OverAllocationInfo.newInstance(ResourceThresholds.newInstance(1.0f))); + FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode, false)); + LOG.info("node = " + host + " avail=" + node.getUnallocatedResource()); + return node; + } + @SuppressWarnings("deprecation") public static ContainerId getMockContainerId(FiCaSchedulerApp application) { ContainerId containerId = mock(ContainerId.class); -- 1.9.1 From 832d710f63eb331e61c176822c140c0376975945 Mon Sep 17 00:00:00 2001 From: wjlei Date: Wed, 10 May 2017 19:43:31 +0800 Subject: [PATCH 2/3] add missing files --- .../apache/hadoop/yarn/api/records/ValueRange.java | 83 ++++ .../hadoop/yarn/api/records/ValueRanges.java | 333 +++++++++++++++ .../yarn/api/records/impl/pb/ValueRangePBImpl.java | 103 +++++ .../api/records/impl/pb/ValueRangesPBImpl.java | 171 ++++++++ .../hadoop/yarn/api/records/TestValueRanges.java | 167 ++++++++ .../resource/TestResourcesCalculatorWithPorts.java | 257 ++++++++++++ .../scheduler/capacity/TestPortsAllocation.java | 457 +++++++++++++++++++++ 7 files changed, 1571 insertions(+) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ValueRange.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ValueRanges.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ValueRangePBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ValueRangesPBImpl.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestValueRanges.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourcesCalculatorWithPorts.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestPortsAllocation.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ValueRange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ValueRange.java new file mode 100644 index 0000000..59a7fa1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ValueRange.java @@ -0,0 +1,83 @@ +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.yarn.util.Records; + +public abstract class ValueRange implements Comparable { + + public abstract int getBegin(); + + public abstract int getEnd(); + + public abstract void setBegin(int value); + + public abstract void setEnd(int value); + + public abstract boolean isLessOrEqual(ValueRange other); + + public static ValueRange newInstance(int begin, int end) { + ValueRange valueRange = Records.newRecord(ValueRange.class); + valueRange.setBegin(begin); + valueRange.setEnd(end); + return valueRange; + } + + @Override + public String toString() { + StringBuilder result = new StringBuilder(); + if (getBegin() == getEnd()) { + result.append(getBegin()); + } else { + result.append("[" + getBegin() + "-" + getEnd() + "]"); + } + return result.toString(); + } + + @Override + public int compareTo(ValueRange other) { + if (other == null) 
{ + return -1; + } + + if (getBegin() == other.getBegin() && getEnd() == other.getEnd()) { + return 0; + } else if (getBegin() - other.getBegin() < 0) { + return -1; + } else if (getBegin() - other.getBegin() == 0 + && getEnd() - other.getEnd() < 0) { + return -1; + } else { + return 1; + } + + } + + @Override + public ValueRange clone() { + return ValueRange.newInstance(getBegin(), getEnd()); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (!(obj instanceof ValueRange)) + return false; + ValueRange other = (ValueRange) obj; + if (getBegin() == other.getBegin() && getEnd() == other.getEnd()) { + return true; + } else { + return false; + } + } + + @Override + public int hashCode() { + final int prime = 263167; + int result = 0; + result = prime * result + this.getBegin(); + result = prime * result + this.getEnd(); + return result; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ValueRanges.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ValueRanges.java new file mode 100644 index 0000000..822ae1e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ValueRanges.java @@ -0,0 +1,333 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
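[Illustrative sketch, not part of the patch] The compareTo/equals pair above gives ValueRange a begin-then-end ordering, which the coalescing code in ValueRanges relies on when sorting; assuming ValueRange implements Comparable over itself as declared in the new class:

    List<ValueRange> ranges = new ArrayList<>();
    ranges.add(ValueRange.newInstance(5, 9));
    ranges.add(ValueRange.newInstance(1, 3));
    ranges.add(ValueRange.newInstance(1, 2));
    Collections.sort(ranges);
    // Sorted by begin, then by end: [1-2], [1-3], [5-9]
    // toString() prints a single number when begin == end, e.g. "7" for [7-7].
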
+ */ +package org.apache.hadoop.yarn.api.records; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.yarn.util.Records; + +public abstract class ValueRanges implements Comparable { + + public static ValueRanges newInstance(List rangesList) { + ValueRanges valueRanges = Records.newRecord(ValueRanges.class); + valueRanges.setRangesList(rangesList); + return valueRanges; + } + + public static ValueRanges newInstance() { + ValueRanges valueRanges = Records.newRecord(ValueRanges.class); + return valueRanges; + } + + public abstract List getRangesList(); + + public abstract List getSortedRangesList(); + + public abstract void setRangesList(List rangesList); + + public boolean isLessOrEqual(ValueRanges other) { + if (other == null) { + return false; + } + ValueRanges left = coalesce(this); + ValueRanges right = coalesce(other); + for (ValueRange leftRange : left.getRangesList()) { + boolean matched = false; + for (ValueRange rightRange : right.getRangesList()) { + if (leftRange.isLessOrEqual(rightRange)) { + matched = true; + break; + } + } + if (!matched) { + return false; + } + } + return true; + } + + public ValueRanges add(ValueRanges left, ValueRanges right) { + if (left == null) { + return coalesce(right); + } + if (right == null) { + return coalesce(left); + } + return coalesce(left, right); + } + + public ValueRanges minus(ValueRanges left, ValueRanges right) { + if (left == null) { + return null; + } + if (right == null) { + return coalesce(left); + } + return coalesce(left).minusSelf(right); + } + + public ValueRanges addSelf(ValueRanges other){ + if (other == null) { + return coalesce(this); + } + return coalesce(this, other); + } + + public ValueRanges minusSelf(ValueRanges other) { + if (other == null) { + return this; + } + + List leftList = cloneList(coalesce(this).getRangesList()); + List rightList = coalesce(other).getRangesList(); + int i = 0; + int j = 0; + while (i < leftList.size() && j < rightList.size()) { + ValueRange left = leftList.get(i); + ValueRange right = rightList.get(j); + // 1. no overlap, right is bigger than left + if (left.getEnd() < right.getBegin()) { + i++; + // 2. no overlap, left is bigger than right + } else if (right.getEnd() < left.getBegin()) { + j++; + // 3. has overlap, left is less than right + } else if ((left.getBegin() <= right.getBegin()) + && (left.getEnd() <= right.getEnd())) { + if (left.getBegin() == right.getBegin()) { + leftList.remove(i); + } else { + left.setEnd(right.getBegin() - 1); + } + // 4. has overlap, left is bigger than right + } else if ((left.getBegin() >= right.getBegin()) + && (left.getEnd() >= right.getEnd())) { + if (left.getEnd() == right.getEnd()) { + leftList.remove(i); + } else { + left.setBegin(right.getEnd() + 1); + } + // 5. left contains right + } else if ((left.getBegin() < right.getBegin()) + && (left.getEnd() > right.getEnd())) { + ValueRange newRange = + ValueRange.newInstance(right.getEnd() + 1, left.getEnd()); + leftList.add(i + 1, newRange); + left.setEnd(right.getBegin() - 1); + // 6. 
right contains left + } else if ((left.getBegin() > right.getBegin()) + && (left.getEnd() < right.getEnd())) { + leftList.remove(i); + } + } + + ValueRanges result = ValueRanges.newInstance(); + result.setRangesList(leftList); + result.setCoalescedFlag(true); + return result; + } + + private ValueRanges coalesce(ValueRanges left, ValueRanges right) { + if (left == null) { + return right; + } + if (right == null) { + return left; + } + List leftList = cloneList(left.getRangesList()); + leftList.addAll(cloneList(right.getRangesList())); + + Collections.sort(leftList); + ValueRanges mergedRanges = ValueRanges.newInstance(coalesceList(leftList)); + mergedRanges.setCoalescedFlag(true); + + return mergedRanges; + } + + private static List coalesceList(List sortedList) { + if (sortedList == null || sortedList.isEmpty()) { + return sortedList; + } + + List resultList = new ArrayList(); + + ValueRange current = sortedList.get(0).clone(); + resultList.add(current); + + // In a single pass, we compute the size of the end result, as well as + // modify + // in place the intermediate data structure to build up result as we + // solve it. + + for (ValueRange range : sortedList) { + // Skip if this range is equivalent to the current range. + if (range.getBegin() == current.getBegin() + && range.getEnd() == current.getEnd()) { + continue; + } + // If the current range just needs to be extended on the right. + if (range.getBegin() == current.getBegin() + && range.getEnd() > current.getEnd()) { + current.setEnd(range.getEnd()); + } else if (range.getBegin() > current.getBegin()) { + // If we are starting farther ahead, then there are 2 cases: + if (range.getBegin() <= current.getEnd() + 1) { + // 1. Ranges are overlapping and we can merge them. + current.setEnd(Math.max(current.getEnd(), range.getEnd())); + } else { + // 2. No overlap and we are adding a new range. + current = range.clone(); + resultList.add(current); + } + } + } + return resultList; + } + + private static ValueRanges coalesce(ValueRanges uranges) { + if (uranges == null) { + return null; + } + + if (uranges.isCoalesced()) { + return uranges; + } + + ValueRanges result = ValueRanges.newInstance(); + if (uranges.getRangesCount() == 0) { + return result; + } + List rangesList = uranges.getSortedRangesList(); + + result.setRangesList(coalesceList(rangesList)); + result.setCoalescedFlag(true); + + return result; + } + + public synchronized static List cloneList(List list) { + List newList = new ArrayList(); + for (ValueRange range : list) { + newList.add(range.clone()); + } + return newList; + } + + public abstract int getRangesCount(); + + /** + * this method is used to check if the valueranges coalesced, coalesced means + * no override parts and well sorted. For example, [1-3],[5-10] is coalesced, + * and [1-4],[3-10] and [5-10].[1-3] is not. 
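[Illustrative sketch, not part of the patch] A worked example of the coalescing and subtraction behaviour implemented above, using only methods this patch introduces:

    // addSelf merges overlapping ranges into coalesced form.
    ValueRanges left = ValueRanges.iniFromExpression("[1-4]");
    ValueRanges right = ValueRanges.iniFromExpression("[3-10]");
    ValueRanges merged = left.addSelf(right);    // [1-10]
    // minusSelf removes a sub-range, splitting the remainder where needed.
    ValueRanges remaining = merged.minusSelf(
        ValueRanges.iniFromExpression("[5-6]")); // [1-4],[7-10]
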
+ * + * @return true or false + */ + public abstract boolean isCoalesced(); + + public abstract void setCoalescedFlag(boolean flag); + + /** + * initialize the valueranges from expression, we current support[1-3],[5-10] + * style + * + * @param expression + * @return + */ + public static ValueRanges iniFromExpression(String expression) { + ValueRanges valueRanges = Records.newRecord(ValueRanges.class); + String[] items = expression.split(","); + List rangesList = new ArrayList(); + Pattern pattern = Pattern.compile("^\\[(\\d+)\\-(\\d+)\\]$"); + for (String item : items) { + Matcher matcher = pattern.matcher(item); + if (matcher.find()) { + ValueRange rang = + ValueRange.newInstance(Integer.parseInt(matcher.group(1)), + Integer.parseInt(matcher.group(2))); + rangesList.add(rang); + } else { + try { + int num = Integer.parseInt(item); + ValueRange rang = ValueRange.newInstance(num, num); + rangesList.add(rang); + } catch (NumberFormatException e) { + // ignore this num + } + } + } + valueRanges.setRangesList(rangesList); + return valueRanges; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (!(obj instanceof ValueRanges)) + return false; + ValueRanges other = (ValueRanges) obj; + if (this.equals(other)) { + return true; + } else { + return false; + } + } + + public synchronized boolean equals(ValueRanges other) { + if (other == null) { + return false; + } + + ValueRanges left = coalesce(this); + ValueRanges right = coalesce(other); + if (left.getRangesCount() != right.getRangesCount()) { + return false; + } + List leftRange = left.getRangesList(); + List rightRange = right.getRangesList(); + for (int i = 0; i < left.getRangesCount(); i++) { + if (!leftRange.get(i).equals(rightRange.get(i))) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + return getRangesList().hashCode(); + } + + @Override + public int compareTo(ValueRanges other) { + if (this.equals(other)) { + return 0; + } else if (this.isLessOrEqual(other)) { + return -1; + } else { + return 1; + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ValueRangePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ValueRangePBImpl.java new file mode 100644 index 0000000..f0ba964 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ValueRangePBImpl.java @@ -0,0 +1,103 @@ +package org.apache.hadoop.yarn.api.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.ValueRange; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangeProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangeProtoOrBuilder; + +public class ValueRangePBImpl extends ValueRange { + + ValueRangeProto proto = ValueRangeProto.getDefaultInstance(); + ValueRangeProto.Builder builder = null; + boolean viaProto = false; + int begin, end = -1; + + public ValueRangePBImpl(ValueRangeProto proto) { + this.proto = proto; + viaProto = true; + } + + public ValueRangePBImpl() { + } + + public ValueRangeProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int getBegin() { + initLocalRange(); + return begin; + } + + @Override + public int getEnd() { + initLocalRange(); + return end; + } + + @Override + public void setBegin(int value) { + begin = value; + } + + @Override + public void setEnd(int value) { + end = value; + } + + @Override + public boolean isLessOrEqual(ValueRange other) { + if (this.getBegin() >= other.getBegin() && this.getEnd() <= other.getEnd()) { + return true; + } + return false; + } + + private void maybeInitBuilder() { + if (viaProto) { + builder = ValueRangeProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void mergeLocalToBuilder() { + if (begin != -1 && end != -1) { + addRangeToProto(); + } + } + + private void addRangeToProto() { + maybeInitBuilder(); + if (begin == -1 && end == -1) + return; + if (builder == null) { + builder = ValueRangeProto.newBuilder(); + } + builder.setBegin(begin); + builder.setEnd(end); + } + + private void initLocalRange() { + if (begin != -1 && end != -1) { + return; + } + if (!viaProto && builder == null) { + builder = ValueRangeProto.newBuilder(); + } + ValueRangeProtoOrBuilder p = viaProto ? proto : builder; + begin = p.getBegin(); + end = p.getEnd(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ValueRangesPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ValueRangesPBImpl.java new file mode 100644 index 0000000..5b5fcab --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ValueRangesPBImpl.java @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.api.records.impl.pb; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; + +import org.apache.hadoop.yarn.api.records.ValueRange; +import org.apache.hadoop.yarn.api.records.ValueRanges; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangeProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangesProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangesProtoOrBuilder; + +public class ValueRangesPBImpl extends ValueRanges { + + ValueRangesProto proto = ValueRangesProto.getDefaultInstance(); + ValueRangesProto.Builder builder = null; + boolean viaProto = false; + List ranges = null; + List unmodifiableRanges = null; + boolean isCoalesced = false; + + public ValueRangesPBImpl(ValueRangesProto proto) { + this.proto = proto; + viaProto = true; + } + + public ValueRangesPBImpl() { + builder = ValueRangesProto.newBuilder(); + } + + public ValueRangesProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + public boolean isCoalesced() { + return isCoalesced; + } + + public void setCoalescedFlag(boolean flag) { + isCoalesced = flag; + } + + @Override + public String toString() { + StringBuilder result = new StringBuilder(); + int i = 0; + List list = getRangesList(); + for (ValueRange range : list) { + if (i != list.size() - 1) { + result.append(range.toString() + ","); + } else { + result.append(range.toString()); + } + i++; + } + return result.toString(); + } + + @Override + public synchronized int getRangesCount() { + initLocalRanges(); + return getRangesList().size(); + } + + private void assignRanges(List value) { + List newList = new ArrayList(); + for (ValueRange range : value) { + newList.add(range.clone()); + } + ranges = newList; + unmodifiableRanges = Collections.unmodifiableList(value); + } + + private void initLocalRanges() { + if (this.ranges != null) { + return; + } + ValueRangesProtoOrBuilder p = viaProto ? 
proto : builder; + List list = p.getRangesList(); + List tempRanges = new ArrayList(); + for (ValueRangeProto a : list) { + tempRanges.add(convertFromProtoFormat(a)); + } + assignRanges(tempRanges); + } + + private static ValueRangePBImpl convertFromProtoFormat(ValueRangeProto a) { + return new ValueRangePBImpl(a); + } + + private static ValueRangeProto convertToProtoFormat(ValueRange t) { + return ((ValueRangePBImpl) t).getProto(); + } + + @Override + public synchronized List getSortedRangesList() { + initLocalRanges(); + List newList = cloneList(this.getRangesList()); + Collections.sort(newList); + return newList; + } + + @Override + public synchronized List getRangesList() { + initLocalRanges(); + return unmodifiableRanges; + } + + public synchronized void setRangesList(List rangesList) { + if (rangesList == null) { + builder.clearRanges(); + } + assignRanges(rangesList); + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ValueRangesProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.ranges != null) { + addRangesToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void addRangesToProto() { + maybeInitBuilder(); + if (ranges == null || ranges.isEmpty()) { + builder.clearRanges(); + return; + } + List list = new LinkedList<>(); + for (ValueRange range : ranges) { + list.add(convertToProtoFormat(range)); + } + builder.clearRanges(); + builder.addAllRanges(list); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestValueRanges.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestValueRanges.java new file mode 100644 index 0000000..cdb4758 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestValueRanges.java @@ -0,0 +1,167 @@ +package org.apache.hadoop.yarn.api.records; + +import org.junit.Assert; +import org.junit.Test; + +public class TestValueRanges { + @Test(timeout = 2000) + public void testValueRnagesBasicOperation() { + + // Equal Test + ValueRanges lhs = ValueRanges.iniFromExpression("[1-3]"); + ValueRanges rhs = ValueRanges.iniFromExpression("[1-3]"); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + lhs.equals(rhs)); + + lhs = ValueRanges.iniFromExpression("[1-3]"); + rhs = ValueRanges.iniFromExpression("[1-10]"); + Assert.assertEquals("Equal operation is wrongly calculated.", false, + lhs.equals(rhs)); + + lhs = ValueRanges.iniFromExpression("1,2,3,4,5,6,7,8,9,10"); + rhs = ValueRanges.iniFromExpression("[1-10]"); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + lhs.equals(rhs)); + + // Add Test + lhs = + ValueRanges.iniFromExpression("[1-3]").addSelf( + ValueRanges.iniFromExpression("[4-10]")); + rhs = ValueRanges.iniFromExpression("[1-10]"); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + lhs.equals(rhs)); + + lhs = + ValueRanges.iniFromExpression("[1-3]").addSelf( + ValueRanges.iniFromExpression("[7-10]")); + rhs = ValueRanges.iniFromExpression("[1-3],[7-10]"); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + lhs.equals(rhs)); + + lhs = + ValueRanges.newInstance().addSelf( + ValueRanges.iniFromExpression("[1-10]")); + rhs = ValueRanges.iniFromExpression("[1-10]"); + 
Assert.assertEquals("Equal operation is wrongly calculated.", true, + lhs.equals(rhs)); + + // Minus Test + lhs = ValueRanges.iniFromExpression("[1-3]"); + rhs = + ValueRanges.iniFromExpression("[1-10]").minusSelf( + ValueRanges.iniFromExpression("[4-10]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + lhs.equals(rhs)); + + lhs = ValueRanges.iniFromExpression("[1-3]"); + rhs = + ValueRanges.iniFromExpression("[1-10]").minusSelf( + ValueRanges.iniFromExpression("[3-10]")); + Assert.assertEquals("Equal operation is wrongly calculated.", false, + lhs.equals(rhs)); + + lhs = ValueRanges.iniFromExpression("[1-3]"); + rhs = + ValueRanges.iniFromExpression("[1-10]").minusSelf( + ValueRanges.iniFromExpression("[3-10]")); + Assert.assertEquals("Equal operation is wrongly calculated.", false, + lhs.equals(rhs)); + + lhs = ValueRanges.iniFromExpression("[1-3],[5-10]"); + rhs = + ValueRanges.iniFromExpression("[1-10]").minusSelf( + ValueRanges.iniFromExpression("4")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + lhs.equals(rhs)); + + lhs = ValueRanges.iniFromExpression("[1-3],[6-10]"); + rhs = + ValueRanges.iniFromExpression("[1-10]").minusSelf( + ValueRanges.iniFromExpression("[4-5]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + lhs.equals(rhs)); + + lhs = ValueRanges.iniFromExpression("[1-3],[10-20]"); + rhs = + ValueRanges.iniFromExpression("[1-10]").minusSelf( + ValueRanges.iniFromExpression("[4-10]")); + Assert.assertEquals("Equal operation is wrongly calculated.", false, + rhs.equals(lhs)); + + rhs = + ValueRanges.iniFromExpression("[3-5]").minusSelf( + ValueRanges.iniFromExpression("[1-10]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(ValueRanges.newInstance())); + + lhs = ValueRanges.iniFromExpression("[10-20]"); + rhs = + ValueRanges.iniFromExpression("[8-20]").minusSelf( + ValueRanges.iniFromExpression("[1-9]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(lhs)); + + lhs = ValueRanges.iniFromExpression("[1-3],[10-20]"); + rhs = + ValueRanges.iniFromExpression("[1-7],[9-20]").minusSelf( + ValueRanges.iniFromExpression("[4-9]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(lhs)); + + lhs = ValueRanges.iniFromExpression("[1-3],[10-20],[40-80],[95-100]"); + rhs = ValueRanges.iniFromExpression("[1-100]"); + // 1. left contains right + rhs = rhs.minusSelf(ValueRanges.iniFromExpression("[4-6],[21-30]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(ValueRanges.iniFromExpression("[1-3],[7-20],[31-100]"))); + // 2. has overlap, left is bigger than right + rhs = rhs.minusSelf(ValueRanges.iniFromExpression("[4-9],[93-94]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(ValueRanges + .iniFromExpression("[1-3],[10-20],[31-92],[95-100]"))); + // 3. has overlap, left is less than right + rhs = rhs.minusSelf(ValueRanges.iniFromExpression("[81-83],[91-94]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(ValueRanges + .iniFromExpression("[1-3],[10-20],[31-80],[84-90],[95-100]"))); + // 4. 
right contains left, and no overlap + rhs = rhs.minusSelf(ValueRanges.iniFromExpression("[81-93],94,[101-103]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(ValueRanges + .iniFromExpression("[1-3],[10-20],[31-80],[95-100]"))); + + rhs = rhs.minusSelf(ValueRanges.iniFromExpression("[31-39],[81-94]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(lhs)); + + // Less Test + lhs = ValueRanges.iniFromExpression("[1-3]"); + rhs = + ValueRanges.iniFromExpression("[1-10]").minusSelf( + ValueRanges.iniFromExpression("[3-10]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.isLessOrEqual(lhs)); + + lhs = ValueRanges.newInstance(); + rhs = + ValueRanges.iniFromExpression("[1-10]").minusSelf( + ValueRanges.iniFromExpression("[1-10]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.equals(lhs)); + + lhs = ValueRanges.iniFromExpression("[1-3],[10-20]"); + rhs = + ValueRanges.iniFromExpression("[1-20]").minusSelf( + ValueRanges.iniFromExpression("[4-5]")); + Assert.assertEquals("Equal operation is wrongly calculated.", false, + rhs.isLessOrEqual(lhs)); + + lhs = ValueRanges.iniFromExpression("[1-3],[10-20]"); + rhs = + ValueRanges.iniFromExpression("[1-20]").minusSelf( + ValueRanges.iniFromExpression("[4-10]")); + Assert.assertEquals("Equal operation is wrongly calculated.", true, + rhs.isLessOrEqual(lhs)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourcesCalculatorWithPorts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourcesCalculatorWithPorts.java new file mode 100644 index 0000000..d9bb1dc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourcesCalculatorWithPorts.java @@ -0,0 +1,257 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.util.resource; + +import java.util.Arrays; +import java.util.Collection; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ValueRanges; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@RunWith(Parameterized.class) +public class TestResourcesCalculatorWithPorts { + private ResourceCalculator resourceCalculator; + + @Parameterized.Parameters + public static Collection getParameters() { + return Arrays.asList(new ResourceCalculator[][] { + { new DefaultResourceCalculator() }, + { new DominantResourceCalculator() } }); + } + + public TestResourcesCalculatorWithPorts(ResourceCalculator rs) { + this.resourceCalculator = rs; + } + + @Test(timeout = 10000) + public void testResourceCalculatorCompareMethodWithPorts() { + Resource clusterResource = Resource.newInstance(0, 0); + + // For lhs == rhs + Resource lhs = + Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + Resources.addToWithPorts(lhs, + Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[4-10]"))); + Resource rhs = + Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, lhs); + + // For lhs == rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + lhs = + Resources + .addWithPorts( + lhs, + Resource.newInstance(1, 1, + ValueRanges.iniFromExpression("[4-10]"))); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, lhs); + + // For lhs == rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + Resources.subtractFromWithPorts(rhs, + Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[4-10]"))); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, lhs); + + // For lhs == rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + rhs = + Resources + .subtractWithPorts( + rhs, + Resource.newInstance(1, 1, + ValueRanges.iniFromExpression("[4-10]"))); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, lhs); + + // For lhs == rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + rhs = Resources.cloneWithPorts(lhs); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, lhs); + + if (!(resourceCalculator instanceof DominantResourceCalculator)) { + return; + } + + // dominant resource types + // For lhs > rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + Resources.addToWithPorts(lhs, + Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[4-11]"))); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, false, + true, true, lhs, rhs); + + // For lhs > rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + lhs = + Resources + .addWithPorts( + lhs, + Resource.newInstance(1, 1, + ValueRanges.iniFromExpression("[4-11]"))); + 
rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, false, + true, true, lhs, rhs); + + // For lhs > rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + Resources.subtractFromWithPorts(rhs, + Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[3-10]"))); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, false, + true, true, lhs, rhs); + + // For lhs > rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + rhs = + Resources + .subtractWithPorts( + rhs, + Resource.newInstance(1, 1, + ValueRanges.iniFromExpression("[3-10]"))); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, false, + true, true, lhs, rhs); + } + + @Test(timeout = 10000) + public void testResourceCalculatorCompareMethodWithPorts2() { + Resource clusterResource = Resource.newInstance(0, 0); + + // For lhs == rhs + Resource lhs = + Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + Resource rhs = + Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[1-3]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, lhs); + + // lhs > rhs + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + rhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[3-5]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, false, + true, true, lhs, rhs); + + // For lhs < rhs + lhs = Resource.newInstance(0, 0, ValueRanges.iniFromExpression("[3-5]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, true, true, + false, false, rhs, lhs); + + if (!(resourceCalculator instanceof DominantResourceCalculator)) { + return; + } + + // verify for 2 dimensional resources i.e memory and cpu + // dominant resource types + lhs = Resource.newInstance(1, 0, ValueRanges.iniFromExpression("[1-10]")); + rhs = Resource.newInstance(0, 1, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, lhs); + + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, rhs); + + lhs = Resource.newInstance(0, 1, ValueRanges.iniFromExpression("[1-10]")); + rhs = Resource.newInstance(1, 0, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, true, + false, true, lhs, lhs); + + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[2-10]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-5]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, false, + true, true, lhs, rhs); + + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-20]")); + rhs = Resource.newInstance(1, 0, ValueRanges.iniFromExpression("[1-10]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, false, + true, true, lhs, rhs); + + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + rhs = Resource.newInstance(1, 1, 
ValueRanges.iniFromExpression("[1-20]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, true, true, + false, false, rhs, lhs); + + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-10]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[2-8]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, false, false, + true, true, lhs, rhs); + + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[2-10]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-20]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, true, true, + false, false, rhs, lhs); + + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[2-10]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-20]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, true, true, + false, false, rhs, lhs); + + lhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[2-10]")); + rhs = Resource.newInstance(1, 1, ValueRanges.iniFromExpression("[1-20]")); + assertResourcesOperationsWithPorts(clusterResource, lhs, rhs, true, true, + false, false, rhs, lhs); + + } + + private void assertResourcesOperationsWithPorts(Resource clusterResource, + Resource lhs, Resource rhs, boolean lessThan, boolean lessThanOrEqual, + boolean greaterThan, boolean greaterThanOrEqual, Resource max, + Resource min) { + + Assert.assertEquals("Less Than operation is wrongly calculated.", lessThan, + Resources.lessThanWithPorts(resourceCalculator, clusterResource, lhs, + rhs)); + + Assert.assertEquals( + "Less Than Or Equal To operation is wrongly calculated.", + lessThanOrEqual, Resources.lessThanOrEqualWithPorts(resourceCalculator, + clusterResource, lhs, rhs)); + + Assert.assertEquals("Greater Than operation is wrongly calculated.", + greaterThan, Resources.greaterThanWithPorts(resourceCalculator, + clusterResource, lhs, rhs)); + + Assert.assertEquals( + "Greater Than Or Equal To operation is wrongly calculated.", + greaterThanOrEqual, Resources.greaterThanOrEqualWithPorts( + resourceCalculator, clusterResource, lhs, rhs)); + + Assert.assertEquals("Max(value) Operation wrongly calculated.", max, + Resources.maxWithPorts(resourceCalculator, clusterResource, lhs, rhs)); + + Assert.assertEquals("Min(value) operation is wrongly calculated.", min, + Resources.minWithPorts(resourceCalculator, clusterResource, lhs, rhs)); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestPortsAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestPortsAllocation.java new file mode 100644 index 0000000..9e6f316 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestPortsAllocation.java @@ -0,0 +1,457 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.ConcurrentModificationException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CyclicBarrier; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerExitStatus; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.ValueRanges; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.factories.RecordFactory; +import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FifoOrderingPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy; +import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy; +import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; +import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Matchers; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +public class TestPortsAllocation { + + private static final Log LOG = LogFactory.getLog(TestLeafQueue.class); + + private final RecordFactory recordFactory = + RecordFactoryProvider.getRecordFactory(null); + + RMContext rmContext; + RMContext spyRMContext; + ResourceRequest amResourceRequest; + CapacityScheduler cs; + CapacitySchedulerConfiguration csConf; + CapacitySchedulerContext csContext; + + CSQueue root; + Map queues = new HashMap(); + + final static int GB = 1024; + final static String DEFAULT_RACK = "/default"; + + private final ResourceCalculator resourceCalculator = + new DominantResourceCalculator(); + + @Before + public void setUp() throws Exception { + CapacityScheduler spyCs = new CapacityScheduler(); + cs = spy(spyCs); + rmContext = TestUtils.getMockRMContext(); + spyRMContext = spy(rmContext); + + ConcurrentMap spyApps = + spy(new ConcurrentHashMap()); + RMApp rmApp = mock(RMApp.class); + when(rmApp.getRMAppAttempt((ApplicationAttemptId)Matchers.any())).thenReturn(null); + amResourceRequest = mock(ResourceRequest.class); + when(amResourceRequest.getCapability()).thenReturn( + Resources.createResource(0, 0)); + when(rmApp.getAMResourceRequest()).thenReturn(amResourceRequest); + Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId)Matchers.any()); + when(spyRMContext.getRMApps()).thenReturn(spyApps); + + csConf = + new CapacitySchedulerConfiguration(); + csConf.setBoolean("yarn.scheduler.capacity.user-metrics.enable", true); + csConf.setBoolean(YarnConfiguration.PORTS_AS_RESOURCE_ENABLE, true); + final String newRoot = "root" + System.currentTimeMillis(); + setupQueueConfiguration(csConf, newRoot); + YarnConfiguration conf = new YarnConfiguration(); + cs.setConf(conf); + + csContext = mock(CapacitySchedulerContext.class); + when(csContext.getConfiguration()).thenReturn(csConf); + when(csContext.getConf()).thenReturn(conf); + when(csContext.getMinimumResourceCapability()). + thenReturn(Resources.createResource(GB, 1)); + when(csContext.getMaximumResourceCapability()). 
+ thenReturn(Resources.createResource(16*GB, 32)); + when(csContext.getClusterResource()). + thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32)); + when(csContext.getApplicationComparator()). + thenReturn(CapacityScheduler.applicationComparator); + when(csContext.getNonPartitionedQueueComparator()). + thenReturn(CapacityScheduler.nonPartitionedQueueComparator); + when(csContext.getResourceCalculator()). + thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); + RMContainerTokenSecretManager containerTokenSecretManager = + new RMContainerTokenSecretManager(conf); + containerTokenSecretManager.rollMasterKey(); + when(csContext.getContainerTokenSecretManager()).thenReturn( + containerTokenSecretManager); + + root = + CapacityScheduler.parseQueue(csContext, csConf, null, + CapacitySchedulerConfiguration.ROOT, + queues, queues, + TestUtils.spyHook); + + cs.setRMContext(spyRMContext); + cs.init(csConf); + cs.start(); + + when(spyRMContext.getScheduler()).thenReturn(cs); + when(spyRMContext.getYarnConfiguration()) + .thenReturn(new YarnConfiguration()); + when(cs.getNumClusterNodes()).thenReturn(3); + } + + private static final String A = "a"; + private static final String B = "b"; + private static final String C = "c"; + private static final String C1 = "c1"; + private static final String D = "d"; + private static final String E = "e"; + private void setupQueueConfiguration( + CapacitySchedulerConfiguration conf, + final String newRoot) { + + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {newRoot}); + conf.setMaximumCapacity(CapacitySchedulerConfiguration.ROOT, 100); + conf.setAcl(CapacitySchedulerConfiguration.ROOT, + QueueACL.SUBMIT_APPLICATIONS, " "); + + final String Q_newRoot = CapacitySchedulerConfiguration.ROOT + "." + newRoot; + conf.setQueues(Q_newRoot, new String[] {A, B, C, D, E}); + conf.setCapacity(Q_newRoot, 100); + conf.setMaximumCapacity(Q_newRoot, 100); + conf.setAcl(Q_newRoot, QueueACL.SUBMIT_APPLICATIONS, " "); + + final String Q_A = Q_newRoot + "." + A; + conf.setCapacity(Q_A, 8.5f); + conf.setMaximumCapacity(Q_A, 20); + conf.setAcl(Q_A, QueueACL.SUBMIT_APPLICATIONS, "*"); + + final String Q_B = Q_newRoot + "." + B; + conf.setCapacity(Q_B, 80); + conf.setMaximumCapacity(Q_B, 99); + conf.setAcl(Q_B, QueueACL.SUBMIT_APPLICATIONS, "*"); + + final String Q_C = Q_newRoot + "." + C; + conf.setCapacity(Q_C, 1.5f); + conf.setMaximumCapacity(Q_C, 10); + conf.setAcl(Q_C, QueueACL.SUBMIT_APPLICATIONS, " "); + + conf.setQueues(Q_C, new String[] {C1}); + + final String Q_C1 = Q_C + "." + C1; + conf.setCapacity(Q_C1, 100); + + final String Q_D = Q_newRoot + "." + D; + conf.setCapacity(Q_D, 9); + conf.setMaximumCapacity(Q_D, 11); + conf.setAcl(Q_D, QueueACL.SUBMIT_APPLICATIONS, "user_d"); + + final String Q_E = Q_newRoot + "." + E; + conf.setCapacity(Q_E, 1); + conf.setMaximumCapacity(Q_E, 1); + conf.setAcl(Q_E, QueueACL.SUBMIT_APPLICATIONS, "user_e"); + + } + + static LeafQueue stubLeafQueue(LeafQueue queue) { + + // Mock some methods for ease in these unit tests + + // 1. 
LeafQueue.createContainer to return dummy containers + doAnswer( + new Answer() { + @Override + public Container answer(InvocationOnMock invocation) + throws Throwable { + final FiCaSchedulerApp application = + (FiCaSchedulerApp)(invocation.getArguments()[0]); + final ContainerId containerId = + TestUtils.getMockContainerId(application); + + Container container = TestUtils.getMockContainer( + containerId, + ((FiCaSchedulerNode)(invocation.getArguments()[1])).getNodeID(), + (Resource)(invocation.getArguments()[2]), + ((Priority)invocation.getArguments()[3])); + return container; + } + } + ). + when(queue).createContainer( + any(FiCaSchedulerApp.class), + any(FiCaSchedulerNode.class), + any(Resource.class), + any(Priority.class) + ); + + // 2. Stub out LeafQueue.parent.completedContainer + CSQueue parent = queue.getParent(); + doNothing().when(parent).completedContainer( + any(Resource.class), any(FiCaSchedulerApp.class), any(FiCaSchedulerNode.class), + any(RMContainer.class), any(ContainerStatus.class), + any(RMContainerEventType.class), any(CSQueue.class), anyBoolean()); + + return queue; + } + + public boolean hasQueueACL(List aclInfos, QueueACL acl) { + for (QueueUserACLInfo aclInfo : aclInfos) { + if (aclInfo.getUserAcls().contains(acl)) { + return true; + } + } + return false; + } + + @Test + public void testFifoAssignmentWithPorts() throws Exception { + + LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A)); + + a.setOrderingPolicy(new FifoOrderingPolicy()); + + String host_0_0 = "127.0.0.1"; + String rack_0 = "rack_0"; + FiCaSchedulerNode node_0_0 = + TestUtils.getMockNodeForPortsCaculate(host_0_0, rack_0, 0, 16 * GB, 10, + ValueRanges.iniFromExpression("[1-100]"),csConf); + + final int numNodes = 4; + Resource clusterResource = + Resources.createResource(numNodes * (16 * GB), numNodes * 16, + ValueRanges.iniFromExpression("[1-100]")); + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + + String user_0 = "user_0"; + + final ApplicationAttemptId appAttemptId_0 = + TestUtils.getMockApplicationAttemptId(0, 0); + FiCaSchedulerApp app_0 = + spy(new FiCaSchedulerApp(appAttemptId_0, user_0, a, + mock(ActiveUsersManager.class), spyRMContext)); + a.submitApplicationAttempt(app_0, user_0); + + final ApplicationAttemptId appAttemptId_1 = + TestUtils.getMockApplicationAttemptId(1, 0); + FiCaSchedulerApp app_1 = + spy(new FiCaSchedulerApp(appAttemptId_1, user_0, a, + mock(ActiveUsersManager.class), spyRMContext)); + a.submitApplicationAttempt(app_1, user_0); + + Priority priority = TestUtils.createMockPriority(1); + List app_0_requests_0 = new ArrayList(); + List app_1_requests_0 = new ArrayList(); + + app_0_requests_0.clear(); + app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, + 2 * GB, ValueRanges.iniFromExpression("[1-10]"), 1, true, priority, + recordFactory)); + app_0.updateResourceRequests(app_0_requests_0); + + app_1_requests_0.clear(); + app_1_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, + 1 * GB, ValueRanges.iniFromExpression("[11-15]"), 1, true, priority, + recordFactory)); + app_1.updateResourceRequests(app_1_requests_0); + + a.assignContainers(clusterResource, node_0_0, new ResourceLimits( + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + Assert.assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + //check the available port + Assert.assertEquals(ValueRanges.iniFromExpression("[11-100]"), node_0_0.getAvailablePorts()); + a.assignContainers(clusterResource, node_0_0, new ResourceLimits( + 
clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + Assert.assertEquals(1 * GB, app_1.getCurrentConsumption().getMemory()); + //check the available port + Assert.assertEquals(ValueRanges.iniFromExpression("[16-100]"), node_0_0.getAvailablePorts()); + + + app_0_requests_0.clear(); + app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, + 1 * GB, 1, true, priority, recordFactory)); + app_0.updateResourceRequests(app_0_requests_0); + + app_1_requests_0.clear(); + app_1_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, + 1 * GB, 1, true, priority, recordFactory)); + app_1.updateResourceRequests(app_1_requests_0); + + // Even thought it already has more resources, app_0 will still get + // assigned first + a.assignContainers(clusterResource, node_0_0, new ResourceLimits( + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + Assert.assertEquals(3 * GB, app_0.getCurrentConsumption().getMemory()); + Assert.assertEquals(1 * GB, app_1.getCurrentConsumption().getMemory()); + //check the available port + Assert.assertEquals(ValueRanges.iniFromExpression("[16-100]"), node_0_0.getAvailablePorts()); + + // and only then will app_1 + a.assignContainers(clusterResource, node_0_0, new ResourceLimits( + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + Assert.assertEquals(2 * GB, app_1.getCurrentConsumption().getMemory()); + //check the available port + Assert.assertEquals(ValueRanges.iniFromExpression("[16-100]"), node_0_0.getAvailablePorts()); + } + + @Test + public void testFifoAssignmentWithPorts2() throws Exception { + + LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A)); + + a.setOrderingPolicy(new FifoOrderingPolicy()); + + String host_0_0 = "127.0.0.1"; + String rack_0 = "rack_0"; + FiCaSchedulerNode node_0_0 = + TestUtils.getMockNodeForPortsCaculate(host_0_0, rack_0, 0, 16 * GB, 10, + ValueRanges.iniFromExpression("[1-100]"), csConf); + + final int numNodes = 4; + Resource clusterResource = + Resources.createResource(numNodes * (16 * GB), numNodes * 16, + ValueRanges.iniFromExpression("[1-100]")); + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + + String user_0 = "user_0"; + + final ApplicationAttemptId appAttemptId_0 = + TestUtils.getMockApplicationAttemptId(0, 0); + FiCaSchedulerApp app_0 = + spy(new FiCaSchedulerApp(appAttemptId_0, user_0, a, + mock(ActiveUsersManager.class), spyRMContext)); + a.submitApplicationAttempt(app_0, user_0); + + final ApplicationAttemptId appAttemptId_1 = + TestUtils.getMockApplicationAttemptId(1, 0); + FiCaSchedulerApp app_1 = + spy(new FiCaSchedulerApp(appAttemptId_1, user_0, a, + mock(ActiveUsersManager.class), spyRMContext)); + a.submitApplicationAttempt(app_1, user_0); + + Priority priority = TestUtils.createMockPriority(1); + List app_0_requests_0 = new ArrayList(); + List app_1_requests_0 = new ArrayList(); + + app_0_requests_0.clear(); + app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, + 2 * GB, ValueRanges.iniFromExpression("[1-10]"), 1, true, priority, + recordFactory)); + app_0.updateResourceRequests(app_0_requests_0); + + app_1_requests_0.clear(); + app_1_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, + 1 * GB, ValueRanges.iniFromExpression("[8-15]"), 1, true, priority, + recordFactory)); + app_1.updateResourceRequests(app_1_requests_0); + + a.assignContainers(clusterResource, node_0_0, new ResourceLimits( + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + // app_0 should allocate successfully 
+ Assert.assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + + a.assignContainers(clusterResource, node_0_0, new ResourceLimits( + clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); + // app_1 should not be allocated as ports conflict + Assert.assertEquals(0, app_1.getCurrentConsumption().getMemory()); + // check the available port + Assert.assertEquals(ValueRanges.iniFromExpression("[11-100]"), + node_0_0.getAvailablePorts()); + + } + + + @After + public void tearDown() throws Exception { + if (cs != null) { + cs.stop(); + } + } +} -- 1.9.1 From f12d3504379ce4e1fd0b68fe4392e4b3da0bcf49 Mon Sep 17 00:00:00 2001 From: wjlei Date: Thu, 11 May 2017 13:12:38 +0800 Subject: [PATCH 3/3] pass compile --- .../hadoop/yarn/sls/nodemanager/NodeInfo.java | 7 +- .../hadoop/yarn/sls/scheduler/RMNodeWrapper.java | 11 ++++ .../apache/hadoop/yarn/api/records/Resource.java | 37 +++++++++-- .../apache/hadoop/yarn/conf/YarnConfiguration.java | 5 ++ .../src/main/proto/yarn_protos.proto | 10 +++ .../yarn/api/records/impl/pb/ResourcePBImpl.java | 36 +++++++++- .../org/apache/hadoop/yarn/util/PortsInfo.java | 77 ++++++++++++++++++++++ .../util/resource/DominantResourceCalculator.java | 1 + .../hadoop/yarn/util/resource/Resources.java | 38 ++++++++++- .../hadoop/yarn/api/BasePBImplRecordsTest.java | 4 -- .../apache/hadoop/yarn/api/TestPBImplRecords.java | 7 ++ .../RegisterNodeManagerRequest.java | 15 +++++ .../impl/pb/RegisterNodeManagerRequestPBImpl.java | 37 +++++++++++ .../hadoop/yarn/server/api/records/NodeStatus.java | 5 ++ .../api/records/impl/pb/NodeStatusPBImpl.java | 38 +++++++++++ .../src/main/proto/yarn_server_common_protos.proto | 1 + .../proto/yarn_server_common_service_protos.proto | 1 + .../server/nodemanager/NodeStatusUpdaterImpl.java | 19 ++---- .../resourcemanager/ResourceTrackerService.java | 7 +- .../yarn/server/resourcemanager/rmnode/RMNode.java | 17 ++++- .../server/resourcemanager/rmnode/RMNodeImpl.java | 21 +++++- .../resourcemanager/scheduler/SchedulerNode.java | 4 +- .../scheduler/capacity/LeafQueue.java | 5 -- .../allocator/RegularContainerAllocator.java | 8 ++- .../yarn/server/resourcemanager/MockNodes.java | 11 ++++ .../scheduler/capacity/TestPortsAllocation.java | 46 ++++--------- .../scheduler/capacity/TestUtils.java | 21 +++++- 27 files changed, 413 insertions(+), 76 deletions(-) create mode 100755 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/PortsInfo.java diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java index 5a7c6c9..1c0e320 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; @@ -223,7 +224,6 @@ public Integer getDecommissioningTimeout() { public Resource getPhysicalResource() { return null; } - } @Override public void 
setLocalUsedPortsSnapshot(ValueRanges ports) { @@ -246,6 +246,11 @@ public ValueRanges getContainerAllocatedPorts() { @Override public void setContainerAllocatedPorts(ValueRanges ports) { } + + @Override + public ValueRanges getLocalUsedPortsSnapshot() { + return null; + } } diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java index a59d9ef..40b82b2 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; @@ -232,4 +233,14 @@ public ValueRanges getContainerAllocatedPorts() { public void setContainerAllocatedPorts(ValueRanges ports) { node.setContainerAllocatedPorts(ports); } + + @Override + public ValueRanges getLocalUsedPortsSnapshot() { + return node.getLocalUsedPortsSnapshot(); + } + + @Override + public void setLocalUsedPortsSnapshot(ValueRanges ports) { + node.setLocalUsedPortsSnapshot(ports); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 7d850b0..a70f503 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -56,18 +56,32 @@ @Public @Stable public static Resource newInstance(int memory, int vCores) { - Resource resource = Records.newRecord(Resource.class); - resource.setMemorySize(memory); - resource.setVirtualCores(vCores); - return resource; + return newInstance(memory, vCores, null); } @Public @Stable + public static Resource newInstance(int memory, int vCores, ValueRanges ports) { + Resource resource = Records.newRecord(Resource.class); + resource.setMemorySize(memory); + resource.setVirtualCores(vCores); + resource.setPorts(ports); + return resource; + } + + @Public + @Stable public static Resource newInstance(long memory, int vCores) { + return newInstance(memory, vCores, null); + } + + @Public + @Stable + public static Resource newInstance(long memory, int vCores, ValueRanges ports) { Resource resource = Records.newRecord(Resource.class); resource.setMemorySize(memory); resource.setVirtualCores(vCores); + resource.setPorts(ports); return resource; } @@ -112,6 +126,21 @@ public void setMemorySize(long memory) { "This method is implemented by ResourcePBImpl"); } + /** + * Get ports of the resource. + * @return ports of the resource + */ + @Public + @Stable + public abstract ValueRanges getPorts(); + + /** + * Set ports of the resource. + * @param ports ports of the resource + */ + @Public + @Stable + public abstract void setPorts(ValueRanges ports); /** * Get number of virtual cpu cores of the resource. 
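A short usage sketch (illustrative values only) for the ports-aware Resource factory added above, using only APIs introduced by this patch:

    // Request 2 GB of memory, 2 vcores, plus ports 8000-8010 and 9090.
    ValueRanges ports = ValueRanges.iniFromExpression("[8000-8010],9090");
    Resource withPorts = Resource.newInstance(2048, 2, ports);
    // The existing two-argument factory still works and leaves ports unset.
    Resource noPorts = Resource.newInstance(2048, 2);
    assert noPorts.getPorts() == null;
    assert withPorts.getPorts().equals(ports);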
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 5a48372..6f701f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1140,6 +1140,11 @@ public static boolean isAclEnabled(Configuration conf) { public static final String NM_PCORES_VCORES_MULTIPLIER = NM_PREFIX + "resource.pcores-vcores-multiplier"; public static final float DEFAULT_NM_PCORES_VCORES_MULTIPLIER = 1.0f; + + /** Range of ports which can be allocated for containers. */ + public static final String NM_PORTS = NM_PREFIX + "resource.ports"; + public static final String DEFAULT_NM_PORTS = "[1-19999]"; + /** * Rounds of updating ports. This parameter is circle controller for updating * local allocated ports info, since the ports info is big. We can control the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 81ebd79..c2943f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -53,9 +53,19 @@ message ContainerIdProto { optional int64 id = 3; } +message ValueRangeProto{ + required int32 begin = 1; + required int32 end = 2; +} + +message ValueRangesProto { + repeated ValueRangeProto ranges = 1; +} + message ResourceProto { optional int64 memory = 1; optional int32 virtual_cores = 2; + optional ValueRangesProto ports = 3; } message ResourceUtilizationProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java index 6468f0e..d6cc527 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java @@ -18,12 +18,13 @@ package org.apache.hadoop.yarn.api.records.impl.pb; - import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangesProto; @Private @Unstable @@ -31,6 +32,7 @@ ResourceProto proto = ResourceProto.getDefaultInstance(); ResourceProto.Builder builder = null; boolean viaProto = false; + ValueRanges ports = null; public ResourcePBImpl() { builder = ResourceProto.newBuilder(); } @@ -112,6 +114,34 @@ public int compareTo(Resource other) { } return diff == 0 ? 0 : (diff > 0 ? 
1 : -1); } - - + + @Override + public void setPorts(ValueRanges ports) { + maybeInitBuilder(); + if (ports == null) { + builder.clearPorts(); + } + this.ports = ports; + } + + @Override + public ValueRanges getPorts() { + ResourceProtoOrBuilder p = viaProto ? proto : builder; + if (this.ports != null) { + return this.ports; + } + if (!p.hasPorts()) { + return null; + } + this.ports = convertFromProtoFormat(p.getPorts()); + return this.ports; + } + + private static ValueRanges convertFromProtoFormat(ValueRangesProto proto) { + return new ValueRangesPBImpl(proto); + } + + private ValueRangesProto convertToProtoFormat(ValueRanges m) { + return ((ValueRangesPBImpl) m).getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/PortsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/PortsInfo.java new file mode 100755 index 0000000..7ee2e0e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/PortsInfo.java @@ -0,0 +1,77 @@ +package org.apache.hadoop.yarn.util; + +import java.io.BufferedReader; +import java.io.File; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.file.Files; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.util.SysInfoWindows; +import org.apache.hadoop.util.Time; +import org.apache.hadoop.yarn.api.records.ValueRanges; + +public class PortsInfo { + private static final Log LOG = LogFactory.getLog(PortsInfo.class); + private long lastRefreshTime; + static final int REFRESH_INTERVAL_MS = 2000; + + private ValueRanges ports; + + public PortsInfo() { + lastRefreshTime = 0; + reset(); + } + + long now() { + return Time.monotonicNow(); + } + + void reset() { + ports=null; + } + + void refreshIfNeeded() { + long now = now(); + if (now - lastRefreshTime > REFRESH_INTERVAL_MS) { + lastRefreshTime = now; + try { + File f = new File("GetAllocatedPorts.ps1"); + if(!f.exists()){ + Files.copy(PortsInfo.class.getResourceAsStream("/GetAllocatedPorts.ps1"),f.toPath()); + } + // Use a ProcessBuilder + ProcessBuilder pb = new ProcessBuilder("powershell.exe", f.getAbsolutePath()); + Process p = pb.start(); + InputStream is = p.getInputStream(); + BufferedReader br = new BufferedReader(new InputStreamReader(is)); + String line = null; + String portsString = null; + while ((line = br.readLine()) != null) { + if(!line.isEmpty()){ + portsString = line; + } + } + if(portsString!=null && !portsString.isEmpty()){ + ports=ValueRanges.iniFromExpression(portsString); + } + else{ + LOG.warn("Get allocated ports result is empty, fail to get ports info "); + } + int r = p.waitFor(); // Let the process finish. 
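+ // The exit code r is not inspected; when the probe produces no output the + // warning above is the only symptom and the cached ports value is left unchanged.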
+ //remove it after finish + f.deleteOnExit(); + } catch (Exception e) { + LOG.warn("Fail to get allocated ports info "); + e.printStackTrace(); + } + } + } + + public ValueRanges GetAllocatedPorts(){ + refreshIfNeeded(); + return ports; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index 0e1ee9d..6b6b1a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -22,6 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ValueRanges; /** * A {@link ResourceCalculator} which uses the concept of diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java index 8d6d6ab..f2be0ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.util.Records; @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) @@ -63,6 +64,16 @@ public void setVirtualCores(int cores) { } @Override + public ValueRanges getPorts() { + return null; + } + + @Override + public void setPorts(ValueRanges port) { + throw new RuntimeException("NONE cannot be modified!"); + } + + @Override public int compareTo(Resource o) { long diff = 0 - o.getMemorySize(); if (diff == 0) { @@ -106,6 +117,16 @@ public int getVirtualCores() { public void setVirtualCores(int cores) { throw new RuntimeException("UNBOUNDED cannot be modified!"); } + + @Override + public ValueRanges getPorts() { + return null; + } + + @Override + public void setPorts(ValueRanges port) { + throw new RuntimeException("NONE cannot be modified!"); + } @Override public int compareTo(Resource o) { @@ -122,21 +143,32 @@ public static Resource createResource(int memory) { return createResource(memory, (memory > 0) ? 1 : 0); } - public static Resource createResource(int memory, int cores) { + public static Resource createResource(int memory, int cores, ValueRanges ports) { Resource resource = Records.newRecord(Resource.class); resource.setMemorySize(memory); resource.setVirtualCores(cores); + resource.setPorts(ports); return resource; } + public static Resource createResource(int memory, int cores) { + return createResource(memory, cores, null); + } + public static Resource createResource(long memory) { return createResource(memory, (memory > 0) ? 
1 : 0); } public static Resource createResource(long memory, int cores) { + return createResource(memory, cores, null); + } + + public static Resource createResource(long memory, int cores, + ValueRanges ports) { Resource resource = Records.newRecord(Resource.class); resource.setMemorySize(memory); resource.setVirtualCores(cores); + resource.setPorts(ports); return resource; } @@ -163,7 +195,7 @@ public static Resource clone(Resource res) { } public static Resource cloneWithPorts(Resource res) { - return createResource(res.getMemory(), res.getVirtualCores(), + return createResource(res.getMemorySize(), res.getVirtualCores(), res.getPorts()); } @@ -176,7 +208,7 @@ public static Resource addToWithPorts(Resource lhs, Resource rhs) { } public static Resource addTo(Resource lhs, Resource rhs, boolean ignorePorts) { - lhs.setMemory(lhs.getMemory() + rhs.getMemory()); + lhs.setMemorySize(lhs.getMemorySize() + rhs.getMemorySize()); lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores()); if (!ignorePorts) { if (lhs.getPorts() != null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java index bdd09de..3751afe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -22,7 +22,6 @@ import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.yarn.api.records.ExecutionTimeEstimate; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.QueueStatistics; import org.apache.hadoop.yarn.api.records.ValueRange; @@ -114,9 +113,6 @@ private static Object genTypeValue(Type type) { if (type.equals(ValueRange.class)) { ret = ValueRange.newInstance(0, 0); } - if (type.equals(ExecutionTimeEstimate.class)) { - ret = ExecutionTimeEstimate.newInstance(); - } if (type.equals(NodeLabel.class)) { ret = NodeLabel.newInstance("test"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index 46743fc..bba1c38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -322,7 +322,14 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl; +import org.apache.hadoop.yarn.api.records.ValueRange; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangeProto; +import org.apache.hadoop.yarn.api.records.impl.pb.ValueRangePBImpl; +import org.apache.hadoop.yarn.api.records.ValueRanges; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangesProto; +import org.apache.hadoop.yarn.api.records.impl.pb.ValueRangesPBImpl; import org.apache.hadoop.yarn.util.resource.Resources; + 
import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Ignore; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java index fc30a80..5dec4ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java @@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.util.Records; public abstract class RegisterNodeManagerRequest { @@ -50,6 +51,15 @@ public static RegisterNodeManagerRequest newInstance(NodeId nodeId, List containerStatuses, List runningApplications, Set nodeLabels, Resource physicalResource) { + return newInstance(nodeId, httpPort, resource, nodeManagerVersionId, + containerStatuses, runningApplications, nodeLabels, physicalResource, null); + } + + public static RegisterNodeManagerRequest newInstance(NodeId nodeId, + int httpPort, Resource resource, String nodeManagerVersionId, + List containerStatuses, + List runningApplications, Set nodeLabels, + Resource physicalResource, ValueRanges ports) { RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class); request.setHttpPort(httpPort); @@ -60,6 +70,7 @@ public static RegisterNodeManagerRequest newInstance(NodeId nodeId, request.setRunningApplications(runningApplications); request.setNodeLabels(nodeLabels); request.setPhysicalResource(physicalResource); + request.setLocalUsedPortsSnapshot(ports); return request; } @@ -112,4 +123,8 @@ public abstract void setRunningApplications( * @param physicalResource Physical resources in the node. 
*/ public abstract void setPhysicalResource(Resource physicalResource); + + public abstract void setLocalUsedPortsSnapshot(ValueRanges ports); + + public abstract ValueRanges getLocalUsedPortsSnapshot() ; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java index 0291e0b..8e7ce43 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java @@ -29,14 +29,17 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ValueRangesPBImpl; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangesProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NMContainerStatusProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto; import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto.Builder; @@ -55,6 +58,7 @@ private List containerStatuses = null; private List runningApplications = null; private Set labels = null; + private ValueRanges localUsedPortsSnapshot = null; /** Physical resources in the node. */ private Resource physicalResource = null; @@ -99,6 +103,10 @@ private synchronized void mergeLocalToBuilder() { if (this.physicalResource != null) { builder.setPhysicalResource(convertToProtoFormat(this.physicalResource)); } + if (this.localUsedPortsSnapshot != null) { + builder + .setLocalUsedPortsSnapshot(convertToProtoFormat(this.localUsedPortsSnapshot)); + } } private synchronized void addNMContainerStatusesToProto() { @@ -357,6 +365,27 @@ private synchronized void initNodeLabels() { } } + @Override + public synchronized ValueRanges getLocalUsedPortsSnapshot() { + RegisterNodeManagerRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.localUsedPortsSnapshot != null) { + return this.localUsedPortsSnapshot; + } + if (!p.hasLocalUsedPortsSnapshot()) { + return null; + } + this.localUsedPortsSnapshot = + convertFromProtoFormat(p.getLocalUsedPortsSnapshot()); + return this.localUsedPortsSnapshot; + } + + @Override + public synchronized void setLocalUsedPortsSnapshot(ValueRanges ports) { + maybeInitBuilder(); + builder.clearLocalUsedPortsSnapshot(); + localUsedPortsSnapshot = ports; + } + private static NodeLabelPBImpl convertFromProtoFormat(NodeLabelProto p) { return new NodeLabelPBImpl(p); } @@ -399,4 +428,12 @@ private static NMContainerStatusProto convertToProtoFormat( NMContainerStatus c) { return ((NMContainerStatusPBImpl)c).getProto(); } + + private static ValueRanges convertFromProtoFormat(ValueRangesProto proto) { + return new ValueRangesPBImpl(proto); + } + + private ValueRangesProto convertToProtoFormat(ValueRanges m) { + return ((ValueRangesPBImpl) m).getProto(); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java index 440cd0a..bde5e3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.util.Records; /** @@ -132,4 +133,8 @@ public abstract void setIncreasedContainers( @Unstable public abstract void setOpportunisticContainersStatus( OpportunisticContainersStatus opportunisticContainersStatus); + + public abstract ValueRanges getLocalUsedPortsSnapshot(); + + public abstract void setLocalUsedPortsSnapshot(ValueRanges ports); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java index 8aebc6f..2adcee3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java @@ -28,16 +28,19 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; import 
org.apache.hadoop.yarn.api.records.impl.pb.ResourceUtilizationPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ValueRangesPBImpl; import org.apache.hadoop.yarn.proto.YarnProtos; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ValueRangesProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProtoOrBuilder; @@ -57,6 +60,7 @@ private NodeHealthStatus nodeHealthStatus = null; private List keepAliveApplications = null; private List increasedContainers = null; + private ValueRanges localUsedPortsSnapshot = null; public NodeStatusPBImpl() { builder = NodeStatusProto.newBuilder(); @@ -90,6 +94,11 @@ private synchronized void mergeLocalToBuilder() { if (this.increasedContainers != null) { addIncreasedContainersToProto(); } + + if (this.localUsedPortsSnapshot != null) { + builder + .setLocalUsedPortsSnapshot(convertToProtoFormat(this.localUsedPortsSnapshot)); + } } private synchronized void mergeLocalToProto() { @@ -487,4 +496,33 @@ private ContainerProto convertToProtoFormat( Container c) { return ((ContainerPBImpl)c).getProto(); } + + @Override + public ValueRanges getLocalUsedPortsSnapshot() { + NodeStatusProtoOrBuilder p = viaProto ? proto : builder; + if (this.localUsedPortsSnapshot != null) { + return this.localUsedPortsSnapshot; + } + if (!p.hasLocalUsedPortsSnapshot()) { + return null; + } + this.localUsedPortsSnapshot = + convertFromProtoFormat(p.getLocalUsedPortsSnapshot()); + return this.localUsedPortsSnapshot; + } + + @Override + public void setLocalUsedPortsSnapshot(ValueRanges ports) { + maybeInitBuilder(); + builder.clearLocalUsedPortsSnapshot(); + localUsedPortsSnapshot = ports; + } + + private static ValueRanges convertFromProtoFormat(ValueRangesProto proto) { + return new ValueRangesPBImpl(proto); + } + + private ValueRangesProto convertToProtoFormat(ValueRanges m) { + return ((ValueRangesPBImpl) m).getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto index 98b172d..28f1cd5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto @@ -40,6 +40,7 @@ message NodeStatusProto { optional ResourceUtilizationProto node_utilization = 7; repeated ContainerProto increased_containers = 8; optional OpportunisticContainersStatusProto opportunistic_containers_status = 9; + optional ValueRangesProto local_used_ports_snapshot = 10; } message OpportunisticContainersStatusProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto index edb2d9c..fd9a5c7 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto @@ -64,6 +64,7 @@ message RegisterNodeManagerRequestProto { repeated ApplicationIdProto runningApplications = 7; optional NodeLabelsProto nodeLabels = 8; optional ResourceProto physicalResource = 9; + optional ValueRangesProto local_used_ports_snapshot = 240; } message RegisterNodeManagerResponseProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 68968bd..43a2cb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -90,6 +90,7 @@ import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils; import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; +import org.apache.hadoop.yarn.util.PortsInfo; import org.apache.hadoop.yarn.util.YarnVersionInfo; import com.google.common.annotations.VisibleForTesting; @@ -150,6 +151,7 @@ private NMNodeLabelsHandler nodeLabelsHandler; private final NodeLabelsProvider nodeLabelsProvider; + private boolean enablePortsAsResource; /** * this parameter is circle controller for updating local allocated ports * info, since the ports info is big. we can control the update frequency to @@ -374,17 +376,10 @@ protected void registerWithRM() // during RM recovery synchronized (this.context) { List containerReports = getNMContainerStatuses(); - Set nodeLabels = null; - if (hasNodeLabelsProvider) { - nodeLabels = nodeLabelsProvider.getNodeLabels(); - nodeLabels = - (null == nodeLabels) ? 
CommonNodeLabelsManager.EMPTY_NODELABEL_SET - : nodeLabels; - } - ValueRanges ports = null; - if (enablePortsAsResource) { - ports = new PortsInfo().GetAllocatedPorts(); - } + ValueRanges ports = null; + if (enablePortsAsResource) { + ports = new PortsInfo().GetAllocatedPorts(); + } RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance(nodeId, httpPort, totalResource, nodeManagerVersionId, containerReports, getRunningApplications(), @@ -506,7 +501,7 @@ protected NodeStatus getNodeStatus(int responseId) throws IOException { NodeStatus nodeStatus = NodeStatus.newInstance(nodeId, responseId, containersStatuses, createKeepAliveApplicationList(), nodeHealthStatus, - containersUtilization, nodeUtilization, increasedContainers, ports); + containersUtilization, nodeUtilization, increasedContainers); nodeStatus.setOpportunisticContainersStatus( getOpportunisticContainersStatus()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index c6aaa76..00df055 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -109,6 +110,7 @@ private Server server; private InetSocketAddress resourceTrackerAddress; private String minimumNodeManagerVersion; + private boolean enablePortsAsResource; private int minAllocMb; private int minAllocVcores; @@ -144,6 +146,10 @@ protected void serviceInit(Configuration conf) throws Exception { YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT); + enablePortsAsResource = + conf.getBoolean(YarnConfiguration.PORTS_AS_RESOURCE_ENABLE, + YarnConfiguration.DEFAULT_PORTS_AS_RESOURCE_ENABLE); + RackResolver.init(conf); nextHeartBeatInterval = conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, @@ -635,7 +641,6 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) } return nodeHeartBeatResponse; } - } private void setAppCollectorsMapToResponse( List runningApps, NodeHeartbeatResponse response) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index b48567d4..892aba0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus; @@ -189,8 +190,22 @@ public void updateNodeHeartbeatResponseForContainersDecreasing( Integer getDecommissioningTimeout(); /** + * Get local used ports snapshot. + * + * @return ports range. + */ + public ValueRanges getLocalUsedPortsSnapshot(); + + /** + * Update the local used ports snapshot. + * + * @param ports the {@link ValueRanges} to set + */ + public void setLocalUsedPortsSnapshot(ValueRanges ports); + + /** * Get available ports. - * + * * @return ports range. */ public ValueRanges getAvailablePorts(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 85f591d..27adae3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; @@ -370,6 +371,13 @@ public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, int cmPort, int httpPort, Node node, Resource capability, String nodeManagerVersion, Resource physResource) { + this(nodeId, context, hostName, cmPort, httpPort, node, capability, + nodeManagerVersion, physResource, null); + } + + public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, + int cmPort, int httpPort, Node node, Resource capability, + String nodeManagerVersion, Resource physResource, ValueRanges ports) { this.nodeId = nodeId; this.context = context; this.hostName = hostName; @@ -396,6 +404,8 @@ public RMNodeImpl(NodeId nodeId, RMContext context, String hostName, this.nodeUpdateQueue = new ConcurrentLinkedQueue(); this.containerAllocationExpirer = context.getContainerAllocationExpirer(); + + this.localUsedPortsSnapshot = ports; } @Override @@ -1498,7 +1508,6 @@ public void setContainerAllocatedPorts(ValueRanges ports) { this.containerAllocatedPorts = ports; } - } @Override public List pullNewlyIncreasedContainers() { try { @@ -1557,4 +1566,14 @@ public void setUntrackedTimeStamp(long ts) { public Integer getDecommissioningTimeout() { return decommissioningTimeout; } + + @Override + public
ValueRanges getLocalUsedPortsSnapshot() { + return this.localUsedPortsSnapshot; + } + + @Override + public void setLocalUsedPortsSnapshot(ValueRanges ports) { + this.localUsedPortsSnapshot = ports; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index 43bfa22..9f1879c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -302,7 +302,7 @@ private synchronized void addUnallocatedResource(Resource resource) { if (resource.getPorts() != null) { Resources.addToWithPorts(unallocatedResource, resource); Resources.subtractFromWithPorts(allocatedResource, resource); - rmNode.setContainerAllocatedPorts(allocated.getPorts()); + rmNode.setContainerAllocatedPorts(allocatedResource.getPorts()); rmNode.setAvailablePorts(calculateAvailablePorts()); } else { Resources.addTo(unallocatedResource, resource); @@ -325,7 +325,7 @@ public synchronized void deductUnallocatedResource(Resource resource) { if (resource.getPorts() != null) { Resources.subtractFromWithPorts(unallocatedResource, resource); Resources.addToWithPorts(allocatedResource, resource); - rmNode.setContainerAllocatedPorts(allocated.getPorts()); + rmNode.setContainerAllocatedPorts(allocatedResource.getPorts()); rmNode.setAvailablePorts(calculateAvailablePorts()); } else { Resources.subtractFrom(unallocatedResource, resource); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 3522b06..52f7f9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -98,7 +98,6 @@ private volatile int nodeLocalityDelay; private volatile int rackLocalityAdditionalDelay; private volatile boolean rackLocalityFullReset; - private boolean enablePortsAsResource; Map applicationAttemptMap = new ConcurrentHashMap<>(); @@ -146,10 +145,6 @@ public LeafQueue(CapacitySchedulerContext cs, // One time initialization is enough since it is static ordering policy this.pendingOrderingPolicy = new FifoOrderingPolicyForPendingApps(); - this.enablePortsAsResource = - conf.getBoolean(YarnConfiguration.PORTS_AS_RESOURCE_ENABLE, - YarnConfiguration.DEFAULT_PORTS_AS_RESOURCE_ENABLE); - if(LOG.isDebugEnabled()) { LOG.debug("LeafQueue:" + " name=" + queueName + ", fullname=" + getQueuePath()); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index b512215..6783a90 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.ValueRanges; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -62,11 +63,16 @@ */ public class RegularContainerAllocator extends AbstractContainerAllocator { private static final Log LOG = LogFactory.getLog(RegularContainerAllocator.class); + private boolean enablePortsAsResource; public RegularContainerAllocator(FiCaSchedulerApp application, ResourceCalculator rc, RMContext rmContext, ActivitiesManager activitiesManager) { super(application, rc, rmContext, activitiesManager); + this.enablePortsAsResource = + rmContext.getYarnConfiguration().getBoolean( + YarnConfiguration.PORTS_AS_RESOURCE_ENABLE, + YarnConfiguration.DEFAULT_PORTS_AS_RESOURCE_ENABLE); } private boolean checkHeadroom(Resource clusterResource, @@ -533,7 +539,7 @@ private ContainerAllocation assignContainer(Resource clusterResource, node.getAvailablePorts(), capability.getPorts())) { LOG.info("no available ports, current available:" + node.getAvailablePorts() + ", required:" + capability.getPorts()); - return new CSAssignment(Resources.none(), type); + return ContainerAllocation.LOCALITY_SKIPPED; } // Can we allocate a container on this node? 
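The allocator change above makes port feasibility a hard gate: if the node's advertised available ports cannot cover the requested port ranges, the node is skipped for this locality level instead of returning an empty assignment. The following is a minimal, self-contained sketch of that kind of range-fit check, written against a plain Range class; it is illustrative only and does not reproduce the ValueRanges implementation used by this patch.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class PortFitSketch {

  /** A closed interval of ports, e.g. [9000, 9100]; an illustrative stand-in for a port range. */
  static final class Range {
    final int start;
    final int end;
    Range(int start, int end) {
      this.start = start;
      this.end = end;
    }
  }

  /** Merge overlapping or adjacent ranges so the containment check stays simple. */
  static List<Range> normalize(List<Range> in) {
    List<Range> sorted = new ArrayList<>(in);
    sorted.sort(Comparator.comparingInt(r -> r.start));
    List<Range> out = new ArrayList<>();
    for (Range r : sorted) {
      if (!out.isEmpty() && r.start <= out.get(out.size() - 1).end + 1) {
        Range last = out.remove(out.size() - 1);
        out.add(new Range(last.start, Math.max(last.end, r.end)));
      } else {
        out.add(r);
      }
    }
    return out;
  }

  /** True if every requested range is covered by some available range. */
  static boolean fitsIn(List<Range> requested, List<Range> available) {
    List<Range> avail = normalize(available);
    for (Range req : requested) {
      boolean covered = false;
      for (Range a : avail) {
        if (a.start <= req.start && req.end <= a.end) {
          covered = true;
          break;
        }
      }
      if (!covered) {
        return false; // analogous to the "no available ports" skip in the allocator
      }
    }
    return true;
  }

  public static void main(String[] args) {
    List<Range> available = Arrays.asList(new Range(7000, 7999), new Range(9000, 9100));
    System.out.println(fitsIn(Arrays.asList(new Range(9050, 9060)), available)); // true
    System.out.println(fitsIn(Arrays.asList(new Range(8000, 8005)), available)); // false
  }
}

Merging adjacent ranges first keeps the containment test a single pass over the requested ranges, which mirrors why the node advertises a consolidated available-ports view rather than individual ports.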
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index d6caa84..ebe1d18 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceUtilization; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; @@ -302,6 +303,16 @@ public ValueRanges getContainerAllocatedPorts() { @Override public void setContainerAllocatedPorts(ValueRanges ports) { } + + @Override + public ValueRanges getLocalUsedPortsSnapshot() { + return null; + } + + @Override + public void setLocalUsedPortsSnapshot(ValueRanges port) { + } + }; private static RMNode buildRMNode(int rack, final Resource perNode, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestPortsAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestPortsAllocation.java index 9e6f316..1057e0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestPortsAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestPortsAllocation.java @@ -78,6 +78,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.SchedulableEntity; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; @@ -136,7 +137,8 @@ public void setUp() throws Exception { amResourceRequest = mock(ResourceRequest.class); when(amResourceRequest.getCapability()).thenReturn( Resources.createResource(0, 0)); - when(rmApp.getAMResourceRequest()).thenReturn(amResourceRequest); + when(rmApp.getAMResourceRequests()).thenReturn( + Collections.singletonList(amResourceRequest)); Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId)Matchers.any()); when(spyRMContext.getRMApps()).thenReturn(spyApps); @@ -158,10 +160,6 @@ public void setUp() throws Exception { 
thenReturn(Resources.createResource(16*GB, 32)); when(csContext.getClusterResource()). thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32)); - when(csContext.getApplicationComparator()). - thenReturn(CapacityScheduler.applicationComparator); - when(csContext.getNonPartitionedQueueComparator()). - thenReturn(CapacityScheduler.nonPartitionedQueueComparator); when(csContext.getResourceCalculator()). thenReturn(resourceCalculator); when(csContext.getRMContext()).thenReturn(rmContext); @@ -172,7 +170,7 @@ public void setUp() throws Exception { containerTokenSecretManager); root = - CapacityScheduler.parseQueue(csContext, csConf, null, + CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook); @@ -242,42 +240,20 @@ private void setupQueueConfiguration( } static LeafQueue stubLeafQueue(LeafQueue queue) { - // Mock some methods for ease in these unit tests - // 1. LeafQueue.createContainer to return dummy containers - doAnswer( - new Answer() { - @Override - public Container answer(InvocationOnMock invocation) - throws Throwable { - final FiCaSchedulerApp application = - (FiCaSchedulerApp)(invocation.getArguments()[0]); - final ContainerId containerId = - TestUtils.getMockContainerId(application); - - Container container = TestUtils.getMockContainer( - containerId, - ((FiCaSchedulerNode)(invocation.getArguments()[1])).getNodeID(), - (Resource)(invocation.getArguments()[2]), - ((Priority)invocation.getArguments()[3])); - return container; - } - } - ). - when(queue).createContainer( - any(FiCaSchedulerApp.class), - any(FiCaSchedulerNode.class), - any(Resource.class), - any(Priority.class) - ); - - // 2. Stub out LeafQueue.parent.completedContainer + // 1. Stub out LeafQueue.parent.completedContainer CSQueue parent = queue.getParent(); doNothing().when(parent).completedContainer( any(Resource.class), any(FiCaSchedulerApp.class), any(FiCaSchedulerNode.class), any(RMContainer.class), any(ContainerStatus.class), any(RMContainerEventType.class), any(CSQueue.class), anyBoolean()); + + // Stub out parent queue's accept and apply. 
+ doReturn(true).when(parent).accept(any(Resource.class), + any(ResourceCommitRequest.class)); + doNothing().when(parent).apply(any(Resource.class), + any(ResourceCommitRequest.class)); return queue; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 0c45ee1..076257f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.ValueRanges; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.Event; import org.apache.hadoop.yarn.event.EventHandler; @@ -186,6 +187,22 @@ public static ResourceRequest createResourceRequest(String resourceName, return request; } + public static ResourceRequest createResourceRequest(String resourceName, + int memory, ValueRanges ports, int numContainers, boolean relaxLocality, + Priority priority, RecordFactory recordFactory) { + ResourceRequest request = + recordFactory.newRecordInstance(ResourceRequest.class); + Resource capability = Resources.createResource(memory, 1, ports); + + request.setNumContainers(numContainers); + request.setResourceName(resourceName); + request.setCapability(capability); + request.setRelaxLocality(relaxLocality); + request.setPriority(priority); + request.setNodeLabelExpression(RMNodeLabelsManager.NO_LABEL); + return request; + } + public static ResourceRequest createResourceRequest( String resourceName, int memory, int numContainers, boolean relaxLocality, Priority priority, @@ -245,9 +262,7 @@ public static FiCaSchedulerNode getMockNodeForPortsCaculate(String host, 0, 0, mockNode, - Resources.createResource(mem, vCores, ports), - "", - OverAllocationInfo.newInstance(ResourceThresholds.newInstance(1.0f))); + Resources.createResource(mem, vCores, ports), ""); FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode, false)); LOG.info("node = " + host + " avail=" + node.getUnallocatedResource()); return node; -- 1.9.1
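To close, a rough usage sketch of how a test might combine the pieces added above: a ResourceRequest whose capability carries a port constraint, built through the new TestUtils helper. The expression passed to ValueRanges.iniFromExpression is an assumption, since this patch does not show the syntax that method accepts, and the class below is illustrative rather than part of the change.

// Illustrative sketch only. Assumes the expression "8000-8010" is accepted by
// ValueRanges.iniFromExpression; the patch does not document its syntax.
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.ValueRanges;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class PortsRequestSketch {

  /** Build an ANY-host request for one 1 GB container constrained to ports 8000-8010. */
  public static ResourceRequest newPortsRequest() {
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    // Hypothetical expression; adjust to whatever iniFromExpression actually parses.
    ValueRanges ports = ValueRanges.iniFromExpression("8000-8010");
    return TestUtils.createResourceRequest(ResourceRequest.ANY, 1024, ports,
        1, true, Priority.newInstance(1), recordFactory);
  }
}

Whether the capacity scheduler then honors the constraint depends on the node's available ports, per the RegularContainerAllocator check earlier in this patch.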