diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java index 21cd1bb..67cb5a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java @@ -77,7 +77,7 @@ public static ApplicationSubmissionContext newInstance( boolean isUnmanagedAM, boolean cancelTokensWhenComplete, int maxAppAttempts, Resource resource, String applicationType, boolean keepContainers, String appLabelExpression, - String amContainerLabelExpression) { + String amContainerLabelExpression, ConstraintDefinition cd) { ApplicationSubmissionContext context = Records.newRecord(ApplicationSubmissionContext.class); context.setApplicationId(applicationId); @@ -91,6 +91,7 @@ public static ApplicationSubmissionContext newInstance( context.setApplicationType(applicationType); context.setKeepContainersAcrossApplicationAttempts(keepContainers); context.setNodeLabelExpression(appLabelExpression); + context.setConstraintDefinition(cd); context.setResource(resource); ResourceRequest amReq = Records.newRecord(ResourceRequest.class); @@ -111,7 +112,7 @@ public static ApplicationSubmissionContext newInstance( boolean keepContainers) { return newInstance(applicationId, applicationName, queue, priority, amContainer, isUnmanagedAM, cancelTokensWhenComplete, maxAppAttempts, - resource, applicationType, keepContainers, null, null); + resource, applicationType, keepContainers, null, null, null); } @Public @@ -123,7 +124,7 @@ public static ApplicationSubmissionContext newInstance( int maxAppAttempts, Resource resource, String applicationType) { return newInstance(applicationId, 
applicationName, queue, priority, amContainer, isUnmanagedAM, cancelTokensWhenComplete, maxAppAttempts, - resource, applicationType, false, null, null); + resource, applicationType, false, null, null, null); } @Public @Unstable @@ -535,4 +536,29 @@ public abstract void setLogAggregationContext( @Public @Unstable public abstract void setReservationID(ReservationId reservationID); + + /** + * Get a Constraint Definition associated with this application. The + * definition may consist of multiple Placement expressions + * + * @return the constraint definition associated with this application + */ + @Public + @Evolving + public abstract ConstraintDefinition getConstraintDefinition(); + + /** + * Set a Constraint Definition associated with this application. The + * definition may consist of multiple Placement expressions specifying + * application specific constraints like: Affinity, Anti-Affinity, Cardinality + * + * (Time constraints to be added in the future) + * + * @param constraintDefinition the constraint definition to associate with this application + */ + @Public + @Evolving + public abstract void setConstraintDefinition( + ConstraintDefinition constraintDefinition); + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ConstraintDefinition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ConstraintDefinition.java new file mode 100644 index 0000000..7de2cde --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ConstraintDefinition.java @@ -0,0 +1,57 @@ +package org.apache.hadoop.yarn.api.records; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * {@link ConstraintDefinition} class is the top class defining a set of + * placement constraints the service/user cares about during a 
Resource + * Allocation + * + * @see ResourceRequest Created by pgaref on 7/7/16 + */ + +@Public +@Unstable +public abstract class ConstraintDefinition + implements Comparable { + + @Private + @Unstable + public static ConstraintDefinition newInstance(long timestamp, + List placementconstraintsexpression) { + ConstraintDefinition cd = Records.newRecord(ConstraintDefinition.class); + cd.setTimestamp(timestamp); + cd.setPlacementConstraintsExpressions(placementconstraintsexpression); + return cd; + } + + /** + * If the constraintDefinition is part of a RR we could just use the + * applicationID which is unique Otherwise we need to generate a uniqueID + * including a timestamp + * + */ + + @Private + @Unstable + public abstract long getTimestamp(); + + @Private + @Unstable + public abstract void setTimestamp(long ts); + + @Private + @Unstable + public abstract List getPlacementConstraintsExpressions(); + + @Private + @Unstable + public abstract void setPlacementConstraintsExpressions( + List pce); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraint.java new file mode 100644 index 0000000..b760ed6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraint.java @@ -0,0 +1,96 @@ +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * {@link PlacementConstraint} captures the set of placement constraints the + * service/user cares about regarding a Resource Allocation + * + * @see ResourceRequest Created by pgaref on 7/7/16 + */ +@Public 
+@Unstable +public abstract class PlacementConstraint { + + @Private + @Unstable + public static PlacementConstraint newInstance(String source, String target, + PlacementConstraintType type, PlacementConstraintScope scope) { + PlacementConstraint pc = Records.newRecord(PlacementConstraint.class); + pc.setSource(source); + pc.setTarget(target); + pc.setScope(scope); + pc.setType(type); + return pc; + } + + /** + * Source Methods + * + * @param source + */ + @Private + @Unstable + public abstract void setSource(String source); + + @Private + @Unstable + public abstract String getSource(); + + /** + * Target Methods + */ + @Private + @Unstable + public abstract void setTarget(String target); + + @Private + @Unstable + public abstract String getTarget(); + + /** + * Scope Methods + */ + @Private + @Unstable + public abstract void setScope(PlacementConstraintScope pcs); + + @Private + @Unstable + public abstract PlacementConstraintScope getScope(); + + /** + * Type Methods + */ + @Private + @Unstable + public abstract void setType(PlacementConstraintType pct); + + @Private + @Unstable + public abstract PlacementConstraintType getType(); + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + PlacementConstraint other = (PlacementConstraint) obj; + if (other.getSource().compareTo(this.getSource()) != 0) + return false; + if (other.getTarget().compareTo(this.getTarget()) != 0) + return false; + if (other.getScope() != this.getScope()) + return false; + if (other.getType() != (this.getType())) + return false; + return true; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintScope.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintScope.java new file mode 100644 index 0000000..3702f04 --- /dev/null 
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintScope.java @@ -0,0 +1,40 @@ +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; + +/** + * + */ +/** + * Enumeration of various Scopes of locality Constraints defined as part of a + * {@link PlacementConstraintsExpression} within one + * {@link ConstraintDefinition} Created by pgaref on 7/7/16 + */ +@Public +@Evolving +public enum PlacementConstraintScope { + /** + * Do not request any special placement constraint + */ + DEFAULT, + + /** + * Request Node placement locality within the target defined in + * {@link PlacementConstraintsExpression} + */ + NODE, + + /** + * Request Rack placement locality within the target defined in + * {@link PlacementConstraintsExpression} + */ + RACK, + + /** + * Request Process placement locality within the target defined in + * {@link PlacementConstraintsExpression} As many Yarn NMs can run within the + * same node this option ensures we are located on the same NM + */ + PROCESS +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintType.java new file mode 100644 index 0000000..3cf27f6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintType.java @@ -0,0 +1,44 @@ +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; + +/** + * + */ +/** + * Enumeration of various Types of Constraints defined as part of a + * {@link PlacementConstraintsExpression} within one + * {@link 
ConstraintDefinition} Created by pgaref on 7/7/16 + */ +@Public +@Evolving +public enum PlacementConstraintType { + + NO_TYPE, + /** + * Request the allocated container to be co-located with the target defined in + * {@link PlacementConstraintsExpression} + */ + AFFINITY, + /** + * Request the allocated container NOT to be co-located with the target + * defined in {@link PlacementConstraintsExpression} + */ + ANTI_AFFINITY, + /** + * Set the upper limit of the container instances within the scope defined in + * {@link PlacementConstraintsExpression} + */ + CARDINALITY, + /** + * Request to AVOID a CPU noisy environment as the container is expected to be + * CPU hungry {@link PlacementConstraintsExpression} + */ + CPU_ISOLATION, + /** + * Request to AVOID a Memory noisy environment as the container is expected to + * be Memory hungry {@link PlacementConstraintsExpression} + */ + MEM_ISOLATION +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintsExpression.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintsExpression.java new file mode 100644 index 0000000..6b230ef --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PlacementConstraintsExpression.java @@ -0,0 +1,39 @@ +package org.apache.hadoop.yarn.api.records; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * {@link PlacementConstraintsExpression} represents an expression of a set of + * placement constraints the service/user cares about during a Resource + * Allocation + * + * @see ResourceRequest Created by pgaref on 7/7/16 + */ +@Public +@Unstable +public 
abstract class PlacementConstraintsExpression { + + @Private + @Unstable + public static PlacementConstraintsExpression newInstance( + List placementContraints) { + PlacementConstraintsExpression pce = + Records.newRecord(PlacementConstraintsExpression.class); + pce.setPlacementConstraints(placementContraints); + return pce; + } + + @Private + @Unstable + public abstract List getPlacementConstraints(); + + @Private + @Unstable + public abstract void setPlacementConstraints(List lpc); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java index 07f132c..64b985c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java @@ -19,6 +19,8 @@ package org.apache.hadoop.yarn.api.records; import java.io.Serializable; +import java.util.Collections; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; @@ -86,8 +88,19 @@ public static ResourceRequest newInstance(Priority priority, String hostName, @Public @Evolving public static ResourceRequest newInstance(Priority priority, String hostName, - Resource capability, int numContainers, boolean relaxLocality, String - labelExpression, ExecutionTypeRequest executionTypeRequest) { + Resource capability, int numContainers, boolean relaxLocality, + String labelExpression, ExecutionTypeRequest executionTypeRequest) { + return newInstance(priority, hostName, capability, numContainers, + relaxLocality, labelExpression, executionTypeRequest, + Collections. 
emptySet()); + } + + @Public + @Evolving + public static ResourceRequest newInstance(Priority priority, String hostName, + Resource capability, int numContainers, boolean relaxLocality, + String labelExpression, ExecutionTypeRequest executionTypeRequest, + Set allocationTags) { ResourceRequest request = Records.newRecord(ResourceRequest.class); request.setPriority(priority); request.setResourceName(hostName); @@ -96,6 +109,7 @@ public static ResourceRequest newInstance(Priority priority, String hostName, request.setRelaxLocality(relaxLocality); request.setNodeLabelExpression(labelExpression); request.setExecutionTypeRequest(executionTypeRequest); + request.setAllocationTags(allocationTags); return request; } @@ -317,6 +331,31 @@ public ExecutionTypeRequest getExecutionTypeRequest() { @Evolving public abstract void setNodeLabelExpression(String nodelabelExpression); + + /** + * Get allocation tags associated with this specific ResourceRequest These + * tags will be coupled with the allocation(s) during the whole applications' + * lifecycle. These tags can be used to track the specific allocation and set + * more sophisticated and dynamic constraints! + * + * @return + */ + @Public + @Evolving + public abstract Set getAllocationTags(); + + /** + * Set allocation tags associated with this specific ResourceRequest These + * tags will be coupled with the allocation(s) during the whole applications' + * lifecycle. These tags can be used to track the specific allocation and set + * more sophisticated and dynamic constraints! + * + * @param tags + */ + @Public + @Evolving + public abstract void setAllocationTags(Set tags); + /** * Get the optional ID corresponding to this allocation request. 
This * ID is an identifier for different {@code ResourceRequest}s from the same @@ -368,7 +407,7 @@ public long getAllocationRequestId() { public void setAllocationRequestId(long allocationRequestID) { throw new UnsupportedOperationException(); } - + @Override public int hashCode() { final int prime = 2153; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 6c921cd..4c91976 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2559,7 +2559,7 @@ public static boolean isAclEnabled(Configuration conf) { NODE_LABELS_PREFIX + "fs-store.retry-policy-spec"; public static final String DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = "2000, 500"; - + /** * Flag to indicate if the node labels feature enabled, by default it's * disabled @@ -2614,6 +2614,12 @@ public static boolean areNodeLabelsEnabled( return conf.getBoolean(NODE_LABELS_ENABLED, DEFAULT_NODE_LABELS_ENABLED); } + public static boolean areTagLabelsEnabled( + Configuration conf) { + return conf.getBoolean(TAG_LABELS_ENABLED, DEFAULT_TAG_LABELS_ENABLED); + + } + private static final String NM_NODE_LABELS_PREFIX = NM_PREFIX + "node-labels."; @@ -2734,6 +2740,17 @@ public static boolean areNodeLabelsEnabled( public static final String TIMELINE_XFS_OPTIONS = TIMELINE_XFS_PREFIX + "xframe-options"; + + //////////////////////////////// + // Yarn Dynamic-Tags Conf + // for long-running Services + //////////////////////////////// + public static final String TAG_LABELS_PREFIX = YARN_PREFIX + "dynamic-tags."; + public static final String TAG_LABELS_ENABLED = TAG_LABELS_PREFIX + + "enabled"; + public static final boolean 
DEFAULT_TAG_LABELS_ENABLED = false; + + public YarnConfiguration() { super(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 6c337cf..79fec30 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -304,6 +304,7 @@ message ResourceRequestProto { optional string node_label_expression = 6; optional ExecutionTypeRequestProto execution_type_request = 7; optional int64 allocation_request_id = 8 [default = -1]; + repeated string allocation_tags = 9; } message ExecutionTypeRequestProto { @@ -364,6 +365,7 @@ message ApplicationSubmissionContextProto { optional ReservationIdProto reservation_id = 15; optional string node_label_expression = 16; optional ResourceRequestProto am_container_resource_request = 17; + optional ConstraintDefinitionProto constraint_definition = 18; } message LogAggregationContextProto { @@ -452,6 +454,47 @@ enum SignalContainerCommandProto { //////////////////////////////////////////////////////////////////////// +////// constraint_protocol ///////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////// + +message ConstraintDefinitionProto { + optional int64 constraint_definition_timestamp = 1; + repeated PlacementConstraintsExpressionProto placement_constraints_expression = 2; + // We consider that all the expressions are connected with an *OR* operator at this level + // Every logical formula has a unique disjunctive normal form (DNF) +} + +message PlacementConstraintsExpressionProto { + repeated PlacementConstraintProto placement_constraints =1; + // We consider that all the constraints are connected with an *AND* operator at this level + // Every logical formula has a unique disjunctive normal form (DNF) +} + +message 
PlacementConstraintProto { + optional string source = 1; + optional string target = 2; + optional PlacementConstraintScopeProto scope = 3 [default = DEFAULT]; + optional PlacementConstraintTypeProto type = 4 [default = NO_TYPE]; +} + +enum PlacementConstraintScopeProto { + DEFAULT = 0; + RACK = 1; + NODE = 2; + PROCESS = 3; // Same nodeID in a Host +} + +enum PlacementConstraintTypeProto { + NO_TYPE = 0; + AFFINITY = 1; + ANTI_AFFINITY = 2; + CARDINALITY = 3; //Rename to LIMIT ? + CPU_ISOLATION = 4; + MEM_ISOLATION = 5; +} + + +//////////////////////////////////////////////////////////////////////// ////// From reservation_protocol ///////////////////////////////////// //////////////////////////////////////////////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index b9949e1..556274f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -50,8 +50,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -64,9 +64,9 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.ApplicationConstants; -import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; +import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; @@ -81,6 +81,7 @@ import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; @@ -94,8 +95,8 @@ import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId; import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse; -import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.client.api.TimelineClient; +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.client.api.async.NMClientAsync; import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl; @@ -106,6 +107,7 @@ import org.apache.log4j.LogManager; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Sets; import com.sun.jersey.api.client.ClientHandlerException; /** @@ -892,10 
+894,12 @@ public void onContainersAllocated(List allocatedContainers) { + ", containerNode=" + allocatedContainer.getNodeId().getHost() + ":" + allocatedContainer.getNodeId().getPort() + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress() - + ", containerResourceMemory" + + ", containerResourceMemory=" + allocatedContainer.getResource().getMemorySize() - + ", containerResourceVirtualCores" - + allocatedContainer.getResource().getVirtualCores()); + + ", containerResourceVirtualCores=" + + allocatedContainer.getResource().getVirtualCores() + + ", containerPriority=" + + allocatedContainer.getPriority()); // + ", containerToken" // +allocatedContainer.getContainerToken().getIdentifier().toString()); @@ -1180,16 +1184,18 @@ private ContainerRequest setupContainerAskForRM() { // using * as any host will do for the distributed shell app // set the priority for the request // TODO - what is the range for priority? how to decide? - Priority pri = Priority.newInstance(requestPriority); + Priority pri = Priority.newInstance(++requestPriority); // Set up resource type requirements // For now, memory and CPU are supported so we set memory and cpu requirements Resource capability = Resource.newInstance(containerMemory, containerVirtualCores); - ContainerRequest request = new ContainerRequest(capability, null, null, - pri); - LOG.info("Requested container ask: " + request.toString()); + ContainerRequest request = new ContainerRequest(capability, null, null, pri, + true, null, ExecutionTypeRequest.newInstance(), + Sets.newHashSet("RegionServer", "DynamicTag" + requestPriority)); + LOG.info("Requested container ask: " + request.toString() + " TAGs=" + + request.getAllocationTags()); return request; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index eedb501..7a1a69a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -65,6 +66,10 @@ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.PlacementConstraint; +import org.apache.hadoop.yarn.api.records.PlacementConstraintScope; +import org.apache.hadoop.yarn.api.records.PlacementConstraintType; +import org.apache.hadoop.yarn.api.records.PlacementConstraintsExpression; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueInfo; @@ -80,7 +85,6 @@ import org.apache.hadoop.yarn.client.util.YarnClientUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; /** @@ -774,6 +778,26 @@ public 
boolean run() throws IOException, YarnException { // Set the queue to which this application is to be submitted in the RM appContext.setQueue(amQueue); + + PlacementConstraint placementConstraint = + PlacementConstraint.newInstance("RegionServer", "RegionServer", + PlacementConstraintType.AFFINITY, PlacementConstraintScope.NODE); + PlacementConstraintsExpression placementConstraintsExpression = + PlacementConstraintsExpression + .newInstance(new ArrayList() { + { + add(placementConstraint); + } + }); + ConstraintDefinition cd = + ConstraintDefinition.newInstance(System.currentTimeMillis(), + new ArrayList() { + { + add(placementConstraintsExpression); + } + }); + appContext.setConstraintDefinition(cd); + // Submit the application to the applications manager // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest); // Ignore the response as either a valid response object is returned on success diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java index 7acaf11..b503cb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java @@ -20,18 +20,19 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; import java.util.List; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; -import 
org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; - import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ExecutionType; @@ -45,6 +46,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; @InterfaceAudience.Public @InterfaceStability.Stable @@ -113,7 +115,7 @@ protected AMRMClient(String name) { final boolean relaxLocality; final String nodeLabelsExpression; final ExecutionTypeRequest executionTypeRequest; - + final Set allocationTags; /** * Instantiates a {@link ContainerRequest} with the given constraints and * locality relaxation enabled. @@ -212,17 +214,25 @@ public ContainerRequest(Resource capability, String[] nodes, String[] racks, public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority, boolean relaxLocality, String nodeLabelsExpression, ExecutionTypeRequest executionTypeRequest) { + this(capability, nodes, racks, priority, relaxLocality, + nodeLabelsExpression, + executionTypeRequest, Collections.emptySet()); + } + + public ContainerRequest(Resource capability, String[] nodes, String[] racks, + Priority priority, boolean relaxLocality, String nodeLabelsExpression, + ExecutionTypeRequest executionTypeRequest, Set allocationTags){ // Validate request Preconditions.checkArgument(capability != null, - "The Resource to be requested for each container " + - "should not be null "); + "The Resource to be requested for each container " + + "should not be null "); Preconditions.checkArgument(priority != null, - "The priority at which to request containers should not be null "); + "The priority at which 
to request containers should not be null "); Preconditions.checkArgument( - !(!relaxLocality && (racks == null || racks.length == 0) - && (nodes == null || nodes.length == 0)), - "Can't turn off locality relaxation on a " + - "request with no location constraints"); + !(!relaxLocality && (racks == null || racks.length == 0) + && (nodes == null || nodes.length == 0)), + "Can't turn off locality relaxation on a " + + "request with no location constraints"); this.capability = capability; this.nodes = (nodes != null ? ImmutableList.copyOf(nodes) : null); this.racks = (racks != null ? ImmutableList.copyOf(racks) : null); @@ -230,6 +240,7 @@ public ContainerRequest(Resource capability, String[] nodes, String[] racks, this.relaxLocality = relaxLocality; this.nodeLabelsExpression = nodeLabelsExpression; this.executionTypeRequest = executionTypeRequest; + this.allocationTags = allocationTags != null ? ImmutableSet.copyOf(allocationTags) : null; } public Resource getCapability() { @@ -260,12 +271,15 @@ public ExecutionTypeRequest getExecutionTypeRequest() { return executionTypeRequest; } + public Set getAllocationTags() { return allocationTags; } + public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Capability[").append(capability).append("]"); sb.append("Priority[").append(priority).append("]"); sb.append("ExecutionTypeRequest[").append(executionTypeRequest) .append("]"); + sb.append("AllocationTags[").append(allocationTags).append("]"); return sb.toString(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 4145944..b2759a8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -65,19 +65,19 @@ import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.api.AMRMClient; -import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.client.api.InvalidContainerRequestException; +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.RackResolver; +import org.apache.hadoop.yarn.util.resource.Resources; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; -import org.apache.hadoop.yarn.util.resource.Resources; @Private @Unstable @@ -86,7 +86,7 @@ private static final Log LOG = LogFactory.getLog(AMRMClientImpl.class); private static final List ANY_LIST = Collections.singletonList(ResourceRequest.ANY); - + private int lastResponseId = 0; protected String appHostName; @@ -96,18 +96,18 @@ protected ApplicationMasterProtocol rmClient; protected Resource clusterAvailableResources; protected int clusterNodeCount; - + // blacklistedNodes is required for keeping history of blacklisted nodes that // are sent to RM. On RESYNC command from RM, blacklistedNodes are used to get // current blacklisted nodes and send back to RM. 
protected final Set blacklistedNodes = new HashSet(); protected final Set blacklistAdditions = new HashSet(); protected final Set blacklistRemovals = new HashSet(); - + static class ResourceRequestInfo { ResourceRequest remoteRequest; LinkedHashSet containerRequests; - + ResourceRequestInfo(Priority priority, String resourceName, Resource capability, boolean relaxLocality) { remoteRequest = ResourceRequest.newInstance(priority, resourceName, @@ -138,11 +138,11 @@ public int compare(Resource arg0, Resource arg1) { } return -1; } - if(mem0 < mem1) { + if(mem0 < mem1) { return 1; } return -1; - } + } } static boolean canFit(Resource arg0, Resource arg1) { @@ -150,7 +150,7 @@ static boolean canFit(Resource arg0, Resource arg1) { long mem1 = arg1.getMemorySize(); long cpu0 = arg0.getVirtualCores(); long cpu1 = arg1.getVirtualCores(); - + return (mem0 <= mem1 && cpu0 <= cpu1); } @@ -212,7 +212,7 @@ protected void serviceStop() throws Exception { } super.serviceStop(); } - + @Override public RegisterApplicationMasterResponse registerApplicationMaster( String appHostName, int appHostPort, String appTrackingUrl) @@ -245,7 +245,7 @@ private RegisterApplicationMasterResponse registerApplicationMaster() } @Override - public AllocateResponse allocate(float progressIndicator) + public AllocateResponse allocate(float progressIndicator) throws YarnException, IOException { Preconditions.checkArgument(progressIndicator >= 0, "Progress indicator should not be negative"); @@ -261,12 +261,12 @@ public AllocateResponse allocate(float progressIndicator) synchronized (this) { askList = new ArrayList(ask.size()); for(ResourceRequest r : ask) { - // create a copy of ResourceRequest as we might change it while the + // create a copy of ResourceRequest as we might change it while the // RPC layer is using it to send info across askList.add(ResourceRequest.newInstance(r.getPriority(), r.getResourceName(), r.getCapability(), r.getNumContainers(), r.getRelaxLocality(), 
r.getNodeLabelExpression(), - r.getExecutionTypeRequest())); + r.getExecutionTypeRequest(), r.getAllocationTags())); } List increaseList = new ArrayList<>(); List decreaseList = new ArrayList<>(); @@ -295,11 +295,11 @@ public AllocateResponse allocate(float progressIndicator) blacklistToAdd.addAll(blacklistAdditions); blacklistToRemove.addAll(blacklistRemovals); - + ResourceBlacklistRequest blacklistRequest = ResourceBlacklistRequest.newInstance(blacklistToAdd, blacklistToRemove); - + allocateRequest = AllocateRequest.newInstance(lastResponseId, progressIndicator, askList, releaseList, blacklistRequest, @@ -479,7 +479,7 @@ public void unregisterApplicationMaster(FinalApplicationStatus appStatus, unregisterApplicationMaster(appStatus, appMessage, appTrackingUrl); } } - + @Override public synchronized void addContainerRequest(T req) { Preconditions.checkArgument(req != null, @@ -500,8 +500,8 @@ public synchronized void addContainerRequest(T req) { // priority checkLocalityRelaxationConflict(req.getPriority(), ANY_LIST, req.getRelaxLocality()); - // check that specific rack cannot be mixed with specific node within a - // priority. If node and its rack are both specified then they must be + // check that specific rack cannot be mixed with specific node within a + // priority. If node and its rack are both specified then they must be // in the same request. 
// For explicitly requested racks, we set locality relaxation to true checkLocalityRelaxationConflict(req.getPriority(), dedupedRacks, true); @@ -515,18 +515,18 @@ public synchronized void addContainerRequest(T req) { if(dedupedNodes.size() != req.getNodes().size()) { Joiner joiner = Joiner.on(','); LOG.warn("ContainerRequest has duplicate nodes: " - + joiner.join(req.getNodes())); + + joiner.join(req.getNodes())); } for (String node : dedupedNodes) { addResourceRequest(req.getPriority(), node, req.getExecutionTypeRequest(), req.getCapability(), req, true, - req.getNodeLabelExpression()); + req.getNodeLabelExpression(), req.getAllocationTags()); } } for (String rack : dedupedRacks) { addResourceRequest(req.getPriority(), rack, req.getExecutionTypeRequest(), - req.getCapability(), req, true, req.getNodeLabelExpression()); + req.getCapability(), req, true, req.getNodeLabelExpression(), req.getAllocationTags()); } // Ensure node requests are accompanied by requests for @@ -534,12 +534,12 @@ public synchronized void addContainerRequest(T req) { for (String rack : inferredRacks) { addResourceRequest(req.getPriority(), rack, req.getExecutionTypeRequest(), req.getCapability(), req, req.getRelaxLocality(), - req.getNodeLabelExpression()); + req.getNodeLabelExpression(), req.getAllocationTags()); } // Off-switch addResourceRequest(req.getPriority(), ResourceRequest.ANY, req.getExecutionTypeRequest(), req.getCapability(), req, - req.getRelaxLocality(), req.getNodeLabelExpression()); + req.getRelaxLocality(), req.getNodeLabelExpression(), req.getAllocationTags()); } @Override @@ -596,12 +596,12 @@ public synchronized void releaseAssignedContainer(ContainerId containerId) { release.add(containerId); pendingChange.remove(containerId); } - + @Override public synchronized Resource getAvailableResources() { return clusterAvailableResources; } - + @Override public synchronized int getClusterNodeCount() { return clusterNodeCount; @@ -639,11 +639,11 @@ public synchronized int 
getClusterNodeCount() { } } // no match found - return list; + return list; } - + private Set resolveRacks(List nodes) { - Set racks = new HashSet(); + Set racks = new HashSet(); if (nodes != null) { for (String node : nodes) { // Ensure node requests are accompanied by requests for @@ -656,7 +656,7 @@ public synchronized int getClusterNodeCount() { } } } - + return racks; } @@ -684,16 +684,16 @@ private void checkLocalityRelaxationConflict(Priority priority, } } } - + /** * Valid if a node label expression specified on container request is valid or * not - * + * * @param containerRequest */ private void checkNodeLabelExpression(T containerRequest) { String exp = containerRequest.getNodeLabelExpression(); - + if (null == exp || exp.isEmpty()) { return; } @@ -724,13 +724,13 @@ private void validateContainerResourceChangeRequest( private void addResourceRequestToAsk(ResourceRequest remoteRequest) { // This code looks weird but is needed because of the following scenario. - // A ResourceRequest is removed from the remoteRequestTable. A 0 container + // A ResourceRequest is removed from the remoteRequestTable. A 0 container // request is added to 'ask' to notify the RM about not needing it any more. - // Before the call to allocate, the user now requests more containers. If + // Before the call to allocate, the user now requests more containers. If // the locations of the 0 size request and the new request are the same // (with the difference being only container count), then the set comparator - // will consider both to be the same and not add the new request to ask. So - // we need to check for the "same" request being present and remove it and + // will consider both to be the same and not add the new request to ask. So + // we need to check for the "same" request being present and remove it and // then add it back. The comparator is container count agnostic. // This should happen only rarely but we do need to guard against it. 
if(ask.contains(remoteRequest)) { @@ -741,11 +741,11 @@ private void addResourceRequestToAsk(ResourceRequest remoteRequest) { private void addResourceRequest(Priority priority, String resourceName, ExecutionTypeRequest execTypeReq, Resource capability, T req, - boolean relaxLocality, String labelExpression) { + boolean relaxLocality, String labelExpression, Set allocationTags) { @SuppressWarnings("unchecked") ResourceRequestInfo resourceRequestInfo = remoteRequestsTable .addResourceRequest(priority, resourceName, - execTypeReq, capability, req, relaxLocality, labelExpression); + execTypeReq, capability, req, relaxLocality, labelExpression, allocationTags); // Note this down for next interaction with ResourceManager addResourceRequestToAsk(resourceRequestInfo.remoteRequest); @@ -754,7 +754,7 @@ private void addResourceRequest(Priority priority, String resourceName, LOG.debug("addResourceRequest:" + " applicationId=" + " priority=" + priority.getPriority() + " resourceName=" + resourceName + " numContainers=" - + resourceRequestInfo.remoteRequest.getNumContainers() + + resourceRequestInfo.remoteRequest.getNumContainers() + " #asks=" + ask.size()); } } @@ -790,15 +790,15 @@ private void decResourceRequest(Priority priority, String resourceName, @Override public synchronized void updateBlacklist(List blacklistAdditions, List blacklistRemovals) { - + if (blacklistAdditions != null) { this.blacklistAdditions.addAll(blacklistAdditions); this.blacklistedNodes.addAll(blacklistAdditions); - // if some resources are also in blacklistRemovals updated before, we + // if some resources are also in blacklistRemovals updated before, we // should remove them here. this.blacklistRemovals.removeAll(blacklistAdditions); } - + if (blacklistRemovals != null) { this.blacklistRemovals.addAll(blacklistRemovals); this.blacklistedNodes.removeAll(blacklistRemovals); @@ -806,7 +806,7 @@ public synchronized void updateBlacklist(List blacklistAdditions, // them here. 
this.blacklistAdditions.removeAll(blacklistRemovals); } - + if (blacklistAdditions != null && blacklistRemovals != null && blacklistAdditions.removeAll(blacklistRemovals)) { // we allow resources to appear in addition list and removal list in the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java index 853a512..ced2737 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java @@ -18,21 +18,21 @@ package org.apache.hadoop.yarn.client.api.impl; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.yarn.api.records.ExecutionType; -import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; -import org.apache.hadoop.yarn.api.records.Priority; -import org.apache.hadoop.yarn.api.records.Resource; - import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.yarn.api.records.ExecutionType; +import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl.ResourceRequestInfo; import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl.ResourceReverseMemoryThenCpuComparator; @@ -266,7 +266,7 @@ 
ResourceRequestInfo remove(Priority priority, String resourceName, @SuppressWarnings("unchecked") ResourceRequestInfo addResourceRequest(Priority priority, String resourceName, ExecutionTypeRequest execTypeReq, Resource capability, T req, - boolean relaxLocality, String labelExpression) { + boolean relaxLocality, String labelExpression, Set allocationTags) { ResourceRequestInfo resourceRequestInfo = get(priority, resourceName, execTypeReq.getExecutionType(), capability); if (resourceRequestInfo == null) { @@ -287,6 +287,10 @@ ResourceRequestInfo addResourceRequest(Priority priority, String resourceName, if (ResourceRequest.ANY.equals(resourceName)) { resourceRequestInfo.remoteRequest.setNodeLabelExpression(labelExpression); } + + if (allocationTags != null) + resourceRequestInfo.remoteRequest.setAllocationTags(allocationTags); + return resourceRequestInfo; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index 67e3a84..069b458 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -26,6 +26,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.Priority; @@ -36,6 +37,7 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ConstraintDefinitionProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto; import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationContextProto; import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; @@ -63,6 +65,7 @@ private ResourceRequest amResourceRequest = null; private LogAggregationContext logAggregationContext = null; private ReservationId reservationId = null; + private ConstraintDefinition constraintDefinition = null; public ApplicationSubmissionContextPBImpl() { builder = ApplicationSubmissionContextProto.newBuilder(); @@ -131,6 +134,9 @@ private void mergeLocalToBuilder() { if (this.reservationId != null) { builder.setReservationId(convertToProtoFormat(this.reservationId)); } + if ( this.constraintDefinition != null ){ + builder.setConstraintDefinition(convertToProtoFormat(this.constraintDefinition)); + } } private void mergeLocalToProto() { @@ -541,6 +547,28 @@ public void setLogAggregationContext( this.logAggregationContext = logAggregationContext; } + @Override + public ConstraintDefinition getConstraintDefinition() { + ApplicationSubmissionContextProtoOrBuilder p = viaProto ? 
proto : builder; + if( this.constraintDefinition != null) + return this.constraintDefinition; + if( !p.hasConstraintDefinition() ) + return null; + this.constraintDefinition = convertFromProtoFormat(p.getConstraintDefinition()); + return this.constraintDefinition; + } + + @Override + public void setConstraintDefinition(ConstraintDefinition cd) { + maybeInitBuilder(); + if(cd == null) { + builder.clearConstraintDefinition(); + } + this.constraintDefinition = cd; + } + + + private ReservationIdPBImpl convertFromProtoFormat(ReservationIdProto p) { return new ReservationIdPBImpl(p); } @@ -548,4 +576,9 @@ private ReservationIdPBImpl convertFromProtoFormat(ReservationIdProto p) { private ReservationIdProto convertToProtoFormat(ReservationId t) { return ((ReservationIdPBImpl) t).getProto(); } -} + + public ConstraintDefinitionPBImpl convertFromProtoFormat(ConstraintDefinitionProto cdp){ return new ConstraintDefinitionPBImpl(cdp); } + + public ConstraintDefinitionProto convertToProtoFormat(ConstraintDefinition cd){ return ((ConstraintDefinitionPBImpl)cd).getProto(); } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ConstraintDefinitionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ConstraintDefinitionPBImpl.java new file mode 100644 index 0000000..eb42255 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ConstraintDefinitionPBImpl.java @@ -0,0 +1,185 @@ +package org.apache.hadoop.yarn.api.records.impl.pb; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; +import org.apache.hadoop.yarn.api.records.PlacementConstraintsExpression; +import org.apache.hadoop.yarn.proto.YarnProtos.ConstraintDefinitionProto; +import 
org.apache.hadoop.yarn.proto.YarnProtos.ConstraintDefinitionProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintsExpressionProto; + +/** + * Created by pgaref on 7/7/16. + */ +public class ConstraintDefinitionPBImpl extends ConstraintDefinition { + + ConstraintDefinitionProto proto = + ConstraintDefinitionProto.getDefaultInstance(); + ConstraintDefinitionProto.Builder builder = null; + boolean viaProto = false; + + List placementConstraintsExpressions = null; + + public ConstraintDefinitionPBImpl() { + builder = ConstraintDefinitionProto.newBuilder(); + } + + public ConstraintDefinitionPBImpl(ConstraintDefinitionProto proto) { + this.proto = proto; + viaProto = true; + } + + public ConstraintDefinitionProto getProto() { + mergeLocalToProto(); + proto = viaProto ? this.proto : this.builder.build(); + viaProto = true; + return proto; + } + + public void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ConstraintDefinitionProto.newBuilder(proto); + } + viaProto = false; + } + + public void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + public void mergeLocalToBuilder() { + if (this.placementConstraintsExpressions != null) + addPlacementConstraintExpressionToProto(); + } + + @Override + public long getTimestamp() { + ConstraintDefinitionProtoOrBuilder protoOrBuilder = + viaProto ? 
this.proto : this.builder; + if (!protoOrBuilder.hasConstraintDefinitionTimestamp()) + return 0; + return protoOrBuilder.getConstraintDefinitionTimestamp(); + } + + @Override + public void setTimestamp(long ts) { + maybeInitBuilder(); + if (ts <= 0) { + builder.clearConstraintDefinitionTimestamp(); + return; + } + builder.setConstraintDefinitionTimestamp(ts); + } + + @Override + public List getPlacementConstraintsExpressions() { + initPlacementConstraintsExpressionList(); + return this.placementConstraintsExpressions; + } + + @Override + public void setPlacementConstraintsExpressions( + List pce) { + if (pce == null) { + builder.clearPlacementConstraintsExpression(); + return; + } + this.placementConstraintsExpressions = pce; + + } + + public void initPlacementConstraintsExpressionList() { + if (this.placementConstraintsExpressions != null) + return; + ConstraintDefinitionProtoOrBuilder protoOrBuilder = + viaProto ? this.proto : this.builder; + List constraintsExpressions = + protoOrBuilder.getPlacementConstraintsExpressionList(); + this.placementConstraintsExpressions = + new ArrayList(); + + for (PlacementConstraintsExpressionProto tmp : constraintsExpressions) + this.placementConstraintsExpressions.add(convertFromProtoFormat(tmp)); + + } + + public void addPlacementConstraintExpressionToProto() { + maybeInitBuilder(); + builder.clearPlacementConstraintsExpression(); + if (this.placementConstraintsExpressions == null) + return; + Iterable iterable = + new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + Iterator iter = + placementConstraintsExpressions.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public PlacementConstraintsExpressionProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + }; + this.builder.addAllPlacementConstraintsExpression(iterable); + + } + + 
@Override + public String toString() { + return "{ Timestamp: " + getTimestamp() + ", ConstrainExpressions: " + + getPlacementConstraintsExpressions() + " }"; + } + + /* + * Util Functions! + */ + @Override + public int compareTo(ConstraintDefinition o) { + return (int) (getTimestamp() - o.getTimestamp()); + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + public PlacementConstraintsExpression convertFromProtoFormat( + PlacementConstraintsExpressionProto proto) { + return new PlacementConstraintsExpressionPBImpl(proto); + } + + public PlacementConstraintsExpressionProto convertToProtoFormat( + PlacementConstraintsExpression cd) { + return ((PlacementConstraintsExpressionPBImpl) cd).getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PlacementConstraintPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PlacementConstraintPBImpl.java new file mode 100644 index 0000000..977a4c3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PlacementConstraintPBImpl.java @@ -0,0 +1,139 @@ +package org.apache.hadoop.yarn.api.records.impl.pb; + +import org.apache.hadoop.yarn.api.records.PlacementConstraint; +import org.apache.hadoop.yarn.api.records.PlacementConstraintScope; +import org.apache.hadoop.yarn.api.records.PlacementConstraintType; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintScopeProto; 
+import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTypeProto; +/** + * Created by pgaref on 7/7/16. + */ +public class PlacementConstraintPBImpl extends PlacementConstraint{ + + PlacementConstraintProto proto = PlacementConstraintProto.getDefaultInstance(); + PlacementConstraintProto.Builder builder = null; + boolean viaProto = false; + + + public PlacementConstraintPBImpl(){ + builder = PlacementConstraintProto.newBuilder(); + } + + public PlacementConstraintPBImpl(PlacementConstraintProto proto ){ + this.proto = proto; + this.viaProto = true; + } + + public PlacementConstraintProto getProto(){ + proto = this.viaProto ? this.proto : this.builder.build(); + viaProto = true; + return proto; + } + + private void maybeInitBuilder() { + if ( viaProto || builder == null ) { + builder = PlacementConstraintProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public void setSource(String source) { + maybeInitBuilder(); + if( source == null ){ + builder.clearSource(); + return; + } + builder.setSource(source); + } + + @Override + public String getSource() { + PlacementConstraintProtoOrBuilder pcb = this.viaProto ? this.proto : this.builder; + return pcb.getSource(); + } + + @Override + public void setTarget(String target) { + maybeInitBuilder(); + if( target == null ){ + builder.clearTarget(); + return; + } + builder.setTarget(target); + } + + @Override + public String getTarget() { + PlacementConstraintProtoOrBuilder pcb = this.viaProto ? this.proto : this.builder; + return pcb.getTarget(); + } + + @Override + public void setScope(PlacementConstraintScope pcs) { + maybeInitBuilder(); + if( pcs == null ){ + builder.clearScope(); + return; + } + builder.setScope(convertToProtoFormat(pcs)); + } + + @Override + public PlacementConstraintScope getScope() { + PlacementConstraintProtoOrBuilder pcb = this.viaProto ? 
this.proto : this.builder; + if( !pcb.hasScope() ) + return null; + return convertFromProtoFormat(pcb.getScope()); + } + + @Override + public void setType(PlacementConstraintType pct) { + maybeInitBuilder(); + if( pct == null ){ + builder.clearType(); + return; + } + builder.setType(convertToProtoFormat(pct)); + + } + + @Override + public PlacementConstraintType getType() { + PlacementConstraintProtoOrBuilder pcb = this.viaProto ? this.proto : this.builder; + if( !pcb.hasType() ) + return null; + return convertFromProtoFormat(pcb.getType()); + } + + @Override + public String toString() { + return "{ Source: " +this.getSource() + + ", Target: "+ this.getTarget() + + ", Scope: "+ this.getScope().name() + + ", Type: "+this.getType().name()+ + " }"; + } + + /* + * Util Functions + */ + public PlacementConstraintScopeProto convertToProtoFormat(PlacementConstraintScope pcs){ + return ProtoUtils.convertToProtoFormat(pcs); + } + + public PlacementConstraintScope convertFromProtoFormat(PlacementConstraintScopeProto pcsp){ + return ProtoUtils.convertFromProtoFormat(pcsp); + } + + public PlacementConstraintTypeProto convertToProtoFormat(PlacementConstraintType pct){ + return ProtoUtils.convertToProtoFormat(pct); + } + + public PlacementConstraintType convertFromProtoFormat(PlacementConstraintTypeProto pctp){ + return ProtoUtils.convertFromProtoFormat(pctp); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PlacementConstraintsExpressionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PlacementConstraintsExpressionPBImpl.java new file mode 100644 index 0000000..5081328 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PlacementConstraintsExpressionPBImpl.java @@ -0,0 +1,156 @@ +package org.apache.hadoop.yarn.api.records.impl.pb; + +import 
java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.yarn.api.records.PlacementConstraint; +import org.apache.hadoop.yarn.api.records.PlacementConstraintsExpression; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintsExpressionProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintsExpressionProtoOrBuilder; + +/** + * Created by pgaref on 7/7/16. + */ +public class PlacementConstraintsExpressionPBImpl + extends PlacementConstraintsExpression { + + PlacementConstraintsExpressionProto proto = + PlacementConstraintsExpressionProto.getDefaultInstance(); + PlacementConstraintsExpressionProto.Builder builder = null; + boolean viaProto = false; + + List placementConstraints; + + public PlacementConstraintsExpressionPBImpl() { + this.builder = PlacementConstraintsExpressionProto.newBuilder(); + } + + public PlacementConstraintsExpressionPBImpl( + PlacementConstraintsExpressionProto proto) { + this.proto = proto; + viaProto = true; + } + + public PlacementConstraintsExpressionProto getProto() { + mergeLocalToProto(); + proto = viaProto ? this.proto : this.builder.build(); + viaProto = true; + return proto; + } + + public void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + public void mergeLocalToBuilder() { + if (this.placementConstraints != null) + addPlacementConstraintsToProto(); + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = PlacementConstraintsExpressionProto.newBuilder(proto); + } + viaProto = false; + } + + public void initPlacementConstraintsList() { + if (placementConstraints != null) + return; + PlacementConstraintsExpressionProtoOrBuilder pcepb = + viaProto ? 
this.proto : this.builder; + List constraintList = + pcepb.getPlacementConstraintsList(); + this.placementConstraints = new ArrayList(); + for (PlacementConstraintProto constraint : constraintList) + this.placementConstraints.add(convertFromProtoFormat(constraint)); + + } + + @Override + public List getPlacementConstraints() { + initPlacementConstraintsList(); + return this.placementConstraints; + } + + @Override + public void setPlacementConstraints(List lpc) { + if (lpc == null) { + this.builder.clearPlacementConstraints(); + return; + } + this.placementConstraints = lpc; + } + + private void addPlacementConstraintsToProto() { + maybeInitBuilder(); + builder.clearPlacementConstraints(); + if (this.placementConstraints == null) + return; + Iterable iterable = + new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + Iterator iter = + placementConstraints.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public PlacementConstraintProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + }; + this.builder.addAllPlacementConstraints(iterable); + } + + @Override + public String toString() { + return "{ PlacementConstraints: " + getPlacementConstraints() + "}"; + } + + /* + * Conversion Utils + */ + public PlacementConstraint convertFromProtoFormat( + PlacementConstraintProto pcp) { + return new PlacementConstraintPBImpl(pcp); + } + + public PlacementConstraintProto convertToProtoFormat(PlacementConstraint pc) { + return ((PlacementConstraintPBImpl) pc).getProto(); + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + 
} +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java index 4b62358..1bd670a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java @@ -29,14 +29,16 @@ import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy; import org.apache.hadoop.yarn.api.records.ContainerState; -import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.ExecutionType; +import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.LogAggregationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.PlacementConstraintScope; +import org.apache.hadoop.yarn.api.records.PlacementConstraintType; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter; @@ -44,26 +46,28 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.proto.YarnProtos; +import org.apache.hadoop.yarn.proto.YarnServiceProtos; import org.apache.hadoop.yarn.proto.YarnProtos.AMCommandProto; import 
org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerRetryPolicyProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTypeProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto; import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto; import org.apache.hadoop.yarn.proto.YarnProtos.LogAggregationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintScopeProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.ReservationRequestInterpreterProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerRetryPolicyProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTypeProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos; import org.apache.hadoop.yarn.server.api.ContainerType; import com.google.protobuf.ByteString; @@ -252,6 +256,32 @@ public static ApplicationAccessType convertFromProtoFormat( return 
ApplicationAccessType.valueOf(e.name().replace( APP_ACCESS_TYPE_PREFIX, "")); } + + /* + * PlacementConstraint Request Scope + */ + public static PlacementConstraintScopeProto convertToProtoFormat( + PlacementConstraintScope pcs) { + return PlacementConstraintScopeProto.valueOf(pcs.name()); + } + + public static PlacementConstraintScope convertFromProtoFormat( + PlacementConstraintScopeProto pcsp) { + return PlacementConstraintScope.valueOf(pcsp.name()); + } + + /* + * PlacementConstraint Request Type + */ + public static PlacementConstraintTypeProto convertToProtoFormat( + PlacementConstraintType pct) { + return PlacementConstraintTypeProto.valueOf(pct.name()); + } + + public static PlacementConstraintType convertFromProtoFormat( + PlacementConstraintTypeProto pctp) { + return PlacementConstraintType.valueOf(pctp.name()); + } /* * Reservation Request interpreter type diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java index 9890296..0949583 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java @@ -19,8 +19,12 @@ package org.apache.hadoop.yarn.api.records.impl.pb; +import java.util.HashSet; +import java.util.Set; + import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; @@ -30,6 +34,7 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProtoOrBuilder; + @Private @Unstable public class ResourceRequestPBImpl extends ResourceRequest { @@ -40,6 +45,7 @@ private Priority priority = null; private Resource capability = null; private ExecutionTypeRequest executionTypeRequest = null; + private Set allocationTags = null; public ResourceRequestPBImpl() { @@ -69,6 +75,10 @@ private void mergeLocalToBuilder() { builder.setExecutionTypeRequest( ProtoUtils.convertToProtoFormat(this.executionTypeRequest)); } + if (this.allocationTags != null) { + builder.clearAllocationTags(); + builder.addAllAllocationTags(this.allocationTags); + } } private void mergeLocalToProto() { @@ -229,7 +239,9 @@ public String toString() { + ", Location: " + getResourceName() + ", Relax Locality: " + getRelaxLocality() + ", Execution Type Request: " + getExecutionTypeRequest() - + ", Node Label Expression: " + getNodeLabelExpression() + "}"; + + ", Node Label Expression: " + getNodeLabelExpression() + + ", Allocation Tags: " + getAllocationTags() + + "}"; } @Override @@ -250,4 +262,26 @@ public void setNodeLabelExpression(String nodeLabelExpression) { } builder.setNodeLabelExpression(nodeLabelExpression); } + + private void initAllocationTags(){ + if(this.allocationTags != null) + return; + ResourceRequestProtoOrBuilder p = viaProto ? 
proto : builder; + this.allocationTags = new HashSet(); + this.allocationTags.addAll(p.getAllocationTagsList()); + } + + @Override + public Set getAllocationTags() { + initAllocationTags(); + return this.allocationTags; + } + + @Override + public void setAllocationTags(Set tags) { + maybeInitBuilder(); + builder.clearAllocationTags(); + this.allocationTags = tags; + + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java index 1a83632..53591cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java @@ -86,6 +86,11 @@ "Node-label-based scheduling is disabled. Please check " + YarnConfiguration.NODE_LABELS_ENABLED; + @VisibleForTesting + public static final String TAG_LABELS_NOT_ENABLED_ERR = + "Dynamic-tag-label-based scheduling is disabled. Please check " + + YarnConfiguration.TAG_LABELS_ENABLED; + /** * If a user doesn't specify label of a queue or node, it belongs * DEFAULT_LABEL @@ -99,11 +104,18 @@ protected ConcurrentMap nodeCollections = new ConcurrentHashMap(); + // Dynamic Tags used for services! 
+ protected ConcurrentMap tagsCollections = + new ConcurrentHashMap(); + protected ConcurrentHashMap> containeriIdsToTags = + new ConcurrentHashMap>(); + protected final ReadLock readLock; protected final WriteLock writeLock; protected NodeLabelsStore store; private boolean nodeLabelsEnabled = false; + private boolean tagLabelsEnabled = false; private boolean isCentralizedNodeLabelConfiguration = true; @@ -221,11 +233,13 @@ protected void initDispatcher(Configuration conf) { protected void serviceInit(Configuration conf) throws Exception { // set if node labels enabled nodeLabelsEnabled = YarnConfiguration.areNodeLabelsEnabled(conf); + tagLabelsEnabled = YarnConfiguration.areTagLabelsEnabled(conf); isCentralizedNodeLabelConfiguration = YarnConfiguration.isCentralizedNodeLabelConfiguration(conf); labelCollections.put(NO_LABEL, new RMNodeLabel(NO_LABEL)); + tagsCollections.put(NO_LABEL, new RMNodeLabel(NO_LABEL)); } /** @@ -404,6 +418,57 @@ public void addLabelsToNode(Map> addedLabelsToNode) checkAddLabelsToNode(addedLabelsToNode); internalUpdateLabelsOnNodes(addedLabelsToNode, NodeLabelUpdateOperation.ADD); } + + public void addApplicationTags(String appId, NodeId nodeId, Long containerId, + Resource rs, Set tags) throws IOException { + if (!tagLabelsEnabled) { + return; + } + if (tags == null || appId == null) { + throw new IOException( + "Failed to insert Null tag for Node= " + nodeId + "."); + } + createHostIfNonExisted(nodeId.getHost()); + + Set allTags = new HashSet<>(); + for (String tag : tags) { + allTags.add(appId + "." 
+ tag); + } + allTags.add(appId); + + for (String tag : allTags) { + RMNodeLabel foundTag = tagsCollections.get(tag); + if (foundTag == null) { + tagsCollections.put(tag, new RMNodeLabel(tag)); + foundTag = tagsCollections.get(tag); + } + foundTag.addNodeId(nodeId); + foundTag.addNode(rs); + } + // Keeping track of containers for efficient tag removal + containeriIdsToTags.put(containerId, allTags); + + } + + public void removeApplicationTags(String appId, NodeId nodeId, + Long containerId, Resource rs) throws IOException { + if (!tagLabelsEnabled) { + return; + } + if ((containeriIdsToTags.get(containerId) == null)) { + throw new IOException("Failed to remove tags for Application=" + appId + + " using container=" + containerId + " on Node= " + nodeId + "."); + } + + Set containerTags = containeriIdsToTags.get(containerId); + for (String tag : containerTags) { + tagsCollections.get(tag).removeNodeId(nodeId); + tagsCollections.get(tag).removeNode(rs); + // if(tagsCollections.get(tag).getNumActiveNMs() == 0) + // tagsCollections.remove(tag); + } + containeriIdsToTags.remove(containerId); + } protected void checkRemoveFromClusterNodeLabels( Collection labelsToRemove) throws IOException { @@ -801,6 +866,45 @@ public void replaceLabelsOnNode(Map> replaceLabelsToNode) } } + public Map> getTagsToNodes(Set tags) { + try { + readLock.lock(); + Map> toReturn = new HashMap>(); + for (String tag : tags) { + if (tag.equals(NO_LABEL)) + continue; + RMNodeLabel currentLabel = this.tagsCollections.get(tag); + if (currentLabel != null) { + Set labelNodeIDs = currentLabel.getAssociatedNodeIds(); + if (!labelNodeIDs.isEmpty()) { + toReturn.put(tag, labelNodeIDs); + } else { + LOG.warn("getTagsToNodes : Tag [ " + tag + " ] cannot be found!"); + } + } + } + return toReturn; + } finally { + readLock.unlock(); + } + } + + /** + * Get existing Service Tags as Immutable Set + * + * @return + */ + public Set getServiceTags() { + try { + readLock.lock(); + Set tags = new 
HashSet(this.tagsCollections.keySet()); + tags.remove(NO_LABEL); + return Collections.unmodifiableSet(tags); + } finally { + readLock.unlock(); + } + } + /** * Get mapping of labels to nodes for all the labels. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index 538b25a..2ed1a9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.yarn.api; + import java.io.IOException; import java.lang.reflect.Array; import java.lang.reflect.Constructor; @@ -29,9 +30,9 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Random; import java.util.Set; +import java.util.Map.Entry; import org.apache.commons.lang.math.LongRange; import org.apache.commons.logging.Log; @@ -43,9 +44,9 @@ import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; -import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse; +import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl; import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl; @@ -84,6 +85,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesRequestPBImpl; @@ -105,8 +108,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceRequestPBImpl; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -115,6 +116,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import 
org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -129,6 +131,8 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.PlacementConstraint; +import org.apache.hadoop.yarn.api.records.PlacementConstraintsExpression; import org.apache.hadoop.yarn.api.records.PreemptionContainer; import org.apache.hadoop.yarn.api.records.PreemptionContract; import org.apache.hadoop.yarn.api.records.PreemptionMessage; @@ -143,8 +147,8 @@ import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.ReservationRequest; import org.apache.hadoop.yarn.api.records.ReservationRequests; -import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -160,6 +164,7 @@ import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationResourceUsageReportPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ConstraintDefinitionPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; @@ -173,6 +178,8 @@ import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl; +import 
org.apache.hadoop.yarn.api.records.impl.pb.PlacementConstraintPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.PlacementConstraintsExpressionPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContainerPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContractPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl; @@ -195,6 +202,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ConstraintDefinitionProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto; @@ -207,6 +215,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintsExpressionProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto; @@ -280,6 +290,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceResponseProto; import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesRequestProto; @@ -295,8 +307,6 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponseProto; @@ -477,6 +487,10 @@ public static void setup() throws Exception { generateByNewInstance(NodeReport.class); generateByNewInstance(Token.class); generateByNewInstance(NMToken.class); + // for Placement Constraints + generateByNewInstance(PlacementConstraint.class); + generateByNewInstance(PlacementConstraintsExpression.class); + generateByNewInstance(ConstraintDefinition.class); generateByNewInstance(ResourceRequest.class); generateByNewInstance(ApplicationAttemptReport.class); generateByNewInstance(ApplicationResourceUsageReport.class); @@ -960,7 +974,7 @@ public void testApplicationSubmissionContextPBImpl() throws Exception { ApplicationSubmissionContext ctx = ApplicationSubmissionContext.newInstance(null, null, null, null, null, - false, false, 0, Resources.none(), null, false, null, null); + false, false, 0, Resources.none(), null, false, null, null, null); Assert.assertNotNull(ctx.getResource()); } @@ -1217,6 +1231,24 @@ public void testUpdateNodeResourceResponsePBImpl() throws Exception { } @Test + 
public void testPlacementContraintPBImpl() throws Exception { + validatePBImplRecord(PlacementConstraintPBImpl.class, + PlacementConstraintProto.class); + } + + @Test + public void testPlacementConstraintExpressionPBImpl() throws Exception { + validatePBImplRecord(PlacementConstraintsExpressionPBImpl.class, + PlacementConstraintsExpressionProto.class); + } + + @Test + public void testConstraintDefinitionPBImpl() throws Exception { + validatePBImplRecord(ConstraintDefinitionPBImpl.class, + ConstraintDefinitionProto.class); + } + + @Test public void testReservationSubmissionRequestPBImpl() throws Exception { validatePBImplRecord(ReservationSubmissionRequestPBImpl.class, ReservationSubmissionRequestProto.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index f575961..40d8d9e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -446,7 +446,6 @@ public AllocateResponse allocate(AllocateRequest request) this.rmContext.getDispatcher().getEventHandler().handle( new RMAppAttemptStatusupdateEvent(appAttemptId, request .getProgress())); - List ask = request.getAskList(); List release = request.getReleaseList(); @@ -685,12 +684,17 @@ public void registerAppAttempt(ApplicationAttemptId attemptId) { LOG.info("Registering app attempt : " + attemptId); responseMap.put(attemptId, new AllocateResponseLock(response)); 
rmContext.getNMTokenSecretManager().registerApplicationAttempt(attemptId); + + RMApp app = rmContext.getRMApps().get(attemptId.getApplicationId()); + rmContext.getRMConstraintsManager().registerApplicationAttempt(attemptId, app.getApplicationSubmissionContext().getConstraintDefinition()); } public void unregisterAttempt(ApplicationAttemptId attemptId) { LOG.info("Unregistering app attempt : " + attemptId); responseMap.remove(attemptId); rmContext.getNMTokenSecretManager().unregisterApplicationAttempt(attemptId); + + rmContext.getRMConstraintsManager().unregisterApplicationAttempt(attemptId); } public void refreshServiceAcls(Configuration configuration, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java index caa0ff13..a05635f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints.RMConstraintsManager; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import 
org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; @@ -97,6 +98,7 @@ private RMNodeLabelsManager nodeLabelManager; private RMDelegatedNodeLabelsUpdater rmDelegatedNodeLabelsUpdater; + private RMConstraintsManager rmConstraintsManager; private long epoch; private Clock systemClock = SystemClock.getInstance(); private long schedulerRecoveryStartTime = 0; @@ -120,7 +122,8 @@ public RMActiveServiceContext(Dispatcher rmDispatcher, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager, ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager, - ResourceScheduler scheduler) { + ResourceScheduler scheduler, + RMConstraintsManager constraintsManager) { this(); this.setContainerAllocationExpirer(containerAllocationExpirer); this.setAMLivelinessMonitor(amLivelinessMonitor); @@ -131,6 +134,7 @@ public RMActiveServiceContext(Dispatcher rmDispatcher, this.setNMTokenSecretManager(nmTokenSecretManager); this.setClientToAMTokenSecretManager(clientToAMTokenSecretManager); this.setScheduler(scheduler); + this.setRMConstraintsManager(constraintsManager); RMStateStore nullStore = new NullRMStateStore(); nullStore.setRMDispatcher(rmDispatcher); @@ -467,4 +471,12 @@ public PlacementManager getQueuePlacementManager() { public void setQueuePlacementManager(PlacementManager placementMgr) { this.queuePlacementManager = placementMgr; } + + @Private + @Unstable + public RMConstraintsManager getRMConstraintsManager(){ return this.rmConstraintsManager; } + + @Private + @Unstable + public void setRMConstraintsManager(RMConstraintsManager manager){ this.rmConstraintsManager = manager; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index 2ba445c..5cfbd96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints.RMConstraintsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; @@ -139,6 +140,10 @@ void setRMDelegatedNodeLabelsUpdater( boolean isSchedulerReadyForAllocatingContainers(); Configuration getYarnConfiguration(); + + void setRMConstraintsManager(RMConstraintsManager rmConstraintsManager); + + RMConstraintsManager getRMConstraintsManager(); PlacementManager getQueuePlacementManager(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index 1e702de..97ec547 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints.RMConstraintsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; @@ -89,6 +90,31 @@ public RMContextImpl() { @VisibleForTesting // helper constructor for tests public RMContextImpl(Dispatcher rmDispatcher, + ContainerAllocationExpirer containerAllocationExpirer, + AMLivelinessMonitor amLivelinessMonitor, + AMLivelinessMonitor amFinishingMonitor, + DelegationTokenRenewer delegationTokenRenewer, + AMRMTokenSecretManager appTokenSecretManager, + RMContainerTokenSecretManager containerTokenSecretManager, + NMTokenSecretManagerInRM nmTokenSecretManager, + ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager, + ResourceScheduler scheduler) { + this(); + this.setDispatcher(rmDispatcher); + setActiveServiceContext(new RMActiveServiceContext(rmDispatcher, + containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, + delegationTokenRenewer, appTokenSecretManager, + containerTokenSecretManager, nmTokenSecretManager, + clientToAMTokenSecretManager, + scheduler, null)); + + ConfigurationProvider provider = new LocalConfigurationProvider(); + setConfigurationProvider(provider); + } + + @VisibleForTesting + // helper constructor for tests + public RMContextImpl(Dispatcher rmDispatcher, ContainerAllocationExpirer containerAllocationExpirer, AMLivelinessMonitor amLivelinessMonitor, AMLivelinessMonitor amFinishingMonitor, 
@@ -97,7 +123,8 @@ public RMContextImpl(Dispatcher rmDispatcher, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager, ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager, - ResourceScheduler scheduler) { + ResourceScheduler scheduler, + RMConstraintsManager constraintsManager) { this(); this.setDispatcher(rmDispatcher); setActiveServiceContext(new RMActiveServiceContext(rmDispatcher, @@ -105,7 +132,8 @@ public RMContextImpl(Dispatcher rmDispatcher, delegationTokenRenewer, appTokenSecretManager, containerTokenSecretManager, nmTokenSecretManager, clientToAMTokenSecretManager, - scheduler)); + scheduler, + constraintsManager)); ConfigurationProvider provider = new LocalConfigurationProvider(); setConfigurationProvider(provider); @@ -131,7 +159,7 @@ public RMContextImpl(Dispatcher rmDispatcher, appTokenSecretManager, containerTokenSecretManager, nmTokenSecretManager, - clientToAMTokenSecretManager, null); + clientToAMTokenSecretManager, null, null); } @Override @@ -499,4 +527,14 @@ public void setContainerQueueLimitCalculator( QueueLimitCalculator limitCalculator) { this.queueLimitCalculator = limitCalculator; } + + @Override + public void setRMConstraintsManager(RMConstraintsManager rmConstraintsManager) { + this.activeServiceContext.setRMConstraintsManager(rmConstraintsManager); + } + + @Override + public RMConstraintsManager getRMConstraintsManager() { + return activeServiceContext.getRMConstraintsManager(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 4509045..9fb9d21 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -103,6 +103,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints.RMConstraintsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; @@ -569,6 +570,14 @@ protected void serviceInit(Configuration configuration) throws Exception { rmContext.setRMDelegatedNodeLabelsUpdater(delegatedNodeLabelsUpdater); } + /** + * Initializing Constraint Manager - Used for better Long-Running Services + * Scheduling + */ + RMConstraintsManager rmConstraintsManager = + new RMConstraintsManager(rmContext); + rmContext.setRMConstraintsManager(rmConstraintsManager); + recoveryEnabled = conf.getBoolean(YarnConfiguration.RECOVERY_ENABLED, YarnConfiguration.DEFAULT_RM_RECOVERY_ENABLED); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java index 5dc8392..d0a7c7d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/RMNodeLabelsManager.java @@ -88,6 +88,29 @@ public void addLabelsToNode(Map> addedLabelsToNode) } } + @Override + public void addApplicationTags(String appId, NodeId nodeId, Long containerId, + Resource rs, Set tags) throws IOException { + try { + writeLock.lock(); + super.addApplicationTags(appId, nodeId, containerId, rs, tags); + } finally { + writeLock.unlock(); + } + } + + public void removeApplicationTags(String appId, NodeId nodeId, + Long containerId, Resource rs) throws IOException { + try { + writeLock.lock(); + super.removeApplicationTags(appId, nodeId, containerId, rs); + } finally { + writeLock.unlock(); + } + } + + + protected void checkRemoveFromClusterNodeLabelsOfQueue( Collection labelsToRemove) throws IOException { // Check if label to remove doesn't existed or null/empty, will throw @@ -555,4 +578,36 @@ public void setRMContext(RMContext rmContext) { readLock.unlock(); } } + + public Map getRMNodeLabels(){ + try{ + readLock.lock(); + return tagsCollections; + } finally { + readLock.unlock(); + } + } + + public RMNodeLabel getRMNodeAppTag(String name){ + try{ + readLock.lock(); + return tagsCollections.get(name); + } finally { + readLock.unlock(); + } + } + + public List pullRMNodeTagsInfo(){ + try{ + readLock.lock(); + List infos = new ArrayList<>(); + for (Entry entry : tagsCollections.entrySet()){ + infos.add(entry.getValue().getCopy()); + } + return infos; + } finally { + readLock.unlock(); + } + } + } diff --git 
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints;

import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.PlacementConstraint;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;

/**
 * Handles AFFINITY placement constraints: a request tagged with the
 * constraint's SOURCE tag may only be placed on a node that is within the
 * constraint's scope (NODE / RACK / DEFAULT) of the containers already
 * carrying the TARGET tag.
 */
public class AffinityConstraintHandler extends RMConstaintsHandler {

  private static final Log LOG =
      LogFactory.getLog(AffinityConstraintHandler.class.getCanonicalName());

  public AffinityConstraintHandler(RMContext rmContext,
      PlacementConstraint placementConstraint,
      ResourceRequest resourceRequest) {
    super(rmContext, placementConstraint, resourceRequest);
  }

  /**
   * @param applicationAttemptId attempt owning the resource request
   * @param node candidate node for the allocation
   * @return true when the constraint does not apply (request does not carry
   *         the SOURCE tag) or when the node is in scope of the TARGET tag's
   *         current placements; false otherwise
   */
  @Override
  public boolean canAssign(ApplicationAttemptId applicationAttemptId,
      SchedulerNode node) {
    Set<String> myTags = this.currentResourceRequest.getAllocationTags();

    // The constraint only applies when this request carries the SOURCE tag;
    // Set.contains replaces the original list-copy + indexOf membership test.
    if (!myTags.contains(this.placementConstraint.getSource())) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Source Tag " + this.placementConstraint.getSource()
            + " not found - constraint does not apply");
      }
      return true;
    }

    String target = normaliseTargetTag(this.placementConstraint.getTarget(),
        applicationAttemptId.getApplicationId().toString());
    RMNodeLabel targetLabel =
        this.rmContext.getNodeLabelManager().getRMNodeAppTag(target);
    // Affinity: the candidate node must lie within the scope of the nodes
    // that already host the target tag.
    return isLabelInScope(targetLabel, node,
        this.placementConstraint.getScope());
  }
}
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints;

import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.PlacementConstraint;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;

/**
 * Handles ANTI_AFFINITY placement constraints: a request tagged with the
 * constraint's SOURCE tag must NOT be placed on a node that is within the
 * constraint's scope (NODE / RACK / DEFAULT) of the containers already
 * carrying the TARGET tag.
 */
public class AntiAffinityConstraintHandler extends RMConstaintsHandler {

  private static final Log LOG =
      LogFactory.getLog(AntiAffinityConstraintHandler.class.getCanonicalName());

  public AntiAffinityConstraintHandler(RMContext rmContext,
      PlacementConstraint placementConstraint,
      ResourceRequest resourceRequest) {
    super(rmContext, placementConstraint, resourceRequest);
  }

  /**
   * @param applicationAttemptId attempt owning the resource request
   * @param node candidate node for the allocation
   * @return true when the constraint does not apply (request does not carry
   *         the SOURCE tag) or when the node is outside the scope of the
   *         TARGET tag's current placements; false otherwise
   */
  @Override
  public boolean canAssign(ApplicationAttemptId applicationAttemptId,
      SchedulerNode node) {
    Set<String> myTags = this.currentResourceRequest.getAllocationTags();

    // The constraint only applies when this request carries the SOURCE tag;
    // Set.contains replaces the original list-copy + indexOf membership test.
    if (!myTags.contains(this.placementConstraint.getSource())) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Source Tag " + this.placementConstraint.getSource()
            + " not found - constraint does not apply");
      }
      return true;
    }

    String target = normaliseTargetTag(this.placementConstraint.getTarget(),
        applicationAttemptId.getApplicationId().toString());
    RMNodeLabel targetLabel =
        this.rmContext.getNodeLabelManager().getRMNodeAppTag(target);

    // Anti-affinity: reject the node if it lies within the scope of the
    // nodes that already host the target tag.
    return !isLabelInScope(targetLabel, node,
        this.placementConstraint.getScope());
  }
}
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints;

import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.PlacementConstraint;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;

/**
 * Handles CARDINALITY placement constraints of the form
 * "RegionServer, 2, NODE, Cardinality": at most TARGET (a number) containers
 * carrying the SOURCE tag may be placed within the given scope. For
 * cardinality the tag being counted is the SOURCE tag itself.
 */
public class CardinalityConstraintHandler extends RMConstaintsHandler {

  private static final Log LOG =
      LogFactory.getLog(CardinalityConstraintHandler.class.getCanonicalName());

  public CardinalityConstraintHandler(RMContext rmContext,
      PlacementConstraint placementConstraint,
      ResourceRequest resourceRequest) {
    super(rmContext, placementConstraint, resourceRequest);
  }

  /**
   * @param applicationAttemptId attempt owning the resource request
   * @param node candidate node for the allocation
   * @return true when the constraint does not apply (request does not carry
   *         the SOURCE tag, or the TARGET is not a valid number) or when the
   *         cardinality limit is not yet reached in scope; false otherwise
   */
  @Override
  public boolean canAssign(ApplicationAttemptId applicationAttemptId,
      SchedulerNode node) {
    Set<String> myTags = this.currentResourceRequest.getAllocationTags();

    // The constraint only applies when this request carries the SOURCE tag;
    // Set.contains replaces the original list-copy + indexOf membership test.
    if (!myTags.contains(this.placementConstraint.getSource())) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Source Tag " + this.placementConstraint.getSource()
            + " not found - constraint does not apply");
      }
      return true;
    }

    // For cardinality the quadruple is "RegionServer, 2, NODE, Cardinality":
    // the counted tag is the SOURCE itself, the TARGET holds the numeric
    // limit. Guard the parse so a malformed constraint cannot throw an
    // unchecked NumberFormatException into the scheduler.
    int cardinalityLimit;
    try {
      cardinalityLimit = Integer.parseInt(this.placementConstraint.getTarget());
    } catch (NumberFormatException e) {
      LOG.warn("Invalid cardinality limit '"
          + this.placementConstraint.getTarget()
          + "' - ignoring cardinality constraint", e);
      return true;
    }

    String target = normaliseTargetTag(this.placementConstraint.getSource(),
        applicationAttemptId.getApplicationId().toString());
    RMNodeLabel targetLabel =
        this.rmContext.getNodeLabelManager().getRMNodeAppTag(target);

    return isCardinalityInScope(targetLabel, node,
        this.placementConstraint.getScope(), cardinalityLimit);
  }
}
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/constraints/DummyConstraintHandler.java @@ -0,0 +1,31 @@ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.PlacementConstraint; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; + +/** + * Created by pgaref on 7/14/16. + */ +public class DummyConstraintHandler extends RMConstaintsHandler { + + private static final Log LOG = + LogFactory.getLog(DummyConstraintHandler.class.getCanonicalName()); + + public DummyConstraintHandler(RMContext rmContext, + PlacementConstraint placementConstraint, + ResourceRequest resourceRequest) { + super(rmContext, placementConstraint, resourceRequest); + } + + @Override + public boolean canAssign(ApplicationAttemptId applicationAttemptId, + SchedulerNode node) { + return true; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/constraints/RMConstaintsHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/constraints/RMConstaintsHandler.java new file mode 100644 index 0000000..d393b4a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/constraints/RMConstaintsHandler.java @@ -0,0 +1,132 @@ +package 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.PlacementConstraint;
import org.apache.hadoop.yarn.api.records.PlacementConstraintScope;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;

/**
 * Base class for the per-type placement-constraint handlers (affinity,
 * anti-affinity, cardinality, no-op). Holds the constraint, the RM context
 * and the resource request under evaluation, and provides the shared
 * scope-checking helpers.
 */
public abstract class RMConstaintsHandler {

  private static final Log LOG =
      LogFactory.getLog(RMConstaintsHandler.class.getCanonicalName());

  protected PlacementConstraint placementConstraint = null;
  protected RMContext rmContext = null;
  protected ResourceRequest currentResourceRequest = null;

  public RMConstaintsHandler(RMContext rmContext,
      PlacementConstraint placementConstraint,
      ResourceRequest resourceRequest) {
    this.rmContext = rmContext;
    this.placementConstraint = placementConstraint;
    this.currentResourceRequest = resourceRequest;
  }

  /**
   * Decides whether the request may be placed on the given node under this
   * handler's constraint. Implemented by each concrete constraint handler.
   *
   * @param applicationAttemptId attempt owning the resource request
   * @param schedulerNode candidate node for the allocation
   * @return true if placement on the node satisfies the constraint
   */
  public abstract boolean canAssign(ApplicationAttemptId applicationAttemptId,
      SchedulerNode schedulerNode);

  /**
   * Checks that the candidate node is in scope of EVERY node currently
   * associated with the label.
   *
   * @param label tag whose current placements are checked (may be null)
   * @param node candidate node
   * @param scope NODE / RACK / DEFAULT comparison scope
   * @return true when the label has no placements yet (first allocation) or
   *         all of its nodes are in scope of the candidate node
   */
  protected boolean isLabelInScope(RMNodeLabel label, SchedulerNode node,
      PlacementConstraintScope scope) {
    // First allocation for this application/tag: nothing to compare against.
    if (label == null || label.getAssociatedNodeIds().isEmpty()) {
      return true;
    }
    for (NodeId nodeId : label.getAssociatedNodeIds()) {
      if (!isNodeInScope(nodeId, node, scope)) {
        return false;
      }
    }
    return true;
  }

  /**
   * Checks that placing one more container on the candidate node keeps the
   * number of in-scope placements of the label below the cardinality limit.
   *
   * @param label tag whose current placements are counted (may be null)
   * @param node candidate node
   * @param scope NODE / RACK / DEFAULT comparison scope
   * @param cardinalityLimit maximum allowed placements in scope
   * @return true when the limit would not be reached
   */
  protected boolean isCardinalityInScope(RMNodeLabel label, SchedulerNode node,
      PlacementConstraintScope scope, int cardinalityLimit) {
    // No placements recorded for this tag yet.
    if (label == null) {
      return true;
    }
    int currentCount = 0;
    for (NodeId nodeId : label.getAssociatedNodeIds()) {
      if (isNodeInScope(nodeId, node, scope)) {
        // ++ counts the existing nodeId; >= reserves room for the
        // allocation being attempted.
        if (++currentCount >= cardinalityLimit) {
          return false;
        }
      }
    }
    return true;
  }

  /**
   * Scope comparison between an existing placement and the candidate node.
   *
   * @param nodeId node of an existing placement
   * @param schedulerNode candidate node
   * @param placementConstraintScope DEFAULT (always in scope), NODE (same
   *        node) or RACK (same rack)
   * @return true when the existing placement is within scope of the candidate
   */
  protected boolean isNodeInScope(NodeId nodeId, SchedulerNode schedulerNode,
      PlacementConstraintScope placementConstraintScope) {
    switch (placementConstraintScope) {
    case DEFAULT:
      return true;
    case NODE:
      return nodeId.equals(schedulerNode.getNodeID());
    case RACK:
      RMNode rmNode = rmContext.getRMNodes().get(nodeId);
      if (rmNode == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(nodeId + " not found in RM context");
        }
        // Node no longer tracked: treat as in scope (best effort).
        return true;
      }
      return rmNode.getRackName().equals(schedulerNode.getRackName());
    default:
      return false;
    }
  }

  /**
   * Normalises a constraint TARGET tag to "appId.tag" form:
   * "HbaseRegionServer" refers to this application; "2232141.Hbase..."
   * already names another application; "*.Hbase..." refers to ALL
   * applications with that tag.
   *
   * @param target raw target tag from the constraint
   * @param appId this application's id
   * @return the qualified tag, or null for the (not yet implemented) "*" form
   */
  public String normaliseTargetTag(String target, String appId) {
    // No dot: the tag refers to this application.
    if (!target.contains(".")) {
      return appId + "." + target;
    }
    // Already qualified with a concrete appId.
    if (!target.contains("*")) {
      return target;
    }
    // TODO: implement the "*" case - return a list of Strings.
    return null;
  }
}
+ */ +public class RMConstraintsManager { + + private static final Log LOG = + LogFactory.getLog(RMConstraintsManager.class.getCanonicalName()); + + protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + protected final Lock writeLock = readWriteLock.writeLock(); + + protected Map appConstraintsMap = + null; + protected RMContext rmContext = null; + + public RMConstraintsManager(RMContext context) { + LOG.info("Initializing RMConstraintsManager"); + this.rmContext = context; + appConstraintsMap = + new HashMap(); + } + + public boolean canAssign(ApplicationAttemptId applicationAttemptId, + SchedulerNode node, ResourceRequest resourceRequest) { + ConstraintDefinition attemptConstraints = + appConstraintsMap.get(applicationAttemptId); + if (attemptConstraints == null) + return true; + LOG.info( + "PANOS: Retrieved constraints for Application: " + attemptConstraints); + + if (resourceRequest == null) + return true; + + return ConstraintsDispatcher(applicationAttemptId, attemptConstraints, node, + resourceRequest); + } + + private boolean ConstraintsDispatcher( + ApplicationAttemptId applicationAttemptId, + ConstraintDefinition attemptConstraints, SchedulerNode node, + ResourceRequest resourceRequest) { + // Try to satisfy one of the expressions (DNF form) + for (PlacementConstraintsExpression pExpression : attemptConstraints + .getPlacementConstraintsExpressions()) { + int statisfiedCount = 0; + for (PlacementConstraint placementConstraint : pExpression + .getPlacementConstraints()) { + + RMConstaintsHandler constaintsHandler = getConstrainntsHandler( + rmContext, placementConstraint, resourceRequest); + + LOG.info("PANOS: Node: " + node.getNodeID() + " Constraints Found: " + + attemptConstraints + " RR Tags: " + + resourceRequest.getAllocationTags() + "Success: " + + constaintsHandler.canAssign(applicationAttemptId, node)); + if (constaintsHandler.canAssign(applicationAttemptId, node)) + statisfiedCount++; + + if (statisfiedCount == 
pExpression.getPlacementConstraints().size()) { + LOG.info("PANOS: Placement Constraints: " + + pExpression.getPlacementConstraints() + + " successfully satisfied: " + statisfiedCount + " => by node " + + node.getNodeID()); + return true; + } + } + } + LOG.debug( + "Placement Constraints NOT satisfied => by node " + node.getNodeID()); + return false; + } + + private RMConstaintsHandler getConstrainntsHandler(RMContext rmContext, + PlacementConstraint placementConstraint, + ResourceRequest resourceRequest) { + switch (placementConstraint.getType()) { + case AFFINITY: + return new AffinityConstraintHandler(rmContext, placementConstraint, + resourceRequest); + case ANTI_AFFINITY: + return new AntiAffinityConstraintHandler(rmContext, placementConstraint, + resourceRequest); + case CARDINALITY: + return new CardinalityConstraintHandler(rmContext, placementConstraint, + resourceRequest); + case NO_TYPE: + return new DummyConstraintHandler(rmContext, placementConstraint, + resourceRequest); + default: + LOG.warn("Unrecognised Placement Constraint Type: " + + placementConstraint.getType()); + return new DummyConstraintHandler(rmContext, placementConstraint, + resourceRequest); + + } + + } + + public void registerApplicationAttempt(ApplicationAttemptId attemptId, + ConstraintDefinition constraintDefinition) { + if (constraintDefinition == null) { + LOG.warn( + "Attempting to register an AppAttempt with NULL Constraint Definition"); + return; + } + try { + this.writeLock.lock(); + LOG.debug("Registering Application Attempt : " + attemptId + + " with Constraint Definition : " + constraintDefinition); + this.appConstraintsMap.put(attemptId, constraintDefinition); + } finally { + this.writeLock.unlock(); + } + } + + public void unregisterApplicationAttempt(ApplicationAttemptId attemptId) { + + if (this.appConstraintsMap.get(attemptId) == null) { + LOG.warn("Attempting to UnRegister NON existing AppAttempt" + attemptId); + return; + } + try { + this.writeLock.lock(); + 
LOG.debug("UN-Registering Application Attempt : " + attemptId + + " with Constraint Definition : " + + this.appConstraintsMap.get(attemptId)); + this.appConstraintsMap.remove(attemptId); + } finally { + this.writeLock.unlock(); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java index 67d93a4..fff474e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java @@ -18,11 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.io.IOException; +import java.util.*; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -66,6 +63,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.AbstractContainerAllocator; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.allocator.ContainerAllocator; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.constraints.RMConstraintsManager; import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import 
org.apache.hadoop.yarn.util.resource.Resources; @@ -175,16 +173,40 @@ public synchronized boolean containerCompleted(RMContainer rmContainer, queue.getMetrics().releaseResources(getUser(), 1, containerResource); attemptResourceUsage.decUsed(partition, containerResource); + // Remove Application Tags associated with the completed container + try { + this.rmContext.getNodeLabelManager().removeApplicationTags( + rmContainer.getApplicationAttemptId().getApplicationId().toString(), + rmContainer.getContainer().getNodeId(), + rmContainer.getContainerId().getContainerId(), + rmContainer.getContainer().getResource()); + } catch (IOException ex) { + LOG.error("Failed to Remove Tags for App= " + + rmContainer.getApplicationAttemptId().getApplicationId().toString() + + " containerID= " + rmContainer.getContainerId().getContainerId() + + " NodeID= " + rmContainer.getContainer().getNodeId() + " Resource= " + + rmContainer.getContainer().getResource()); + } // Clear resource utilization metrics cache. 
lastMemoryAggregateAllocationUpdateTime = -1; return true; } + public synchronized RMContainer allocate(NodeType type, FiCaSchedulerNode node, SchedulerRequestKey schedulerKey, ResourceRequest request, Container container) { + // Check that allocation satisfies Constraints (when applicable) + RMConstraintsManager contraintManager = rmContext.getRMConstraintsManager(); + if (contraintManager != null) { + if (!contraintManager.canAssign(this.getApplicationAttemptId(), node, + request)) { + return null; + } + } + if (isStopped) { return null; } @@ -223,6 +245,24 @@ public synchronized RMContainer allocate(NodeType type, FiCaSchedulerNode node, rmContainer.handle( new RMContainerEvent(containerId, RMContainerEventType.START)); + // Add Application Tags associated with the allocated container + try { + this.rmContext.getNodeLabelManager().addApplicationTags( + rmContainer.getApplicationAttemptId().getApplicationId().toString(), + rmContainer.getContainer().getNodeId(), + rmContainer.getContainerId().getContainerId(), + rmContainer.getContainer().getResource(), + request.getAllocationTags()); + } catch (IOException ex) { + LOG.error( + "Failed to add Tags " + request.getAllocationTags() + " for App= " + + rmContainer.getApplicationAttemptId().getApplicationId() + .toString() + + " containerID= " + rmContainer.getContainer().getNodeId() + + " NodeID= " + rmContainer.getContainer().getNodeId() + + " Resource= " + rmContainer.getContainer().getResource()); + } + if (LOG.isDebugEnabled()) { LOG.debug("allocate: applicationAttemptId=" + containerId.getApplicationAttemptId() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java index ea85d13..1c82294 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java @@ -77,6 +77,19 @@ protected void render(Block html) { } row.td(info.getResource().toString())._(); } + + // Represent Dynamic Tags in a separate Table + tbody._()._().table("#tags").tr().th(".name", "Tag Name") + .th(".numOfActiveNMs", "Num Of Active Containers") + .th(".totalResource", "Total Resource"); + + for (RMNodeLabel tag : nlm.pullRMNodeTagsInfo()) { + TR>> row = tbody.tr().td( + tag.getLabelName().isEmpty() ? "" : tag.getLabelName()); + row = row.td().a(url("app", tag.getLabelName()), + String.valueOf(tag.getNumActiveNMs()))._(); + row.td(tag.getResource().toString())._(); + } tbody._()._(); } } @@ -88,6 +101,8 @@ protected void render(Block html) { set(DATATABLES_ID, "nodelabels"); setTableStyles(html, "nodelabels", ".healthStatus {width:10em}", ".healthReport {width:10em}"); +// setTableStyles(html, "tags", ".healthStatus {width:10em}", +// ".healthReport {width:10em}"); } @Override protected Class content() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 75bffc7..e8af92c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -1489,7 +1489,8 @@ protected ApplicationSubmissionContext createAppSubmissionContext( newApp.getApplicationType(), newApp.getKeepContainersAcrossApplicationAttempts(), newApp.getAppNodeLabelExpression(), - newApp.getAMContainerNodeLabelExpression()); + newApp.getAMContainerNodeLabelExpression(), + newApp.getConstraintDefinition()); appContext.setApplicationTags(newApp.getApplicationTags()); appContext.setAttemptFailuresValidityInterval( newApp.getAttemptFailuresValidityInterval()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java index 3d95ca1..1984870 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java @@ -27,6 +27,7 @@ import javax.xml.bind.annotation.XmlElementWrapper; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; import org.apache.hadoop.yarn.api.records.Priority; /** @@ -87,6 +88,9 @@ @XmlElement(name = "reservation-id") String reservationId; + @XmlElement(name = "application-constraint-definition") + ConstraintDefinition constraintDefinition; + public ApplicationSubmissionContextInfo() { applicationId = ""; 
applicationName = ""; @@ -103,6 +107,7 @@ public ApplicationSubmissionContextInfo() { logAggregationContextInfo = null; attemptFailuresValidityInterval = -1; reservationId = ""; + constraintDefinition = null; } public String getApplicationId() { @@ -173,6 +178,10 @@ public String getReservationId() { return reservationId; } + public ConstraintDefinition getConstraintDefinition() { + return this.constraintDefinition; + } + public void setApplicationId(String applicationId) { this.applicationId = applicationId; } @@ -244,4 +253,8 @@ public void setAttemptFailuresValidityInterval( public void setReservationId(String reservationId) { this.reservationId = reservationId; } + + public void setConstraintDefinition(ConstraintDefinition cd) { + this.constraintDefinition = cd; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java index 8f6a6c1..4057328 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java @@ -22,6 +22,7 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; +import java.util.Set; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -39,13 +40,13 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; -import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import 
org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Logger; +import org.junit.Assert; public class MockAM { @@ -127,8 +128,13 @@ public RegisterApplicationMasterResponse run() throws Exception { } public void addRequests(String[] hosts, int memory, int priority, - int containers) throws Exception { - requests.addAll(createReq(hosts, memory, priority, containers)); + int containers) throws Exception { + requests.addAll(createReq(hosts, memory, priority, containers, null, null)); + } + + public void addRequests(String[] hosts, int memory, int priority, + int containers, Set allocationTags) throws Exception { + requests.addAll(createReq(hosts, memory, priority, containers, null, allocationTags)); } public AllocateResponse schedule() throws Exception { @@ -159,17 +165,17 @@ public AllocateResponse allocate( List releases, String labelExpression) throws Exception { List reqs = createReq(new String[] { host }, memory, priority, numContainers, - labelExpression); + labelExpression, null); return allocate(reqs, releases); } public List createReq(String[] hosts, int memory, int priority, int containers) throws Exception { - return createReq(hosts, memory, priority, containers, null); + return createReq(hosts, memory, priority, containers, null, null); } public List createReq(String[] hosts, int memory, int priority, - int containers, String labelExpression) throws Exception { + int containers, String labelExpression, Set allocationTags) throws Exception { List reqs = new ArrayList(); if (hosts != null) { for (String host : hosts) { @@ -177,36 +183,35 @@ public AllocateResponse allocate( if (!host.equals(ResourceRequest.ANY)) { ResourceRequest hostReq = createResourceReq(host, 
memory, priority, containers, - labelExpression); + labelExpression, allocationTags); reqs.add(hostReq); ResourceRequest rackReq = createResourceReq("/default-rack", memory, priority, containers, - labelExpression); + labelExpression, allocationTags); reqs.add(rackReq); } } } ResourceRequest offRackReq = createResourceReq(ResourceRequest.ANY, memory, - priority, containers, labelExpression); + priority, containers, labelExpression, allocationTags); reqs.add(offRackReq); return reqs; } - - public ResourceRequest createResourceReq(String resource, int memory, int priority, - int containers) throws Exception { - return createResourceReq(resource, memory, priority, containers, null); - } public ResourceRequest createResourceReq(String resource, int memory, int priority, int containers, String labelExpression) throws Exception { return createResourceReq(resource, memory, priority, containers, - labelExpression, ExecutionTypeRequest.newInstance()); + labelExpression, null); } - public ResourceRequest createResourceReq(String resource, int memory, - int priority, int containers, String labelExpression, - ExecutionTypeRequest executionTypeRequest) throws Exception { + public ResourceRequest createResourceReq(String resource, int memory, int priority, + int containers) throws Exception { + return createResourceReq(resource, memory, priority, containers, null, null); + } + + public ResourceRequest createResourceReq(String resource, int memory, int priority, + int containers, String labelExpression, Set allocationTags) throws Exception { ResourceRequest req = Records.newRecord(ResourceRequest.class); req.setResourceName(resource); req.setNumContainers(containers); @@ -217,11 +222,11 @@ public ResourceRequest createResourceReq(String resource, int memory, capability.setMemorySize(memory); req.setCapability(capability); if (labelExpression != null) { - req.setNodeLabelExpression(labelExpression); + req.setNodeLabelExpression(labelExpression); } - 
req.setExecutionTypeRequest(executionTypeRequest); + if(allocationTags != null) + req.setAllocationTags(allocationTags); return req; - } public AllocateResponse allocate( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 7d1b3c3..356e109 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; @@ -94,8 +95,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; - - import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.apache.log4j.Level; @@ -409,13 +408,23 @@ public RMApp submitApp(int masterMemory) throws Exception { return submitApp(masterMemory, false); } + public RMApp submitApp(int masterMemory, ConstraintDefinition constraintDefinition) throws Exception { + Resource resource = 
Resource.newInstance(masterMemory, 0); + Priority priority = Priority.newInstance(0); + return submitApp(resource, "", UserGroupInformation.getCurrentUser() + .getShortUserName(), null, false, null, + super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, + false, false, null, 0, null, true, priority, constraintDefinition); + } + public RMApp submitApp(int masterMemory, Priority priority) throws Exception { Resource resource = Resource.newInstance(masterMemory, 0); return submitApp(resource, "", UserGroupInformation.getCurrentUser() .getShortUserName(), null, false, null, super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, - false, false, null, 0, null, true, priority); + false, false, null, 0, null, true, priority, null); } public RMApp submitApp(int masterMemory, boolean unmanaged) @@ -460,7 +469,7 @@ public RMApp submitApp(int masterMemory, String name, String user, return submitApp(resource, name, user, acls, false, queue, super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, false, - false, null, 0, null, true, priority, amLabel); + false, null, 0, null, true, priority, amLabel, null); } public RMApp submitApp(Resource resource, String name, String user, @@ -468,7 +477,7 @@ public RMApp submitApp(Resource resource, String name, String user, return submitApp(resource, name, user, acls, false, queue, super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, - true, false, false, null, 0, null, true, null); + true, false, false, null, 0, null, true, null, null); } public RMApp submitApp(int masterMemory, String name, String user, @@ -511,7 +520,7 @@ public RMApp submitApp(int masterMemory, String name, String user, resource.setMemorySize(masterMemory); return submitApp(resource, 
name, user, acls, unmanaged, queue, maxAppAttempts, ts, appType, waitForAccepted, keepContainers, - false, null, 0, null, true, Priority.newInstance(0)); + false, null, 0, null, true, Priority.newInstance(0), null); } public RMApp submitApp(int masterMemory, long attemptFailuresValidityInterval) @@ -523,7 +532,7 @@ public RMApp submitApp(int masterMemory, long attemptFailuresValidityInterval) .getShortUserName(), null, false, null, super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, false, - false, null, attemptFailuresValidityInterval, null, true, priority); + false, null, attemptFailuresValidityInterval, null, true, priority, null); } public RMApp submitApp(int masterMemory, String name, String user, @@ -536,7 +545,7 @@ public RMApp submitApp(int masterMemory, String name, String user, Priority priority = Priority.newInstance(0); return submitApp(resource, name, user, acls, unmanaged, queue, maxAppAttempts, ts, appType, waitForAccepted, keepContainers, - isAppIdProvided, applicationId, 0, null, true, priority); + isAppIdProvided, applicationId, 0, null, true, priority, null); } public RMApp submitApp(int masterMemory, @@ -548,7 +557,7 @@ public RMApp submitApp(int masterMemory, .getShortUserName(), null, false, null, super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, false, - false, null, 0, logAggregationContext, true, priority); + false, null, 0, logAggregationContext, true, priority, null); } public RMApp submitApp(Resource capability, String name, String user, @@ -557,11 +566,12 @@ public RMApp submitApp(Resource capability, String name, String user, boolean waitForAccepted, boolean keepContainers, boolean isAppIdProvided, ApplicationId applicationId, long attemptFailuresValidityInterval, LogAggregationContext logAggregationContext, - boolean cancelTokensWhenComplete, Priority priority) throws Exception { + 
boolean cancelTokensWhenComplete, Priority priority, + ConstraintDefinition constraintDefinition) throws Exception { return submitApp(capability, name, user, acls, unmanaged, queue, maxAppAttempts, ts, appType, waitForAccepted, keepContainers, isAppIdProvided, applicationId, attemptFailuresValidityInterval, - logAggregationContext, cancelTokensWhenComplete, priority, ""); + logAggregationContext, cancelTokensWhenComplete, priority, "", constraintDefinition); } public RMApp submitApp(Resource capability, String name, String user, @@ -570,7 +580,8 @@ public RMApp submitApp(Resource capability, String name, String user, boolean waitForAccepted, boolean keepContainers, boolean isAppIdProvided, ApplicationId applicationId, long attemptFailuresValidityInterval, LogAggregationContext logAggregationContext, - boolean cancelTokensWhenComplete, Priority priority, String amLabel) + boolean cancelTokensWhenComplete, Priority priority, String amLabel, + ConstraintDefinition constraintDefinition) throws Exception { ApplicationId appId = isAppIdProvided ? 
applicationId : null; ApplicationClientProtocol client = getClientRMService(); @@ -596,6 +607,10 @@ public RMApp submitApp(Resource capability, String name, String user, if (priority != null) { sub.setPriority(priority); } + + if(constraintDefinition != null) + sub.setConstraintDefinition(constraintDefinition); + sub.setApplicationType(appType); ContainerLaunchContext clc = Records .newRecord(ContainerLaunchContext.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index a9f1f63..5c9eeb3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -1349,7 +1349,7 @@ public void testContainerCompleteMsgNotLostAfterAMFailedAndRMRestart() throws Ex RMApp app0 = rm1.submitApp(resource, "", UserGroupInformation .getCurrentUser().getShortUserName(), null, false, null, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS, null, null, true, true, - false, null, 0, null, true, null); + false, null, 0, null, true, null, null); MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1); am0.allocate("127.0.0.1", 1000, 2, new ArrayList()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/DummyDNSResolver.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/DummyDNSResolver.java new file mode 100644 index 0000000..1fe87d8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/DummyDNSResolver.java @@ -0,0 +1,26 @@ +package org.apache.hadoop.yarn.server.resourcemanager.constraints; + +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.net.DNSToSwitchMapping; + +/** + * Created by pgaref on 7/20/16. + */ +public class DummyDNSResolver implements DNSToSwitchMapping { + static int rackIdentifier = 1; + + @Override + public List resolve(List names) { + return Arrays.asList("/rack" + rackIdentifier); + } + + @Override + public void reloadCachedMappings() { + } + + @Override + public void reloadCachedMappings(List names) { + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestAffinityPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestAffinityPlacement.java new file mode 100644 index 0000000..8ed4a5d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestAffinityPlacement.java @@ -0,0 +1,232 @@ +package org.apache.hadoop.yarn.server.resourcemanager.constraints; + +import java.util.ArrayList; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import 
org.apache.hadoop.yarn.api.records.ConstraintDefinition; +import org.apache.hadoop.yarn.api.records.PlacementConstraint; +import org.apache.hadoop.yarn.api.records.PlacementConstraintScope; +import org.apache.hadoop.yarn.api.records.PlacementConstraintType; +import org.apache.hadoop.yarn.api.records.PlacementConstraintsExpression; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Created by pgaref on 16/07/16. 
+ */ +public class TestAffinityPlacement { + + private final int GB = 1024; + private YarnConfiguration conf; + + @Before + public void setUp() throws Exception { + conf = new YarnConfiguration(); + // test capacity scheduler first + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + conf.setBoolean(YarnConfiguration.TAG_LABELS_ENABLED, true); + // Hack to apply custom DNS Resolving + conf.setClass( + CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + DummyDNSResolver.class, DNSToSwitchMapping.class); + } + + // @Ignore + @Test + public void testNodeAffinityRequired() throws Exception { + MockRM rm = new MockRM(conf); + rm.start(); + + // Initialize cluster + MockNM nm1 = rm.registerNode("host1:1234", 4 * GB); + MockNM nm2 = rm.registerNode("host2:2345", 4 * GB); + MockNM nm3 = rm.registerNode("host3:3456", 4 * GB); + + // Create affinity rule + PlacementConstraint placementConstraint = + PlacementConstraint.newInstance("RegionServer2", "RegionServer1", + PlacementConstraintType.AFFINITY, PlacementConstraintScope.NODE); + PlacementConstraintsExpression placementConstraintsExpression = + PlacementConstraintsExpression + .newInstance(new ArrayList() { + { + add(placementConstraint); + } + }); + ConstraintDefinition constraintDefinition = + ConstraintDefinition.newInstance(System.currentTimeMillis(), + new ArrayList() { + { + add(placementConstraintsExpression); + } + }); + + // submit an app with node-affinity rule + RMApp app = rm.submitApp(1 * GB, constraintDefinition); + MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); + + // request for First container + am.addRequests(new String[] { "*" }, 1 * GB, 1, 1, + Stream.of("RegionServer1").collect(Collectors.toSet())); + // request for Second container + am.addRequests(new String[] { "*" }, 1 * GB, 2, 1, + Stream.of("RegionServer2").collect(Collectors.toSet())); + AllocateResponse allocateResponse = am.schedule(); // send the request + + 
YarnScheduler scheduler = rm.getRMContext().getScheduler(); + SchedulerNode sn1 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm1.getNodeId()); + SchedulerNode sn2 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm2.getNodeId()); + SchedulerNode sn3 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm3.getNodeId()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + + // wait 5 seconds until container is allocated + waitUntilContainerAllocated(allocateResponse, 5000); + + // nm1 runs 2 containers, 1 for am and the other for app attempt + // nm2 runs 0 containers + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(0, sn2.getNumContainers()); + Assert.assertEquals(0, sn3.getNumContainers()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + am.doHeartbeat(); + + waitUntilContainerAllocated(allocateResponse, 5000); + + Assert.assertEquals(3, sn1.getNumContainers()); + Assert.assertEquals(0, sn2.getNumContainers()); + Assert.assertEquals(0, sn3.getNumContainers()); + // + // // nm1 runs 3 containers, 1 for am and the other 2 for app attempt + // // nm2 runs 0 containers + // Assert.assertEquals(3, sn1.getNumContainers()); + // Assert.assertEquals(0, sn2.getNumContainers()); + + // am.unregisterAppAttempt(); + rm.close(); + } + + /** + * Need to hack DNS for this! 
+ * + * @throws Exception + **/ + @Test + public void testRackAffinityRequired() throws Exception { + MockRM rm = new MockRM(conf); + rm.start(); + + // Initialize cluster + MockNM nm1 = rm.registerNode("host21:1234", 4 * GB, 4); + MockNM nm3 = rm.registerNode("host23:3456", 4 * GB, 4); + DummyDNSResolver.rackIdentifier = 2; + MockNM nm2 = rm.registerNode("host22:2345", 4 * GB, 4); + MockNM nm4 = rm.registerNode("host24:4567", 4 * GB, 4); + + // Create affinity rule + PlacementConstraint placementConstraint = + PlacementConstraint.newInstance("RegionServer2", "RegionServer1", + PlacementConstraintType.AFFINITY, PlacementConstraintScope.RACK); + PlacementConstraintsExpression placementConstraintsExpression = + PlacementConstraintsExpression + .newInstance(new ArrayList() { + { + add(placementConstraint); + } + }); + ConstraintDefinition constraintDefinition = + ConstraintDefinition.newInstance(System.currentTimeMillis(), + new ArrayList() { + { + add(placementConstraintsExpression); + } + }); + + // submit an app with node-affinity rule + RMApp app = rm.submitApp(1 * GB, constraintDefinition); + MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); + + // request for First container + am.addRequests(new String[] { "*" }, 1 * GB, 1, 1, + Stream.of("RegionServer1").collect(Collectors.toSet())); + // request for Second container + am.addRequests(new String[] { "*" }, 1 * GB, 2, 1, + Stream.of("RegionServer2").collect(Collectors.toSet())); + AllocateResponse allocateResponse = am.schedule(); // send the request + + YarnScheduler scheduler = rm.getRMContext().getScheduler(); + SchedulerNode sn1 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm1.getNodeId()); + SchedulerNode sn2 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm2.getNodeId()); + SchedulerNode sn3 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm3.getNodeId()); + SchedulerNode sn4 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm4.getNodeId()); + + nm1.nodeHeartbeat(true); + 
nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + nm4.nodeHeartbeat(true); + + // wait 5 seconds until container is allocated + waitUntilContainerAllocated(allocateResponse, 5000); + + // nm1 runs 2 containers, 1 for am and the other for app attempt + // nm2 runs 0 containers (Avoid different Rack) + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(0, sn2.getNumContainers()); + Assert.assertEquals(1, sn3.getNumContainers()); + Assert.assertEquals(0, sn4.getNumContainers()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + nm4.nodeHeartbeat(true); + am.doHeartbeat(); + + waitUntilContainerAllocated(allocateResponse, 5000); + + // sn1 and sn3 are on the same RACK! + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(0, sn2.getNumContainers()); + Assert.assertEquals(1, sn3.getNumContainers()); + Assert.assertEquals(0, sn4.getNumContainers()); + + // am.unregisterAppAttempt(); + rm.close(); + } + + private void waitUntilContainerAllocated(AllocateResponse response, + int timeoutInMs) throws Exception { + int timeSpent = 0; + while (timeSpent < timeoutInMs + && response.getAllocatedContainers().size() == 0) { + Thread.sleep(500); + timeSpent += 500; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestAntiAffinityPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestAntiAffinityPlacement.java new file mode 100644 index 0000000..fed7a35 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestAntiAffinityPlacement.java @@ -0,0 +1,198 @@ +package org.apache.hadoop.yarn.server.resourcemanager.constraints; + +import 
java.util.ArrayList; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; +import org.apache.hadoop.yarn.api.records.PlacementConstraint; +import org.apache.hadoop.yarn.api.records.PlacementConstraintScope; +import org.apache.hadoop.yarn.api.records.PlacementConstraintType; +import org.apache.hadoop.yarn.api.records.PlacementConstraintsExpression; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Created by pgaref on 16/07/16. 
+ */ +public class TestAntiAffinityPlacement { + + private final int GB = 1024; + private YarnConfiguration conf; + + @Before + public void setUp() throws Exception { + conf = new YarnConfiguration(); + // test capacity scheduler first + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + conf.setBoolean(YarnConfiguration.TAG_LABELS_ENABLED, true); + // Hack to apply custom DNS Resolving + conf.setClass( + CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + DummyDNSResolver.class, DNSToSwitchMapping.class); + } + + @Test + public void testNodeAntiAffinityRequired() throws Exception { + MockRM rm = new MockRM(conf); + rm.start(); + + // Initialize cluster + MockNM nm1 = rm.registerNode("host1:1234", 4 * GB); + MockNM nm2 = rm.registerNode("host2:2345", 4 * GB); + + // Create affinity rule + PlacementConstraint placementConstraint = PlacementConstraint.newInstance( + "RegionServer2", "RegionServer1", PlacementConstraintType.ANTI_AFFINITY, + PlacementConstraintScope.NODE); + PlacementConstraintsExpression placementConstraintsExpression = + PlacementConstraintsExpression + .newInstance(new ArrayList() { + { + add(placementConstraint); + } + }); + ConstraintDefinition constraintDefinition = + ConstraintDefinition.newInstance(System.currentTimeMillis(), + new ArrayList() { + { + add(placementConstraintsExpression); + } + }); + + // submit an app with node-affinity rule + RMApp app = rm.submitApp(1 * GB, constraintDefinition); + MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); + + // request for First container + am.addRequests(new String[] { "*" }, 1 * GB, 1, 1, + Stream.of("RegionServer1").collect(Collectors.toSet())); + // request for Second container + am.addRequests(new String[] { "*" }, 1 * GB, 2, 1, + Stream.of("RegionServer2").collect(Collectors.toSet())); + AllocateResponse allocateResponse = am.schedule(); // send the request + + YarnScheduler scheduler = 
rm.getRMContext().getScheduler(); + SchedulerNode sn1 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm1.getNodeId()); + SchedulerNode sn2 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm2.getNodeId()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + + // wait 5 seconds until container is allocated + waitUntilContainerAllocated(allocateResponse, 5000); + + // nm1 runs 2 containers, 1 for am and the other for app attempt + // nm2 runs 0 containers + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(1, sn2.getNumContainers()); + + rm.close(); + } + + @Test + public void testRackAntiAffinityRequired() throws Exception { + MockRM rm = new MockRM(conf); + rm.start(); + + // Initialize cluster + MockNM nm1 = rm.registerNode("host21:1234", 4 * GB, 4); + MockNM nm3 = rm.registerNode("host23:3456", 4 * GB, 4); + DummyDNSResolver.rackIdentifier = 2; + MockNM nm2 = rm.registerNode("host22:2345", 4 * GB, 4); + + // Create affinity rule + PlacementConstraint placementConstraint = PlacementConstraint.newInstance( + "RegionServer2", "RegionServer1", PlacementConstraintType.ANTI_AFFINITY, + PlacementConstraintScope.RACK); + PlacementConstraintsExpression placementConstraintsExpression = + PlacementConstraintsExpression + .newInstance(new ArrayList() { + { + add(placementConstraint); + } + }); + ConstraintDefinition constraintDefinition = + ConstraintDefinition.newInstance(System.currentTimeMillis(), + new ArrayList() { + { + add(placementConstraintsExpression); + } + }); + + // submit an app with node-affinity rule + RMApp app = rm.submitApp(1 * GB, constraintDefinition); + MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); + + // request for First container + am.addRequests(new String[] { "*" }, 1 * GB, 1, 1, + Stream.of("RegionServer1").collect(Collectors.toSet())); + // request for Second container + am.addRequests(new String[] { "*" }, 1 * GB, 2, 1, + Stream.of("RegionServer2").collect(Collectors.toSet())); + AllocateResponse 
allocateResponse = am.schedule(); // send the request + + YarnScheduler scheduler = rm.getRMContext().getScheduler(); + SchedulerNode sn1 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm1.getNodeId()); + SchedulerNode sn2 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm2.getNodeId()); + SchedulerNode sn3 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm3.getNodeId()); + + // Mix heartbeats! + nm1.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + + // wait 5 seconds until container is allocated + waitUntilContainerAllocated(allocateResponse, 5000); + + // nm1 runs 2 containers, 1 for am and the other for app attempt + // nm3 runs 0 containers (Avoid same Rack) + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(1, sn2.getNumContainers()); + Assert.assertEquals(0, sn3.getNumContainers()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + am.doHeartbeat(); + + waitUntilContainerAllocated(allocateResponse, 5000); + + // sn1 and sn3 are on the same RACK! 
+ Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(1, sn2.getNumContainers()); + Assert.assertEquals(0, sn3.getNumContainers()); + + // am.unregisterAppAttempt(); + rm.close(); + } + + private void waitUntilContainerAllocated(AllocateResponse response, + int timeoutInMs) throws Exception { + int timeSpent = 0; + while (timeSpent < timeoutInMs + && response.getAllocatedContainers().size() == 0) { + Thread.sleep(500); + timeSpent += 500; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestCardinalityPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestCardinalityPlacement.java new file mode 100644 index 0000000..cb23c43 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/constraints/TestCardinalityPlacement.java @@ -0,0 +1,239 @@ +package org.apache.hadoop.yarn.server.resourcemanager.constraints; + +import java.util.ArrayList; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ConstraintDefinition; +import org.apache.hadoop.yarn.api.records.PlacementConstraint; +import org.apache.hadoop.yarn.api.records.PlacementConstraintScope; +import org.apache.hadoop.yarn.api.records.PlacementConstraintType; +import org.apache.hadoop.yarn.api.records.PlacementConstraintsExpression; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import 
org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Created by pgaref on 17/07/16. + */ +public class TestCardinalityPlacement { + + private final int GB = 1024; + private YarnConfiguration conf; + + @Before + public void setUp() throws Exception { + conf = new YarnConfiguration(); + // test capacity scheduler first + conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, + ResourceScheduler.class); + conf.setBoolean(YarnConfiguration.TAG_LABELS_ENABLED, true); + // Hack to apply custom DNS Resolving + conf.setClass( + CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + DummyDNSResolver.class, DNSToSwitchMapping.class); + } + + @Test + public void testNodeCardinalityRequired() throws Exception { + MockRM rm = new MockRM(conf); + rm.start(); + + // Initialize cluster + MockNM nm1 = rm.registerNode("host1:1234", 4 * GB); + MockNM nm2 = rm.registerNode("host2:2345", 4 * GB); + MockNM nm3 = rm.registerNode("host3:3456", 4 * GB); + + // Create affinity rule + PlacementConstraint placementConstraint = + PlacementConstraint.newInstance("RegionServer", "1", + PlacementConstraintType.CARDINALITY, PlacementConstraintScope.NODE); + PlacementConstraintsExpression placementConstraintsExpression = + PlacementConstraintsExpression + .newInstance(new ArrayList() { + { + add(placementConstraint); + } + }); + ConstraintDefinition constraintDefinition = + 
ConstraintDefinition.newInstance(System.currentTimeMillis(), + new ArrayList() { + { + add(placementConstraintsExpression); + } + }); + + // submit an app with node-affinity rule + RMApp app = rm.submitApp(1 * GB, constraintDefinition); + MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); + + // request for First container + am.addRequests(new String[] { "*" }, 1 * GB, 1, 1, + Stream.of("RegionServer").collect(Collectors.toSet())); + // request for Second container + am.addRequests(new String[] { "*" }, 1 * GB, 2, 1, + Stream.of("RegionServer").collect(Collectors.toSet())); + // request for Third container + am.addRequests(new String[] { "*" }, 1 * GB, 3, 1, + Stream.of("RegionServer").collect(Collectors.toSet())); + AllocateResponse allocateResponse = am.schedule(); // send the request + + YarnScheduler scheduler = rm.getRMContext().getScheduler(); + SchedulerNode sn1 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm1.getNodeId()); + SchedulerNode sn2 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm2.getNodeId()); + SchedulerNode sn3 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm3.getNodeId()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + + // wait 5 seconds until container is allocated + waitUntilContainerAllocated(allocateResponse, 5000); + + // nm1 runs 2 containers, 1 for am and the other for app attempt + // nm2 runs 0 containers + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(1, sn2.getNumContainers()); + Assert.assertEquals(1, sn3.getNumContainers()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + am.doHeartbeat(); + + waitUntilContainerAllocated(allocateResponse, 5000); + + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(1, sn2.getNumContainers()); + Assert.assertEquals(1, sn3.getNumContainers()); + + rm.close(); + } + + @Test + public void testRackCardinalityRequired() throws Exception { + MockRM rm = new 
MockRM(conf); + rm.start(); + + // Initialize cluster + MockNM nm1 = rm.registerNode("host1:1234", 4 * GB); + MockNM nm2 = rm.registerNode("host2:1234", 4 * GB); + DummyDNSResolver.rackIdentifier = 2; + MockNM nm3 = rm.registerNode("host4:2345", 4 * GB); + MockNM nm4 = rm.registerNode("host5:2345", 4 * GB); + DummyDNSResolver.rackIdentifier = 3; + MockNM nm5 = rm.registerNode("host6:3456", 4 * GB); + MockNM nm6 = rm.registerNode("host7:3456", 4 * GB); + + // Create affinity rule + PlacementConstraint placementConstraint = + PlacementConstraint.newInstance("RegionServer", "1", + PlacementConstraintType.CARDINALITY, PlacementConstraintScope.RACK); + PlacementConstraintsExpression placementConstraintsExpression = + PlacementConstraintsExpression + .newInstance(new ArrayList() { + { + add(placementConstraint); + } + }); + ConstraintDefinition constraintDefinition = + ConstraintDefinition.newInstance(System.currentTimeMillis(), + new ArrayList() { + { + add(placementConstraintsExpression); + } + }); + + // submit an app with node-affinity rule + RMApp app = rm.submitApp(1 * GB, constraintDefinition); + MockAM am = MockRM.launchAndRegisterAM(app, rm, nm1); + + // request for First container + am.addRequests(new String[] { "*" }, 1 * GB, 1, 1, + Stream.of("RegionServer").collect(Collectors.toSet())); + // request for Second container + am.addRequests(new String[] { "*" }, 1 * GB, 2, 1, + Stream.of("RegionServer").collect(Collectors.toSet())); + // request for Third container + am.addRequests(new String[] { "*" }, 1 * GB, 3, 1, + Stream.of("RegionServer").collect(Collectors.toSet())); + AllocateResponse allocateResponse = am.schedule(); // send the request + + YarnScheduler scheduler = rm.getRMContext().getScheduler(); + SchedulerNode sn1 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm1.getNodeId()); + SchedulerNode sn2 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm2.getNodeId()); + SchedulerNode sn3 = + ((CapacityScheduler) 
scheduler).getSchedulerNode(nm3.getNodeId()); + SchedulerNode sn4 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm4.getNodeId()); + SchedulerNode sn5 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm5.getNodeId()); + SchedulerNode sn6 = + ((CapacityScheduler) scheduler).getSchedulerNode(nm6.getNodeId()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + nm4.nodeHeartbeat(true); + nm5.nodeHeartbeat(true); + nm6.nodeHeartbeat(true); + + // wait 5 seconds until container is allocated + waitUntilContainerAllocated(allocateResponse, 5000); + + // nm1 runs 2 containers, 1 for am and the other for app attempt + // nm2 runs 0 containers + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(0, sn2.getNumContainers()); + Assert.assertEquals(1, sn3.getNumContainers()); + Assert.assertEquals(0, sn4.getNumContainers()); + Assert.assertEquals(1, sn5.getNumContainers()); + Assert.assertEquals(0, sn6.getNumContainers()); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + nm3.nodeHeartbeat(true); + nm4.nodeHeartbeat(true); + nm5.nodeHeartbeat(true); + nm6.nodeHeartbeat(true); + am.doHeartbeat(); + + waitUntilContainerAllocated(allocateResponse, 5000); + + Assert.assertEquals(2, sn1.getNumContainers()); + Assert.assertEquals(0, sn2.getNumContainers()); + Assert.assertEquals(1, sn3.getNumContainers()); + Assert.assertEquals(0, sn4.getNumContainers()); + Assert.assertEquals(1, sn5.getNumContainers()); + Assert.assertEquals(0, sn6.getNumContainers()); + + rm.close(); + } + + private void waitUntilContainerAllocated(AllocateResponse response, + int timeoutInMs) throws Exception { + int timeSpent = 0; + while (timeSpent < timeoutInMs + && response.getAllocatedContainers().size() == 0) { + Thread.sleep(500); + timeSpent += 500; + } + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 84217c4..c1d118d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -182,7 +182,7 @@ public void testAppAttemptMetrics() throws Exception { FifoScheduler scheduler = new FifoScheduler(); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null, scheduler); + null, null, null, null, null, null, null, scheduler, null); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); @@ -229,7 +229,7 @@ public void testNodeLocalAssignment() throws Exception { FifoScheduler scheduler = new FifoScheduler(); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, - null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler); + null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler, null); rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class)); rmContext.setRMApplicationHistoryWriter( mock(RMApplicationHistoryWriter.class)); @@ -306,7 +306,7 @@ public void testUpdateResourceOnNode() throws Exception { FifoScheduler scheduler = new FifoScheduler(); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, - null, 
containerTokenSecretManager, nmTokenSecretManager, null, scheduler); + null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler, null); rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class)); rmContext.setRMApplicationHistoryWriter(mock(RMApplicationHistoryWriter.class)); ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 5dfee89..2d7a4f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -1145,13 +1145,13 @@ public void testAppSubmissionWithPreviousToken() throws Exception{ Resource resource = Records.newRecord(Resource.class); resource.setMemorySize(200); RMApp app1 = rm.submitApp(resource, "name", "user", null, false, null, 2, - credentials, null, true, false, false, null, 0, null, false, null); + credentials, null, true, false, false, null, 0, null, false, null, null); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING); // submit app2 with the same token, set cancelTokenWhenComplete to true; RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2, - credentials, null, true, false, false, null, 0, null, true, null); + credentials, null, true, false, false, null, 0, null, true, null, null); MockAM 
am2 = MockRM.launchAndRegisterAM(app2, rm, nm1); rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING); MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2); @@ -1208,7 +1208,7 @@ public void testCancelWithMultipleAppSubmissions() throws Exception{ resource.setMemorySize(200); RMApp app1 = rm.submitApp(resource, "name", "user", null, false, null, 2, credentials, - null, true, false, false, null, 0, null, true, null); + null, true, false, false, null, 0, null, true, null, null); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING); @@ -1216,7 +1216,7 @@ public void testCancelWithMultipleAppSubmissions() throws Exception{ Assert.assertNotNull(dttr); Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId())); RMApp app2 = rm.submitApp(resource, "name", "user", null, false, null, 2, - credentials, null, true, false, false, null, 0, null, true, null); + credentials, null, true, false, false, null, 0, null, true, null, null); MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1); rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING); Assert.assertTrue(renewer.getAllTokens().containsKey(token1)); @@ -1233,7 +1233,7 @@ public void testCancelWithMultipleAppSubmissions() throws Exception{ Assert.assertFalse(Renewer.cancelled); RMApp app3 = rm.submitApp(resource, "name", "user", null, false, null, 2, - credentials, null, true, false, false, null, 0, null, true, null); + credentials, null, true, false, false, null, 0, null, true, null, null); MockAM am3 = MockRM.launchAndRegisterAM(app3, rm, nm1); rm.waitForState(app3.getApplicationId(), RMAppState.RUNNING); Assert.assertTrue(renewer.getAllTokens().containsKey(token1));