diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java index 553ba70..b6fe7be 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapred.JobPriority; @@ -485,7 +486,8 @@ public static QueueInfo fromYarn(org.apache.hadoop.yarn.api.records.QueueInfo (queueInfo.getMaximumCapacity() < 0 ? "UNDEFINED" : queueInfo.getMaximumCapacity() * 100) + ", CurrentCapacity: " + queueInfo.getCurrentCapacity() * 100, fromYarn(queueInfo.getQueueState()), - TypeConverter.fromYarnApps(queueInfo.getApplications(), conf)); + TypeConverter.fromYarnApps(queueInfo.getApplications(), conf), + queueInfo.getNodeLabels(), queueInfo.getDefaultNodeLabelExpression()); List childQueues = new ArrayList(); for(org.apache.hadoop.yarn.api.records.QueueInfo childQueue : queueInfo.getChildQueues()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java index 097e338..44ab191 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java @@ -135,6 +135,14 @@ void printJobQueueInfo(JobQueueInfo jobQueueInfo, Writer writer, jobQueueInfo.getQueueState())); writer.write(String.format(prefix + "Scheduling Info : %s \n", jobQueueInfo.getSchedulingInfo())); + if (jobQueueInfo.getLabels() != null) { + writer.write(String.format(prefix + "Labels : %s \n", + jobQueueInfo.getLabels())); + } + if (jobQueueInfo.getDefaultLabelExpression() != null) { + writer.write(String.format(prefix + "DefaultLabelExpression : %s \n", + jobQueueInfo.getDefaultLabelExpression())); + } List childQueues = jobQueueInfo.getChildren(); if (childQueues != null && childQueues.size() > 0) { for (int i = 0; i < childQueues.size(); i++) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueInfo.java index 67b73ce..4025967 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueInfo.java @@ -60,6 +60,8 @@ public JobQueueInfo(String queueName, String schedulingInfo) { setQueueChildren(queue.getQueueChildren()); setProperties(queue.getProperties()); setJobStatuses(queue.getJobStatuses()); + 
setLabels(queue.getLabels()); + setDefaultLabelExpression(queue.getDefaultLabelExpression()); } /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueInfo.java index 6e6ce9e..46a9c51 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueInfo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/QueueInfo.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Set; import java.util.Properties; import org.apache.hadoop.classification.InterfaceAudience; @@ -54,6 +55,10 @@ private List children; private Properties props; + + private Set labels; + + private String defaultLabelExpression; /** * Default constructor for QueueInfo. @@ -79,6 +84,13 @@ public QueueInfo(String queueName, String schedulingInfo) { this.queueName = queueName; this.schedulingInfo = schedulingInfo; } + + public QueueInfo(String queueName, String schedulingInfo, QueueState state, + JobStatus[] stats) { + this(queueName, schedulingInfo); + this.queueState = state; + this.stats = stats; + } /** * * @param queueName * @param schedulingInfo * @param state * @param stats + * @param labels + * @param defaultLabelExpression */ public QueueInfo(String queueName, String schedulingInfo, QueueState state, - JobStatus[] stats) { - this(queueName, schedulingInfo); - this.queueState = state; - this.stats = stats; + JobStatus[] stats, Set labels, + String defaultLabelExpression) { + this(queueName, schedulingInfo, state, stats); + this.labels = labels; + this.defaultLabelExpression = defaultLabelExpression; } /** @@ -189,6 +204,22 @@ protected void setProperties(Properties props) { return stats; } + public Set getLabels() { + return labels; + } + + public String getDefaultLabelExpression() { + return defaultLabelExpression; + } + + public void setLabels(Set labels) { + this.labels = labels; + } + + public void setDefaultLabelExpression(String defaultLabelExpression) { + this.defaultLabelExpression = defaultLabelExpression; + } + @Override public void readFields(DataInput in) throws IOException { queueName = StringInterner.weakIntern(Text.readString(in)); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index 9419d03..30685f7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -320,6 +320,7 @@ private LocalResource createApplicationResource(FileContext fs, Path p, LocalRes return rsrc; } + @SuppressWarnings("deprecation") public ApplicationSubmissionContext createApplicationSubmissionContext( Configuration jobConf, String jobSubmitDir, Credentials ts) throws IOException {
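On the MapReduce side the labels simply ride along on the existing queue records, so a client that already walks the queue tree can surface them. A minimal sketch (assumes a configured cluster on the classpath; the snippet is a fragment, so the checked IOException is not handled):

    // org.apache.hadoop.mapred.JobClient / JobQueueInfo; labels may be null
    // when the cluster has no node labels configured.
    JobClient jobClient = new JobClient(new JobConf());
    for (JobQueueInfo queue : jobClient.getRootQueues()) {
      if (queue.getLabels() != null) {
        System.out.println(queue.getQueueName() + " labels: " + queue.getLabels());
      }
      if (queue.getDefaultLabelExpression() != null) {
        System.out.println("  default expression: " + queue.getDefaultLabelExpression());
      }
    }

diff --git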
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java index 2272e3e..2e0f14d 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java @@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; - import org.apache.hadoop.yarn.api.protocolrecords .RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords @@ -67,7 +66,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Logger; - import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator; import org.apache.hadoop.yarn.sls.scheduler.SchedulerWrapper; import org.apache.hadoop.yarn.sls.SLSRunner; diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java index 029fa87..fdddcf4 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -159,6 +160,10 @@ public String getNodeManagerVersion() { return null; } + @Override + public Set getNodeLabels() { + return null; + } } public static RMNode newNodeInfo(String rackName, String hostName, diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java index 7eca66f..3b185ae 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java @@ -33,6 +33,7 @@ import java.util.Collections; import java.util.List; +import java.util.Set; @Private @Unstable @@ -147,4 +148,8 @@ public String getNodeManagerVersion() { return node.getNodeManagerVersion(); } + @Override + public Set getNodeLabels() { + return null; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 0e6207b..64ae59e 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -188,6 +188,21 @@ + + + + + + + + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java index 74da4b5..b983f43 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java @@ -18,18 +18,20 @@ package org.apache.hadoop.yarn.api.records; +import java.util.Set; + import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.Records; -import java.util.Set; - /** *
ApplicationSubmissionContext represents all of the * information needed by the ResourceManager to launch @@ -72,7 +74,8 @@ public static ApplicationSubmissionContext newInstance( Priority priority, ContainerLaunchContext amContainer, boolean isUnmanagedAM, boolean cancelTokensWhenComplete, int maxAppAttempts, Resource resource, String applicationType, - boolean keepContainers) { + boolean keepContainers, String appLabelExpression, + String amContainerLabelExpression) { ApplicationSubmissionContext context = Records.newRecord(ApplicationSubmissionContext.class); context.setApplicationId(applicationId); @@ -83,11 +86,30 @@ public static ApplicationSubmissionContext newInstance( context.setUnmanagedAM(isUnmanagedAM); context.setCancelTokensWhenComplete(cancelTokensWhenComplete); context.setMaxAppAttempts(maxAppAttempts); - context.setResource(resource); context.setApplicationType(applicationType); context.setKeepContainersAcrossApplicationAttempts(keepContainers); + context.setNodeLabelExpression(appLabelExpression); + + ResourceRequest amReq = Records.newRecord(ResourceRequest.class); + amReq.setResourceName(ResourceRequest.ANY); + amReq.setCapability(resource); + amReq.setNumContainers(1); + amReq.setRelaxLocality(true); + amReq.setNodeLabelExpression(amContainerLabelExpression); + context.setAMContainerResourceRequest(amReq); return context; } + + public static ApplicationSubmissionContext newInstance( + ApplicationId applicationId, String applicationName, String queue, + Priority priority, ContainerLaunchContext amContainer, + boolean isUnmanagedAM, boolean cancelTokensWhenComplete, + int maxAppAttempts, Resource resource, String applicationType, + boolean keepContainers) { + return newInstance(applicationId, applicationName, queue, priority, + amContainer, isUnmanagedAM, cancelTokensWhenComplete, maxAppAttempts, + resource, applicationType, keepContainers, null, null); + } @Public @Stable @@ -98,7 +120,7 @@ public static ApplicationSubmissionContext newInstance( int maxAppAttempts, Resource resource, String applicationType) { return newInstance(applicationId, applicationName, queue, priority, amContainer, isUnmanagedAM, cancelTokensWhenComplete, maxAppAttempts, - resource, applicationType, false); + resource, applicationType, false, null, null); } @Public @@ -112,6 +134,29 @@ public static ApplicationSubmissionContext newInstance( amContainer, isUnmanagedAM, cancelTokensWhenComplete, maxAppAttempts, resource, null); } + + @Public + @Stable + public static ApplicationSubmissionContext newInstance( + ApplicationId applicationId, String applicationName, String queue, + ContainerLaunchContext amContainer, boolean isUnmanagedAM, + boolean cancelTokensWhenComplete, int maxAppAttempts, + String applicationType, boolean keepContainers, + String appLabelExpression, ResourceRequest resourceRequest) { + ApplicationSubmissionContext context = + Records.newRecord(ApplicationSubmissionContext.class); + context.setApplicationId(applicationId); + context.setApplicationName(applicationName); + context.setQueue(queue); + context.setAMContainerSpec(amContainer); + context.setUnmanagedAM(isUnmanagedAM); + context.setCancelTokensWhenComplete(cancelTokensWhenComplete); + context.setMaxAppAttempts(maxAppAttempts); + context.setApplicationType(applicationType); + context.setKeepContainersAcrossApplicationAttempts(keepContainers); + context.setAMContainerResourceRequest(resourceRequest); + return context; + } @Public @Stable @@ -289,6 +334,9 @@ public static ApplicationSubmissionContext newInstance( public 
abstract void setMaxAppAttempts(int maxAppAttempts); /** + * Note: this is DEPRECATED; please use getResource in + * getAMContainerResourceRequest instead. + * * Get the resource required by the ApplicationMaster for this * application. * @@ -296,7 +344,7 @@ public static ApplicationSubmissionContext newInstance( * this application. */ @Public - @Stable + @Deprecated public abstract Resource getResource(); /** @@ -307,7 +355,7 @@ public static ApplicationSubmissionContext newInstance( * for this application. */ @Public - @Stable + @Deprecated public abstract void setResource(Resource resource); /** @@ -379,6 +427,54 @@ public abstract void setKeepContainersAcrossApplicationAttempts( @Public @Stable public abstract void setApplicationTags(Set tags); + + /** + * Get node-label-expression for this app. If this is set, all containers of + * this application that do not set a node-label-expression in their + * ResourceRequest will be allocated resources only on those nodes that + * satisfy this node-label-expression. + * + * If a node-label-expression is set both on this app and on a + * ResourceRequest, the one set on the ResourceRequest is used when + * allocating containers. + * + * @return node-label-expression for this app + */ + @Public + @Evolving + public abstract String getNodeLabelExpression(); + + /** + * Set node-label-expression for this app + * @param nodeLabelExpression node-label-expression of this app + */ + @Public + @Evolving + public abstract void setNodeLabelExpression(String nodeLabelExpression); + + /** + * Get the ResourceRequest of the AM container. If this is not null, the + * scheduler will use it to acquire resources for the AM container. + * + * If this is null, the scheduler will assemble a ResourceRequest by using + * getResource and getPriority of the + * ApplicationSubmissionContext. + * + * The number of containers and the priority set on the request will be + * ignored.
+ * + * @return ResourceRequest of the AM container + */ + @Public + @Evolving + public abstract ResourceRequest getAMContainerResourceRequest(); + + /** + * Set the ResourceRequest of the AM container + * @param request ResourceRequest of the AM container + */ + @Public + @Evolving + public abstract void setAMContainerResourceRequest(ResourceRequest request); /** * Get the attemptFailuresValidityInterval in milliseconds for the application
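Together, setNodeLabelExpression and setAMContainerResourceRequest let a submitter place the app and its AM on different partitions. A minimal submission sketch (appId and amContainer are assumed to be built elsewhere; the label name is illustrative):

    // Run the app's containers on GPU nodes by default, but keep the AM on
    // the unlabeled partition.
    ApplicationSubmissionContext context =
        Records.newRecord(ApplicationSubmissionContext.class);
    context.setApplicationId(appId);
    context.setAMContainerSpec(amContainer);
    context.setNodeLabelExpression("GPU");

    // Explicit AM resource request; this replaces the deprecated setResource().
    ResourceRequest amReq = Records.newRecord(ResourceRequest.class);
    amReq.setResourceName(ResourceRequest.ANY);
    amReq.setCapability(Resource.newInstance(1024, 1));
    amReq.setNumContainers(1);          // ignored by the scheduler, see above
    amReq.setRelaxLocality(true);
    amReq.setNodeLabelExpression("");   // empty expression = unlabeled nodes
    context.setAMContainerResourceRequest(amReq);

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java index 7146db2..ff4f4cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.api.records; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -48,13 +49,24 @@ @Public @Stable public abstract class QueueInfo { - + @Private @Unstable public static QueueInfo newInstance(String queueName, float capacity, float maximumCapacity, float currentCapacity, List childQueues, List applications, QueueState queueState) { + return newInstance(queueName, capacity, maximumCapacity, currentCapacity, + childQueues, applications, queueState, null, null); + } + + @Private + @Unstable + public static QueueInfo newInstance(String queueName, float capacity, + float maximumCapacity, float currentCapacity, + List childQueues, List applications, + QueueState queueState, Set nodeLabels, + String defaultNodeLabelExpression) { QueueInfo queueInfo = Records.newRecord(QueueInfo.class); queueInfo.setQueueName(queueName); queueInfo.setCapacity(capacity); @@ -63,6 +75,8 @@ public static QueueInfo newInstance(String queueName, float capacity, queueInfo.setChildQueues(childQueues); queueInfo.setApplications(applications); queueInfo.setQueueState(queueState); + queueInfo.setNodeLabels(nodeLabels); + queueInfo.setDefaultNodeLabelExpression(defaultNodeLabelExpression); return queueInfo; } @@ -149,4 +163,29 @@ public static QueueInfo newInstance(String queueName, float capacity, @Private @Unstable public abstract void setQueueState(QueueState queueState); + + /** + * Get the node labels of the queue.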
+ @return node labels of the queue + */ + @Public + @Stable + public abstract Set getNodeLabels(); + + @Private + @Unstable + public abstract void setNodeLabels(Set labels); + + /** + * Get the default node label expression of the queue. + * @return default node label expression of the queue + */ + @Public + @Stable + public abstract String getDefaultNodeLabelExpression(); + + @Public + @Stable + public abstract void setDefaultNodeLabelExpression( + String defaultLabelExpression); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java index 86b55d1..7f86cae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java @@ -21,6 +21,7 @@ import java.io.Serializable; import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.util.Records; @@ -70,12 +71,22 @@ public static ResourceRequest newInstance(Priority priority, String hostName, @Stable public static ResourceRequest newInstance(Priority priority, String hostName, Resource capability, int numContainers, boolean relaxLocality) { + return newInstance(priority, hostName, capability, numContainers, + relaxLocality, null); + } + + @Public + @Stable + public static ResourceRequest newInstance(Priority priority, String hostName, + Resource capability, int numContainers, boolean relaxLocality, + String labelExpression) { ResourceRequest request = Records.newRecord(ResourceRequest.class); request.setPriority(priority); request.setResourceName(hostName); request.setCapability(capability); request.setNumContainers(numContainers); request.setRelaxLocality(relaxLocality); + request.setNodeLabelExpression(labelExpression); return request; } @@ -239,6 +250,32 @@ public static boolean isAnyLocation(String hostName) { @Stable public abstract void setRelaxLocality(boolean relaxLocality); + /** + * Get node-label-expression for this Resource Request. If this is set, all + * containers allocated to satisfy this resource-request will be placed only + * on those nodes that satisfy this node-label-expression. + * + * @return node-label-expression + */ + @Public + @Evolving + public abstract String getNodeLabelExpression(); + + /** + * Set the node label expression of this resource request. Currently only + * AND (&&) is supported; OR (||) and NOT (!) will be supported in the future.
+ * + * Examples: + * - "GPU && LARGE_MEM" asks for nodes that have both the GPU and + * LARGE_MEM labels + * - "" (empty) asks for nodes that have no label on them; this is the + * default behavior + * + * @param nodelabelExpression node-label-expression of this ResourceRequest + */ + @Public + @Evolving + public abstract void setNodeLabelExpression(String nodelabelExpression); +
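For comparison, the expanded factory method above covers the common case in one call. A sketch with illustrative sizing:

    // Four 2 GB / 2 vcore containers anywhere in the cluster, but only on
    // nodes whose labels satisfy "GPU". The last argument is the new
    // node-label-expression parameter.
    ResourceRequest gpuAsk = ResourceRequest.newInstance(
        Priority.newInstance(1), ResourceRequest.ANY,
        Resource.newInstance(2048, 2), 4, true, "GPU");

@Override public int hashCode() { final int prime = 2153; @@ -283,6 +320,20 @@ public boolean equals(Object obj) { return false; } else if (!priority.equals(other.getPriority())) return false; + if (getNodeLabelExpression() == null) { + if (other.getNodeLabelExpression() != null) { + return false; + } + } else { + // do normalize on label expression before compare + String label1 = getNodeLabelExpression().replaceAll("[\\t ]", ""); + String label2 = + other.getNodeLabelExpression() == null ? null : other + .getNodeLabelExpression().replaceAll("[\\t ]", ""); + if (!label1.equals(label2)) { + return false; + } + } return true; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 1a2aa1d..7b464ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1404,6 +1404,17 @@ public static final String YARN_HTTP_POLICY_KEY = YARN_PREFIX + "http.policy"; public static final String YARN_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY .name(); + + public static final String NODE_LABELS_PREFIX = YARN_PREFIX + "node-labels."; + + /** URI for NodeLabelManager */ + public static final String FS_NODE_LABELS_STORE_URI = NODE_LABELS_PREFIX + + "fs-store.uri"; + public static final String DEFAULT_FS_NODE_LABELS_STORE_URI = "file:///tmp/"; + public static final String FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = + NODE_LABELS_PREFIX + "fs-store.retry-policy-spec"; + public static final String DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = + "2000, 500"; public YarnConfiguration() { super(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java index 4b777ea..78aa417 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java @@ -30,6 +30,12 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest; +import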
org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; @@ -42,6 +48,10 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; @@ -110,4 +120,34 @@ public RefreshServiceAclsResponse refreshServiceAcls( public UpdateNodeResourceResponse updateNodeResource( UpdateNodeResourceRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public AddToClusterNodeLabelsResponse addToClusterNodeLabels(AddToClusterNodeLabelsRequest request) + throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels( + RemoveFromClusterNodeLabelsRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public ReplaceLabelsOnNodeResponse replaceLabelsOnNode( + ReplaceLabelsOnNodeRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public GetNodeToLabelsResponse getNodeToLabels( + GetNodeToLabelsRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public GetClusterNodeLabelsResponse getClusterNodeLabels( + GetClusterNodeLabelsRequest request) throws YarnException, IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddToClusterNodeLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddToClusterNodeLabelsRequest.java new file mode 100644 index 0000000..e2eaff1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddToClusterNodeLabelsRequest.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class AddToClusterNodeLabelsRequest { + public static AddToClusterNodeLabelsRequest newInstance(Set labels) { + AddToClusterNodeLabelsRequest request = + Records.newRecord(AddToClusterNodeLabelsRequest.class); + request.setLabels(labels); + return request; + } + + @Public + @Evolving + public abstract void setLabels(Set labels); + + @Public + @Evolving + public abstract Set getLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddToClusterNodeLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddToClusterNodeLabelsResponse.java new file mode 100644 index 0000000..7e70ca9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddToClusterNodeLabelsResponse.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class AddToClusterNodeLabelsResponse { + public static AddToClusterNodeLabelsResponse newInstance() { + return Records.newRecord(AddToClusterNodeLabelsResponse.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetClusterNodeLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetClusterNodeLabelsRequest.java new file mode 100644 index 0000000..208fe77 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetClusterNodeLabelsRequest.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class GetClusterNodeLabelsRequest { + public static GetClusterNodeLabelsRequest newInstance() { + return Records.newRecord(GetClusterNodeLabelsRequest.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetClusterNodeLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetClusterNodeLabelsResponse.java new file mode 100644 index 0000000..b0e6470 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetClusterNodeLabelsResponse.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class GetClusterNodeLabelsResponse { + public static GetClusterNodeLabelsResponse newInstance(Set labels) { + GetClusterNodeLabelsResponse response = + Records.newRecord(GetClusterNodeLabelsResponse.class); + response.setLabels(labels); + return response; + } + + @Public + @Evolving + public abstract void setLabels(Set labels); + + @Public + @Evolving + public abstract Set getLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsRequest.java new file mode 100644 index 0000000..56d6587 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsRequest.java @@ -0,0 +1,27 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements.
See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.yarn.util.Records; + +public abstract class GetNodeToLabelsRequest { + public static GetNodeToLabelsRequest newInstance() { + return Records.newRecord(GetNodeToLabelsRequest.class); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsResponse.java new file mode 100644 index 0000000..c8b40b5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsResponse.java @@ -0,0 +1,45 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.util.Records; + +public abstract class GetNodeToLabelsResponse { + public static GetNodeToLabelsResponse newInstance( + Map> map) { + GetNodeToLabelsResponse response = + Records.newRecord(GetNodeToLabelsResponse.class); + response.setNodeToLabels(map); + return response; + } + + @Public + @Evolving + public abstract void setNodeToLabels(Map> map); + + @Public + @Evolving + public abstract Map> getNodeToLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeIdToLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeIdToLabels.java new file mode 100644 index 0000000..7b901c8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeIdToLabels.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class NodeIdToLabels { + @Public + @Evolving + public static NodeIdToLabels newInstance(NodeId node, List labels) { + NodeIdToLabels record = Records.newRecord(NodeIdToLabels.class); + record.setLabels(labels); + record.setNodeId(node); + return record; + } + + @Public + @Evolving + public abstract void setNodeId(NodeId node); + + @Public + @Evolving + public abstract NodeId getNodeId(); + + @Public + @Evolving + public abstract void setLabels(List labels); + + @Public + @Evolving + public abstract List getLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveFromClusterNodeLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveFromClusterNodeLabelsRequest.java new file mode 100644 index 0000000..96297de --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveFromClusterNodeLabelsRequest.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class RemoveFromClusterNodeLabelsRequest { + public static RemoveFromClusterNodeLabelsRequest newInstance( + Set labels) { + RemoveFromClusterNodeLabelsRequest request = + Records.newRecord(RemoveFromClusterNodeLabelsRequest.class); + request.setLabels(labels); + return request; + } + + @Public + @Evolving + public abstract void setLabels(Set labels); + + @Public + @Evolving + public abstract Set getLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveFromClusterNodeLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveFromClusterNodeLabelsResponse.java new file mode 100644 index 0000000..de8867c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveFromClusterNodeLabelsResponse.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class RemoveFromClusterNodeLabelsResponse { + public static RemoveFromClusterNodeLabelsResponse newInstance() { + return Records.newRecord(RemoveFromClusterNodeLabelsResponse.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java new file mode 100644 index 0000000..28e261a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class ReplaceLabelsOnNodeRequest { + public static ReplaceLabelsOnNodeRequest newInstance( + Map> map) { + ReplaceLabelsOnNodeRequest request = + Records.newRecord(ReplaceLabelsOnNodeRequest.class); + request.setNodeToLabels(map); + return request; + } + + @Public + @Evolving + public abstract void setNodeToLabels(Map> map); + + @Public + @Evolving + public abstract Map> getNodeToLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeResponse.java new file mode 100644 index 0000000..f087cc5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeResponse.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class ReplaceLabelsOnNodeResponse { + public static ReplaceLabelsOnNodeResponse newInstance() { + return Records.newRecord(ReplaceLabelsOnNodeResponse.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto index 47a6cf7..ac7a616 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto @@ -39,4 +39,9 @@ service ResourceManagerAdministrationProtocolService { rpc refreshServiceAcls(RefreshServiceAclsRequestProto) returns (RefreshServiceAclsResponseProto); rpc getGroupsForUser(GetGroupsForUserRequestProto) returns (GetGroupsForUserResponseProto); rpc updateNodeResource (UpdateNodeResourceRequestProto) returns (UpdateNodeResourceResponseProto); + rpc addLabels(AddToClusterNodeLabelsRequestProto) returns (AddToClusterNodeLabelsResponseProto); + rpc removeLabels(RemoveFromClusterNodeLabelsRequestProto) returns (RemoveFromClusterNodeLabelsResponseProto); + rpc setNodeToLabels(ReplaceLabelsOnNodeRequestProto) returns (ReplaceLabelsOnNodeResponseProto); + rpc getNodeToLabels(GetNodeToLabelsRequestProto) returns (GetNodeToLabelsResponseProto); + rpc getLabels(GetClusterNodeLabelsRequestProto) returns (GetClusterNodeLabelsResponseProto); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto index 4637f03..b7649e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto @@ -75,6 +75,46 @@ message UpdateNodeResourceRequestProto { message UpdateNodeResourceResponseProto { } +message AddToClusterNodeLabelsRequestProto { + repeated string labels = 1; +} + +message AddToClusterNodeLabelsResponseProto { +} + +message RemoveFromClusterNodeLabelsRequestProto { + repeated string labels = 1; +} + +message RemoveFromClusterNodeLabelsResponseProto { +} + +message NodeIdToLabelsProto { + optional NodeIdProto nodeId = 1; + repeated string labels = 2; +} + +message ReplaceLabelsOnNodeRequestProto { + repeated NodeIdToLabelsProto nodeToLabels = 1; +} + +message ReplaceLabelsOnNodeResponseProto { + +} + +message GetNodeToLabelsRequestProto { +} + +message GetNodeToLabelsResponseProto { + repeated NodeIdToLabelsProto nodeToLabels = 1; +} + +message GetClusterNodeLabelsRequestProto { +} + +message GetClusterNodeLabelsResponseProto { + repeated string labels = 1; +} ////////////////////////////////////////////////////////////////// ///////////// RM Failover related records ////////////////////////
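A hedged sketch of how an admin client might drive the new RPCs end to end (admin is assumed to be an already-constructed ResourceManagerAdministrationProtocol proxy, e.g. from ClientRMProxy; hosts and labels are examples; java.util imports omitted):

    // Define labels cluster-wide, attach one to a node, then read the set back.
    admin.addToClusterNodeLabels(AddToClusterNodeLabelsRequest.newInstance(
        new HashSet<String>(Arrays.asList("GPU", "LARGE_MEM"))));

    Map<NodeId, Set<String>> mapping = new HashMap<NodeId, Set<String>>();
    mapping.put(NodeId.newInstance("host1.example.com", 0),
        Collections.singleton("GPU"));
    admin.replaceLabelsOnNode(ReplaceLabelsOnNodeRequest.newInstance(mapping));

    Set<String> clusterLabels = admin.getClusterNodeLabels(
        GetClusterNodeLabelsRequest.newInstance()).getLabels();

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto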
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index d07ce13..1a35951 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -241,6 +241,7 @@ message ResourceRequestProto { optional ResourceProto capability = 3; optional int32 num_containers = 4; optional bool relax_locality = 5 [default = true]; + optional string node_label_expression = 6; } enum AMCommandProto { @@ -294,6 +295,8 @@ message ApplicationSubmissionContextProto { optional int64 attempt_failures_validity_interval = 13 [default = -1]; optional LogAggregationContextProto log_aggregation_context = 14; optional ReservationIdProto reservation_id = 15; + optional string node_label_expression = 16; + optional ResourceRequestProto am_container_resource_request = 17; } message LogAggregationContextProto { @@ -329,6 +332,8 @@ message QueueInfoProto { optional QueueStateProto state = 5; repeated QueueInfoProto childQueues = 6; repeated ApplicationReportProto applications = 7; + repeated string nodeLabels = 8; + optional string defaultNodeLabelExpression = 9; } enum QueueACLProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index f3ce64c..8990f53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -113,6 +113,9 @@ private static final Log LOG = LogFactory.getLog(Client.class); + // && is a special character in shell, so we need to escape it + public final static String AMP = "?amp"; + // Configuration private Configuration conf; private YarnClient yarnClient; @@ -149,6 +152,7 @@ private int containerVirtualCores = 1; // No. of containers in which the shell script needs to be executed private int numContainers = 1; + private String labelExpression = null; // log4j.properties file // if available, add to local resources and set into classpath @@ -257,7 +261,7 @@ public Client(Configuration conf) throws Exception { "the application will be failed."); opts.addOption("debug", false, "Dump out debug information"); opts.addOption("help", false, "Print usage"); - + opts.addOption("node_label_expression", true, "Node label expression that will be used by this application"); } /** @@ -368,6 +372,7 @@ public boolean init(String[] args) throws ParseException { containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10")); containerVirtualCores = Integer.parseInt(cliParser.getOptionValue("container_vcores", "1")); numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1")); + if (containerMemory < 0 || containerVirtualCores < 0 || numContainers < 1) { throw new IllegalArgumentException("Invalid no.
of containers or container memory/vcores specified," @@ -376,6 +381,8 @@ public boolean init(String[] args) throws ParseException { + ", containerVirtualCores=" + containerVirtualCores + ", numContainer=" + numContainers); } + + labelExpression = cliParser.getOptionValue("node_label_expression", null); clientTimeout = Integer.parseInt(cliParser.getOptionValue("timeout", "600000")); @@ -394,6 +401,7 @@ public boolean init(String[] args) throws ParseException { * @throws IOException * @throws YarnException */ + @SuppressWarnings("deprecation") public boolean run() throws IOException, YarnException { LOG.info("Running Client"); @@ -575,6 +583,9 @@ public boolean run() throws IOException, YarnException { vargs.add("--container_memory " + String.valueOf(containerMemory)); vargs.add("--container_vcores " + String.valueOf(containerVirtualCores)); vargs.add("--num_containers " + String.valueOf(numContainers)); + if (null != labelExpression) { + appContext.setNodeLabelExpression(labelExpression); + } vargs.add("--priority " + String.valueOf(shellCmdPriority)); for (Map.Entry entry : shellEnv.entrySet()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java index f41c018..db86a3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java @@ -105,6 +105,7 @@ protected AMRMClient(String name) { final List racks; final Priority priority; final boolean relaxLocality; + final String labels; /** * Instantiates a {@link ContainerRequest} with the given constraints and @@ -124,9 +125,9 @@ protected AMRMClient(String name) { */ public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority) { - this(capability, nodes, racks, priority, true); + this(capability, nodes, racks, priority, true, null); } - + /** * Instantiates a {@link ContainerRequest} with the given constraints. * @@ -147,6 +148,32 @@ public ContainerRequest(Resource capability, String[] nodes, */ public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority, boolean relaxLocality) { + this(capability, nodes, racks, priority, relaxLocality, null); + } + + /** + * Instantiates a {@link ContainerRequest} with the given constraints. + * + * @param capability + * The {@link Resource} to be requested for each container. + * @param nodes + * Any hosts to request that the containers are placed on. + * @param racks + * Any racks to request that the containers are placed on. The + * racks corresponding to any hosts requested will be automatically + * added to this list. + * @param priority + * The priority at which to request the containers. Higher + * priorities have lower numerical values. + * @param relaxLocality + * If true, containers for this request may be assigned on hosts + * and racks other than the ones explicitly requested. 
+ * @param labels + * Set node labels to allocate resource + */ + public ContainerRequest(Resource capability, String[] nodes, + String[] racks, Priority priority, boolean relaxLocality, + String labels) { // Validate request Preconditions.checkArgument(capability != null, "The Resource to be requested for each container " + @@ -163,6 +190,7 @@ public ContainerRequest(Resource capability, String[] nodes, this.racks = (racks != null ? ImmutableList.copyOf(racks) : null); this.priority = priority; this.relaxLocality = relaxLocality; + this.labels = labels; } public Resource getCapability() { @@ -185,6 +213,10 @@ public boolean getRelaxLocality() { return relaxLocality; } + public String getLabelExpression() { + return labels; + } + public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Capability[").append(capability).append("]"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 88b2f45..aba97a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -251,7 +251,7 @@ public AllocateResponse allocate(float progressIndicator) // RPC layer is using it to send info across askList.add(ResourceRequest.newInstance(r.getPriority(), r.getResourceName(), r.getCapability(), r.getNumContainers(), - r.getRelaxLocality())); + r.getRelaxLocality(), r.getNodeLabelExpression())); } releaseList = new ArrayList(release); // optimistically clear this collection assuming no RPC failure @@ -436,25 +436,25 @@ public synchronized void addContainerRequest(T req) { } for (String node : dedupedNodes) { addResourceRequest(req.getPriority(), node, req.getCapability(), req, - true); + true, req.getLabelExpression()); } } for (String rack : dedupedRacks) { addResourceRequest(req.getPriority(), rack, req.getCapability(), req, - true); + true, req.getLabelExpression()); } // Ensure node requests are accompanied by requests for // corresponding rack for (String rack : inferredRacks) { addResourceRequest(req.getPriority(), rack, req.getCapability(), req, - req.getRelaxLocality()); + req.getRelaxLocality(), req.getLabelExpression()); } // Off-switch addResourceRequest(req.getPriority(), ResourceRequest.ANY, - req.getCapability(), req, req.getRelaxLocality()); + req.getCapability(), req, req.getRelaxLocality(), req.getLabelExpression()); } @Override @@ -608,8 +608,10 @@ private void addResourceRequestToAsk(ResourceRequest remoteRequest) { ask.add(remoteRequest); } - private void addResourceRequest(Priority priority, String resourceName, - Resource capability, T req, boolean relaxLocality) { + private void + addResourceRequest(Priority priority, String resourceName, + Resource capability, T req, boolean relaxLocality, + String labelExpression) { Map> remoteRequests = this.remoteRequestsTable.get(priority); if (remoteRequests == null) { @@ -642,6 +644,8 @@ private void addResourceRequest(Priority priority, String resourceName, if (relaxLocality) { resourceRequestInfo.containerRequests.add(req); } + + resourceRequestInfo.remoteRequest.setNodeLabelExpression(labelExpression); // Note this down for next interaction with ResourceManager 
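+ // The expression is applied unconditionally, so for a given (priority, + // resource-name, capability) entry the label expression of the most recently + // added ContainerRequest wins (see TestAMRMClient#testAskWithLabels below).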
addResourceRequestToAsk(resourceRequestInfo.remoteRequest); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java index 50e5825..9d9aaaa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java @@ -18,12 +18,24 @@ package org.apache.hadoop.yarn.client.cli; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; import java.io.IOException; +import java.net.ConnectException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; import java.util.Map; +import java.util.Set; -import com.google.common.collect.ImmutableMap; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -33,6 +45,7 @@ import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.RMHAServiceTarget; import org.apache.hadoop.yarn.conf.HAUtil; @@ -41,13 +54,21 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.nodelabels.NodeLabelsManager; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; + +import com.google.common.collect.ImmutableMap; @Private @Unstable @@ -55,6 +76,7 @@ private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); + private static final Log LOG = LogFactory.getLog(RMAdminCLI.class); protected final static Map ADMIN_USAGE = ImmutableMap.builder() @@ -78,7 +100,23 @@ .put("-help", new UsageInfo("[cmd]", "Displays help for the given command or all commands if none " + "is specified.")) - .build(); + .put("-addToClusterNodeLabels", + new 
UsageInfo("[labels splitted by ',']", + "add to cluster node labels ")) + .put("-removeFromClusterNodeLabels", + new UsageInfo("[labels splitted by ',']", + "remove from cluster node labels")) + .put("-replaceLabelsOnNode", + new UsageInfo("[node1:port,label1,label2 node2:port,label1,label2]", + "replace labels on nodes")) + .put("-getNodeToLabels", new UsageInfo("", + "Get node to label mappings")) + .put("-getClusterNodeLabels", + new UsageInfo("", "Get node labels in the cluster")) + .put("-loadNodeLabelsConfigFile", + new UsageInfo("[path/to to node-label.config]", + "Load labels config file")) + .build(); public RMAdminCLI() { super(); @@ -202,10 +240,26 @@ private static void printUsage(String cmd, boolean isHAEnabled) { } - protected ResourceManagerAdministrationProtocol createAdminProtocol() throws IOException { + protected ResourceManagerAdministrationProtocol createAdminProtocol() + throws IOException { + return createAdminProtocol(false); + } + + protected ResourceManagerAdministrationProtocol + createAdminProtocolDoNotRetry() throws IOException { + return createAdminProtocol(true); + } + + protected ResourceManagerAdministrationProtocol createAdminProtocol( + boolean doNotRetry) throws IOException { // Get the current configuration final YarnConfiguration conf = new YarnConfiguration(getConf()); - return ClientRMProxy.createRMProxy(conf, ResourceManagerAdministrationProtocol.class); + if (doNotRetry) { + conf.setInt("ipc.client.connect.max.retries", 0); + conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 0); + } + return ClientRMProxy.createRMProxy(conf, + ResourceManagerAdministrationProtocol.class); } private int refreshQueues() throws IOException, YarnException { @@ -285,6 +339,218 @@ private int getGroups(String[] usernames) throws IOException { return 0; } + private NodeLabelsManager getLocalNodeLabelManagerInstance() + throws IOException { + NodeLabelsManager localMgr = new NodeLabelsManager(); + + localMgr.init(getConf()); + localMgr.start(); + + return localMgr; + } + + private int addLabels(String args) throws IOException, YarnException { + Set labels = new HashSet(); + for (String p : args.split(",")) { + labels.add(p); + } + + return addLabels(labels); + } + + private int addLabels(Set labels) throws IOException, YarnException { + ResourceManagerAdministrationProtocol adminProtocol = + createAdminProtocolDoNotRetry(); + + try { + AddToClusterNodeLabelsRequest request = + AddToClusterNodeLabelsRequest.newInstance(labels); + adminProtocol.addToClusterNodeLabels(request); + } catch (ConnectException e) { + LOG.info("Failed to connect to RM, try to use standalone NodeLabelManager"); + NodeLabelsManager mgr = getLocalNodeLabelManagerInstance(); + mgr.addToCluserNodeLabels(labels); + mgr.stop(); + } + + return 0; + } + + private int removeLabels(String args) throws IOException, YarnException { + ResourceManagerAdministrationProtocol adminProtocol = + createAdminProtocolDoNotRetry(); + Set labels = new HashSet(); + for (String p : args.split(",")) { + labels.add(p); + } + + try { + RemoveFromClusterNodeLabelsRequest request = + RemoveFromClusterNodeLabelsRequest.newInstance(labels); + adminProtocol.removeFromClusterNodeLabels(request); + } catch (ConnectException e) { + LOG.info("Failed to connect to RM, try to use standalone NodeLabelManager"); + NodeLabelsManager mgr = getLocalNodeLabelManagerInstance(); + mgr.removeFromClusterNodeLabels(labels); + mgr.stop(); + } + + return 0; + } + + private int getNodeToLabels() throws IOException, YarnException { + 
ResourceManagerAdministrationProtocol adminProtocol = + createAdminProtocolDoNotRetry(); + + Map> nodeToLabels = null; + try { + nodeToLabels = + adminProtocol.getNodeToLabels(GetNodeToLabelsRequest.newInstance()) + .getNodeToLabels(); + } catch (ConnectException e) { + LOG.info("Failed to connect to RM, falling back to standalone NodeLabelsManager"); + NodeLabelsManager mgr = getLocalNodeLabelManagerInstance(); + nodeToLabels = mgr.getNodeLabels(); + mgr.stop(); + } + + for (NodeId host : sortNodeIdSet(nodeToLabels.keySet())) { + System.out.println(String.format("Host=%s, Labels=[%s]", host, + StringUtils.join(sortStrSet(nodeToLabels.get(host)), ","))); + } + return 0; + } + + private int getLabels() throws IOException, YarnException { + ResourceManagerAdministrationProtocol adminProto = + createAdminProtocolDoNotRetry(); + + Set labels = null; + try { + labels = + adminProto.getClusterNodeLabels( + GetClusterNodeLabelsRequest.newInstance()).getLabels(); + } catch (ConnectException e) { + LOG.info("Failed to connect to RM, falling back to standalone NodeLabelsManager"); + NodeLabelsManager mgr = getLocalNodeLabelManagerInstance(); + labels = mgr.getClusterNodeLabels(); + mgr.stop(); + } + + System.out.println(String.format("Labels=%s", + StringUtils.join(sortStrSet(labels).iterator(), ","))); + return 0; + } + + private int loadLabelsConfigFile(String configFile) throws IOException, + YarnException { + File file = new File(configFile); + if (!file.exists() || file.isDirectory()) { + LOG.error(String.format("ConfigFile=%s doesn't exist or is a directory", + configFile)); + return -1; + } + + StringBuilder sb = new StringBuilder(); + String line; + BufferedReader br = new BufferedReader(new FileReader(file)); + while (null != (line = br.readLine())) { + sb.append(line); + sb.append("\n"); + } + br.close(); + + Map> nodeToLabels = buildNodeLabelsFromStr(sb.toString()); + // register every label referenced by the file before mapping nodes + Set nodeLabelsCollection = new HashSet(); + for (Set labels : nodeToLabels.values()) { + nodeLabelsCollection.addAll(labels); + } + + int rc; + if (0 != (rc = addLabels(nodeLabelsCollection))) { + return rc; + } + return setNodeToLabels(nodeToLabels); + } + + private List sortNodeIdSet(Set nodes) { + List list = new ArrayList(); + list.addAll(nodes); + Collections.sort(list); + return list; + } + + private List sortStrSet(Set labels) { + List list = new ArrayList(); + list.addAll(labels); + Collections.sort(list); + return list; + } + + private Map> buildNodeLabelsFromStr(String args) + throws IOException { + Map> map = new HashMap>(); + + for (String nodeToLabels : args.split("[ \n]")) { + nodeToLabels = nodeToLabels.trim(); + if (nodeToLabels.isEmpty() || nodeToLabels.startsWith("#")) { + continue; + } + + String[] splits = nodeToLabels.split(","); + String nodeIdStr = splits[0]; + + if (nodeIdStr.trim().isEmpty()) { + throw new IOException("node name cannot be empty"); + } + + String nodeName; + int port; + if (nodeIdStr.contains(":")) { + nodeName = nodeIdStr.substring(0, nodeIdStr.indexOf(":")); + // skip the ':' itself when parsing the port + port = Integer.valueOf(nodeIdStr.substring(nodeIdStr.indexOf(":") + 1)); + } else { + nodeName = nodeIdStr; + port = 0; + } + + NodeId nodeId = NodeId.newInstance(nodeName, port); + + map.put(nodeId, new HashSet()); + + for (int i = 1; i < splits.length; i++) { + if (!splits[i].trim().isEmpty()) { + map.get(nodeId).add(splits[i].trim().toLowerCase()); + } + } + } + + return map; + } + + private int setNodeToLabels(String args) throws IOException, YarnException { + Map> map = buildNodeLabelsFromStr(args); + return setNodeToLabels(map); + } + + 
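For reference, the argument to -replaceLabelsOnNode and the file consumed by -loadNodeLabelsConfigFile share the format parsed by buildNodeLabelsFromStr above: whitespace- or newline-separated host[:port],label1,label2 tokens, where '#' starts a comment and a missing port means the wildcard port 0 (host-wide). A minimal sketch of what the parser produces, using made-up hosts and labels:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;

public class NodeLabelsConfigSketch {
  public static void main(String[] args) {
    // Input equivalent to a two-line config file (hosts/labels are made up):
    //   host1:8041,GPU,LARGE_MEM
    //   host2,gpu          <- no port: wildcard port 0, applies host-wide
    Map<NodeId, Set<String>> expected = new HashMap<NodeId, Set<String>>();
    // Labels are trimmed and lower-cased by the parser.
    expected.put(NodeId.newInstance("host1", 8041),
        new HashSet<String>(Arrays.asList("gpu", "large_mem")));
    expected.put(NodeId.newInstance("host2", 0),
        new HashSet<String>(Arrays.asList("gpu")));
    System.out.println(expected);
  }
}
```

Note that only this parser lower-cases label names; -addToClusterNodeLabels passes them through as typed, so keeping label names lower-case everywhere avoids mismatches during validation.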
private int setNodeToLabels(Map> map) throws IOException, + YarnException { + ResourceManagerAdministrationProtocol adminProtocol = + createAdminProtocolDoNotRetry(); + try { + ReplaceLabelsOnNodeRequest request = + ReplaceLabelsOnNodeRequest.newInstance(map); + adminProtocol.replaceLabelsOnNode(request); + } catch (ConnectException e) { + LOG.info("Failed to connect to RM, falling back to standalone NodeLabelsManager"); + NodeLabelsManager mgr = getLocalNodeLabelManagerInstance(); + mgr.replaceLabelsOnNode(map); + mgr.stop(); + } + + return 0; + } + @Override public int run(String[] args) throws Exception { YarnConfiguration yarnConf = @@ -351,6 +617,38 @@ public int run(String[] args) throws Exception { } else if ("-getGroups".equals(cmd)) { String[] usernames = Arrays.copyOfRange(args, i, args.length); exitCode = getGroups(usernames); + } else if ("-addToClusterNodeLabels".equals(cmd)) { + if (i >= args.length) { + System.err.println("Labels are not specified"); + exitCode = -1; + } else { + exitCode = addLabels(args[i]); + } + } else if ("-removeFromClusterNodeLabels".equals(cmd)) { + if (i >= args.length) { + System.err.println("Argument is not specified"); + exitCode = -1; + } else { + exitCode = removeLabels(args[i]); + } + } else if ("-replaceLabelsOnNode".equals(cmd)) { + if (i >= args.length) { + System.err.println("Argument is not specified"); + exitCode = -1; + } else { + exitCode = setNodeToLabels(args[i]); + } + } else if ("-getNodeToLabels".equals(cmd)) { + exitCode = getNodeToLabels(); + } else if ("-getClusterNodeLabels".equals(cmd)) { + exitCode = getLabels(); + } else if ("-loadNodeLabelsConfigFile".equals(cmd)) { + if (i >= args.length) { + System.err.println("Node label config file is not specified"); + exitCode = -1; + } else { + exitCode = loadLabelsConfigFile(args[i]); + } } else { exitCode = -1; System.err.println(cmd.substring(1) + ": Unknown command"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java index bfc6656..5a54ff0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java @@ -158,6 +158,7 @@ public void testGetContainersOnHA() throws Exception { reports); } + @SuppressWarnings("deprecation") @Test(timeout = 15000) public void testSubmitApplicationOnHA() throws Exception { ApplicationSubmissionContext appContext = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java index 4921452..036b8fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.client.api.impl; import com.google.common.base.Supplier; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -147,6 +148,7 @@ public static 
void setup() throws Exception { racks = new String[]{ rack }; } + @SuppressWarnings("deprecation") @Before public void startApp() throws Exception { // submit new app @@ -667,6 +669,28 @@ public void testAMRMClient() throws YarnException, IOException { } } } + + @Test (timeout=30000) + public void testAskWithLabels() { + AMRMClientImpl client = + new AMRMClientImpl(); + + // add x, y to ANY + client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, + 1), null, null, Priority.UNDEFINED, true, "x && y")); + Assert.assertEquals(1, client.ask.size()); + Assert.assertEquals("x && y", client.ask.iterator().next() + .getNodeLabelExpression()); + + // add x, y and a, b to ANY, only a, b should be kept + client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, + 1), null, null, Priority.UNDEFINED, true, "x && y")); + client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, + 1), null, null, Priority.UNDEFINED, true, "a && b")); + Assert.assertEquals(1, client.ask.size()); + Assert.assertEquals("a && b", client.ask.iterator().next() + .getNodeLabelExpression()); + } private void testAllocation(final AMRMClientImpl amClient) throws YarnException, IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index d7bea7a..e40d9dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -678,6 +678,7 @@ public ApplicationId run() throws Exception { } } + @SuppressWarnings("deprecation") private ApplicationId createApp(YarnClient rmClient, boolean unmanaged) throws Exception { YarnClientApplication newApp = rmClient.createApplication(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index 9462a4e..e1640c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -18,7 +18,8 @@ package org.apache.hadoop.yarn.api.records.impl.pb; -import com.google.common.base.CharMatcher; +import java.util.HashSet; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -29,6 +30,7 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto; @@ -38,12 +40,11 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; import 
org.apache.hadoop.yarn.proto.YarnProtos.ReservationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; +import com.google.common.base.CharMatcher; import com.google.protobuf.TextFormat; -import java.util.HashSet; -import java.util.Set; - @Private @Unstable public class ApplicationSubmissionContextPBImpl @@ -58,6 +59,7 @@ private ContainerLaunchContext amContainer = null; private Resource resource = null; private Set applicationTags = null; + private ResourceRequest amResourceRequest = null; private LogAggregationContext logAggregationContext = null; private ReservationId reservationId = null; @@ -117,6 +119,10 @@ private void mergeLocalToBuilder() { builder.clearApplicationTags(); builder.addAllApplicationTags(this.applicationTags); } + if (this.amResourceRequest != null) { + builder.setAmContainerResourceRequest( + convertToProtoFormat(this.amResourceRequest)); + } if (this.logAggregationContext != null) { builder.setLogAggregationContext( convertToProtoFormat(this.logAggregationContext)); @@ -140,8 +146,7 @@ private void maybeInitBuilder() { } viaProto = false; } - - + @Override public Priority getPriority() { ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; @@ -154,7 +159,7 @@ public Priority getPriority() { this.priority = convertFromProtoFormat(p.getPriority()); return this.priority; } - + @Override public void setPriority(Priority priority) { maybeInitBuilder(); @@ -349,6 +354,7 @@ public void setMaxAppAttempts(int maxAppAttempts) { builder.setMaxAppAttempts(maxAppAttempts); } + @Deprecated @Override public Resource getResource() { ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; @@ -362,6 +368,7 @@ public Resource getResource() { return this.resource; } + @Deprecated @Override public void setResource(Resource resource) { maybeInitBuilder(); @@ -414,6 +421,14 @@ private PriorityPBImpl convertFromProtoFormat(PriorityProto p) { private PriorityProto convertToProtoFormat(Priority t) { return ((PriorityPBImpl)t).getProto(); } + + private ResourceRequestPBImpl convertFromProtoFormat(ResourceRequestProto p) { + return new ResourceRequestPBImpl(p); + } + + private ResourceRequestProto convertToProtoFormat(ResourceRequest t) { + return ((ResourceRequestPBImpl)t).getProto(); + } private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) { return new ApplicationIdPBImpl(p); @@ -428,7 +443,8 @@ private ContainerLaunchContextPBImpl convertFromProtoFormat( return new ContainerLaunchContextPBImpl(p); } - private ContainerLaunchContextProto convertToProtoFormat(ContainerLaunchContext t) { + private ContainerLaunchContextProto convertToProtoFormat( + ContainerLaunchContext t) { return ((ContainerLaunchContextPBImpl)t).getProto(); } @@ -441,6 +457,47 @@ private ResourceProto convertToProtoFormat(Resource t) { } @Override + public String getNodeLabelExpression() { + ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasNodeLabelExpression()) { + return null; + } + return p.getNodeLabelExpression(); + } + + @Override + public void setNodeLabelExpression(String labelExpression) { + maybeInitBuilder(); + if (labelExpression == null) { + builder.clearNodeLabelExpression(); + return; + } + builder.setNodeLabelExpression(labelExpression); + } + + @Override + public ResourceRequest getAMContainerResourceRequest() { + ApplicationSubmissionContextProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.amResourceRequest != null) { + return amResourceRequest; + } // Else via proto + if (!p.hasAmContainerResourceRequest()) { + return null; + } + amResourceRequest = convertFromProtoFormat(p.getAmContainerResourceRequest()); + return amResourceRequest; + } + + @Override + public void setAMContainerResourceRequest(ResourceRequest request) { + maybeInitBuilder(); + if (request == null) { + builder.clearAmContainerResourceRequest(); + } + this.amResourceRequest = request; + } + + @Override public long getAttemptFailuresValidityInterval() { ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; return p.getAttemptFailuresValidityInterval(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java index 56a5b58..a552e4d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java @@ -19,8 +19,10 @@ package org.apache.hadoop.yarn.api.records.impl.pb; import java.util.ArrayList; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -44,6 +46,7 @@ List applicationsList; List childQueuesList; + Set nodeLabels; public QueueInfoPBImpl() { builder = QueueInfoProto.newBuilder(); @@ -281,6 +284,10 @@ private void mergeLocalToBuilder() { if (this.applicationsList != null) { addApplicationsToProto(); } + if (this.nodeLabels != null) { + builder.clearNodeLabels(); + builder.addAllNodeLabels(this.nodeLabels); + } } private void mergeLocalToProto() { @@ -322,5 +329,43 @@ private QueueState convertFromProtoFormat(QueueStateProto q) { private QueueStateProto convertToProtoFormat(QueueState queueState) { return ProtoUtils.convertToProtoFormat(queueState); } + + @Override + public void setNodeLabels(Set nodeLabels) { + maybeInitBuilder(); + builder.clearNodeLabels(); + this.nodeLabels = nodeLabels; + } + + private void initNodeLabels() { + if (this.nodeLabels != null) { + return; + } + QueueInfoProtoOrBuilder p = viaProto ? proto : builder; + this.nodeLabels = new HashSet(); + this.nodeLabels.addAll(p.getNodeLabelsList()); + } + + @Override + public Set getNodeLabels() { + initNodeLabels(); + return this.nodeLabels; + } + + @Override + public String getDefaultNodeLabelExpression() { + QueueInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasDefaultNodeLabelExpression()) ? 
p + .getDefaultNodeLabelExpression() : null; + } + @Override + public void setDefaultNodeLabelExpression(String defaultNodeLabelExpression) { + maybeInitBuilder(); + if (defaultNodeLabelExpression == null) { + builder.clearDefaultNodeLabelExpression(); + return; + } + builder.setDefaultNodeLabelExpression(defaultNodeLabelExpression); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java index 22863ac..0c8491f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java @@ -186,4 +186,23 @@ public String toString() { + ", Location: " + getResourceName() + ", Relax Locality: " + getRelaxLocality() + "}"; } + + @Override + public String getNodeLabelExpression() { + ResourceRequestProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasNodeLabelExpression()) { + return null; + } + return (p.getNodeLabelExpression()); + } + + @Override + public void setNodeLabelExpression(String nodeLabelExpression) { + maybeInitBuilder(); + if (nodeLabelExpression == null) { + builder.clearNodeLabelExpression(); + return; + } + builder.setNodeLabelExpression(nodeLabelExpression); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java new file mode 100644 index 0000000..4bb78e7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java @@ -0,0 +1,255 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.io.EOFException; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl; + +import com.google.common.collect.Sets; + +public class FileSystemNodeLabelsStore extends NodeLabelsStore { + + public FileSystemNodeLabelsStore(NodeLabelsManager mgr) { + super(mgr); + } + + protected static final Log LOG = LogFactory.getLog(FileSystemNodeLabelsStore.class); + + protected static final String ROOT_DIR_NAME = "FSNodeLabelManagerRoot"; + protected static final String MIRROR_FILENAME = "nodelabel.mirror"; + protected static final String EDITLOG_FILENAME = "nodelabel.editlog"; + + protected enum SerializedLogType { + ADD_LABELS, NODE_TO_LABELS, REMOVE_LABELS + } + + Path fsWorkingPath; + Path rootDirPath; + FileSystem fs; + FSDataOutputStream editlogOs; + Path editLogPath; + + @Override + public void init(Configuration conf) throws Exception { + fsWorkingPath = + new Path(conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_URI, + YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_URI)); + rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); + + setFileSystem(conf); + + // mkdir of root dir path + fs.mkdirs(rootDirPath); + } + + @Override + public void finalize() throws Exception { + try { + fs.close(); + editlogOs.close(); + } catch (Exception e) { + LOG.warn("Exception happened while shutting down", e); + } + } + + private void setFileSystem(Configuration conf) throws IOException { + Configuration confCopy = new Configuration(conf); + confCopy.setBoolean("dfs.client.retry.policy.enabled", true); + String retryPolicy = + confCopy.get(YarnConfiguration.FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC, + YarnConfiguration.DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC); + confCopy.set("dfs.client.retry.policy.spec", retryPolicy); + fs = fsWorkingPath.getFileSystem(confCopy); + + // if it's local file system, use RawLocalFileSystem instead of + // LocalFileSystem, the latter one doesn't support append. 
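+ // (LocalFileSystem is the checksummed wrapper, and the checksum layer is + // what rejects append; the RawLocalFileSystem underneath supports it.)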
+ if (fs.getScheme().equals("file")) { + fs = ((LocalFileSystem)fs).getRaw(); + } + } + + private void ensureAppendEditlogFile() throws IOException { + editlogOs = fs.append(editLogPath); + } + + private void ensureCloseEditlogFile() throws IOException { + editlogOs.close(); + } + + @Override + public void persistNodeToLabelsChanges( + Map> nodeToLabels) throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.NODE_TO_LABELS.ordinal()); + ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest + .newInstance(nodeToLabels)).getProto().writeDelimitedTo(editlogOs); + ensureCloseEditlogFile(); + } + + @Override + public void persistAddingLabels(Set labels) + throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.ADD_LABELS.ordinal()); + ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest.newInstance(labels)).getProto() + .writeDelimitedTo(editlogOs); + ensureCloseEditlogFile(); + } + + @Override + public void persistRemovingLabels(Collection labels) + throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.REMOVE_LABELS.ordinal()); + ((RemoveFromClusterNodeLabelsRequestPBImpl) RemoveFromClusterNodeLabelsRequest.newInstance(Sets + .newHashSet(labels.iterator()))).getProto().writeDelimitedTo(editlogOs); + ensureCloseEditlogFile(); + } + + @Override + public void recover() throws IOException { + /* + * Steps of recover + * 1) Read from last mirror (from mirror or mirror.old) + * 2) Read from last edit log, and apply such edit log + * 3) Write new mirror to mirror.writing + * 4) Rename mirror to mirror.old + * 5) Move mirror.writing to mirror + * 6) Remove mirror.old + * 7) Remove edit log and create a new empty edit log + */ + + // Open mirror from serialized file + Path mirrorPath = new Path(rootDirPath, MIRROR_FILENAME); + Path oldMirrorPath = new Path(rootDirPath, MIRROR_FILENAME + ".old"); + + FSDataInputStream is = null; + if (fs.exists(mirrorPath)) { + is = fs.open(mirrorPath); + } else if (fs.exists(oldMirrorPath)) { + is = fs.open(oldMirrorPath); + } + + if (null != is) { + Set labels = + new AddToClusterNodeLabelsRequestPBImpl( + AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is)).getLabels(); + Map> nodeToLabels = + new ReplaceLabelsOnNodeRequestPBImpl( + ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is)) + .getNodeToLabels(); + mgr.addToCluserNodeLabels(labels); + mgr.replaceLabelsOnNode(nodeToLabels); + is.close(); + } + + // Open and process editlog + editLogPath = new Path(rootDirPath, EDITLOG_FILENAME); + if (fs.exists(editLogPath)) { + is = fs.open(editLogPath); + + while (true) { + try { + // read edit log one by one + SerializedLogType type = SerializedLogType.values()[is.readInt()]; + + switch (type) { + case ADD_LABELS: { + Collection labels = + AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is) + .getLabelsList(); + mgr.addToCluserNodeLabels(Sets.newHashSet(labels.iterator())); + break; + } + case REMOVE_LABELS: { + Collection labels = + RemoveFromClusterNodeLabelsRequestProto.parseDelimitedFrom(is) + .getLabelsList(); + mgr.removeFromClusterNodeLabels(labels); + break; + } + case NODE_TO_LABELS: { + Map> map = + new ReplaceLabelsOnNodeRequestPBImpl( + ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is)) + .getNodeToLabels(); + mgr.replaceLabelsOnNode(map); + break; + } + } + } catch (EOFException e) { + // EOF hit, break + break; + } + } + } + + // Serialize current mirror to mirror.writing + Path writingMirrorPath = new 
Path(rootDirPath, MIRROR_FILENAME + ".writing"); + FSDataOutputStream os = fs.create(writingMirrorPath, true); + ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequestPBImpl + .newInstance(mgr.getClusterNodeLabels())).getProto().writeDelimitedTo(os); + ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest + .newInstance(mgr.getNodeLabels())).getProto().writeDelimitedTo(os); + os.close(); + + // Move mirror to mirror.old + if (fs.exists(mirrorPath)) { + fs.delete(oldMirrorPath, false); + fs.rename(mirrorPath, oldMirrorPath); + } + + // move mirror.writing to mirror + fs.rename(writingMirrorPath, mirrorPath); + fs.delete(writingMirrorPath, false); + + // remove mirror.old + fs.delete(oldMirrorPath, false); + + // create a new editlog file + editlogOs = fs.create(editLogPath, true); + editlogOs.close(); + + LOG.info("Finished write mirror at:" + mirrorPath.toString()); + LOG.info("Finished create editlog file at:" + editLogPath.toString()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsManager.java new file mode 100644 index 0000000..d740785 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsManager.java @@ -0,0 +1,785 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.nodelabels; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; +import java.util.regex.Pattern; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.nodelabels.event.AddToClusterNodeLabelsEvent; +import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsManagerEvent; +import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsManagerEventType; +import org.apache.hadoop.yarn.nodelabels.event.RemoveFromCluserNodeLabelsEvent; +import org.apache.hadoop.yarn.nodelabels.event.ReplaceLabelsOnNodeEvent; +import org.apache.hadoop.yarn.util.resource.Resources; + +import com.google.common.collect.ImmutableSet; + +public class NodeLabelsManager extends AbstractService { + protected static final Log LOG = LogFactory.getLog(NodeLabelsManager.class); + private static final int MAX_LABEL_LENGTH = 255; + public static final Set EMPTY_STRING_SET = Collections + .unmodifiableSet(new HashSet(0)); + public static final String ANY = "*"; + public static final Set ACCESS_ANY_LABEL_SET = ImmutableSet.of(ANY); + private static final Pattern LABEL_PATTERN = Pattern + .compile("^[0-9a-zA-Z][0-9a-zA-Z-_]*"); + public static final int WILDCARD_PORT = 0; + + /** + * If a user doesn't specify the label of a queue or node, it belongs to + * NO_LABEL + */ + public static final String NO_LABEL = ""; + + protected Dispatcher dispatcher; + + protected ConcurrentMap labelCollections = + new ConcurrentHashMap(); + protected ConcurrentMap nodeCollections = + new ConcurrentHashMap(); + + protected final ReadLock readLock; + protected final WriteLock writeLock; + protected AccessControlList adminAcl; + + protected NodeLabelsStore store; + + public static class LabelType { + public Resource resource; + + public LabelType() { + this.resource = Resource.newInstance(0, 0); + } + } + + public static class NodeType { + public Set labels; + public Map nms; + + public NodeType() { + labels = + Collections.newSetFromMap(new ConcurrentHashMap()); + nms = new ConcurrentHashMap(); + } + + @Override + public NodeType clone() { + NodeType c = new NodeType(); + c.labels = new HashSet(labels); + for (Entry entry : nms.entrySet()) { + c.nms.put(entry.getKey(), entry.getValue().clone()); + } + return c; + } + } + + public static class NMType { + public Set labels; + public Resource resource; + public boolean running; + + public NMType() { + labels = null; + resource = 
Resource.newInstance(0, 0); + running = false; + } + + public NMType clone() { + NMType c = new NMType(); + if (labels != null) { + c.labels = + Collections.newSetFromMap(new ConcurrentHashMap()); + // copy the label set, not just the empty container + c.labels.addAll(labels); + } else { + c.labels = null; + } + c.resource = Resources.clone(resource); + c.running = running; + return c; + } + } + + public static class QueueType { + public Set labels; + public Resource resource; + + public QueueType() { + labels = + Collections.newSetFromMap(new ConcurrentHashMap()); + resource = Resource.newInstance(0, 0); + } + } + + private final class ForwardingEventHandler implements + EventHandler { + + @Override + public void handle(NodeLabelsManagerEvent event) { + if (isInState(STATE.STARTED)) { + handleStoreEvent(event); + } + } + } + + // Dispatcher related code + protected void handleStoreEvent(NodeLabelsManagerEvent event) { + try { + switch (event.getType()) { + case ADD_LABELS: + AddToClusterNodeLabelsEvent addLabelsEvent = + (AddToClusterNodeLabelsEvent) event; + store.persistAddingLabels(addLabelsEvent.getLabels()); + break; + case REMOVE_LABELS: + RemoveFromCluserNodeLabelsEvent removeLabelsEvent = + (RemoveFromCluserNodeLabelsEvent) event; + store.persistRemovingLabels(removeLabelsEvent.getLabels()); + break; + case STORE_NODE_TO_LABELS: + ReplaceLabelsOnNodeEvent storeNodeToLabelsEvent = + (ReplaceLabelsOnNodeEvent) event; + store.persistNodeToLabelsChanges(storeNodeToLabelsEvent + .getNodeToLabels()); + break; + } + } catch (IOException e) { + LOG.error("Failed to store label modification to storage"); + throw new YarnRuntimeException(e); + } + } + + public NodeLabelsManager() { + super(NodeLabelsManager.class.getName()); + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + readLock = lock.readLock(); + writeLock = lock.writeLock(); + } + + // for UT purpose + protected void initDispatcher(Configuration conf) { + // create async handler + dispatcher = new AsyncDispatcher(); + AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher; + asyncDispatcher.init(conf); + asyncDispatcher.setDrainEventsOnStop(); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + adminAcl = + new AccessControlList(conf.get(YarnConfiguration.YARN_ADMIN_ACL, + YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)); + + initNodeLabelStore(conf); + + labelCollections.put(NO_LABEL, new LabelType()); + } + + protected void initNodeLabelStore(Configuration conf) throws Exception { + this.store = new FileSystemNodeLabelsStore(this); + this.store.init(conf); + this.store.recover(); + } + + public boolean checkAccess(UserGroupInformation user) { + // make sure only admin can invoke + // this method + if (adminAcl.isUserAllowed(user)) { + return true; + } + return false; + } + + // for UT purpose + protected void startDispatcher() { + // start dispatcher + AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher; + asyncDispatcher.start(); + } + + @Override + protected void serviceStart() throws Exception { + // init dispatcher only when the service starts, because recovery happens + // in service init and we don't want to trigger any event handling then. 
+ initDispatcher(getConfig()); + + dispatcher.register(NodeLabelsManagerEventType.class, + new ForwardingEventHandler()); + + startDispatcher(); + } + + /** + * Add multiple node labels to repository + * + * @param labels + * new node labels added + */ + @SuppressWarnings("unchecked") + public void addToCluserNodeLabels(Set labels) throws IOException { + if (null == labels || labels.isEmpty()) { + return; + } + + labels = normalizeLabels(labels); + + // do a check before actually adding them; throws an exception if any of + // them doesn't meet the label-name requirement + for (String label : labels) { + checkLabelName(label); + if (labelCollections.containsKey(label)) { + throw new IOException("Label to be added already exists, label=" + + label); + } + } + + try { + writeLock.lock(); + for (String label : labels) { + this.labelCollections.put(label, new LabelType()); + } + if (null != dispatcher) { + dispatcher.getEventHandler().handle(new AddToClusterNodeLabelsEvent(labels)); + } + + LOG.info("Add labels: [" + StringUtils.join(labels.iterator(), ",") + "]"); + } finally { + writeLock.unlock(); + } + } + + protected void checkAddLabelsToNode( + Map> addedLabelsToNode) throws IOException { + if (null == addedLabelsToNode || addedLabelsToNode.isEmpty()) { + return; + } + + // check that all labels being added exist + Set knownLabels = labelCollections.keySet(); + for (Entry> entry : addedLabelsToNode.entrySet()) { + if (!knownLabels.containsAll(entry.getValue())) { + String msg = + "Not all labels being added are contained in the known " + + "label collection, please check" + ", added labels=[" + + StringUtils.join(entry.getValue(), ",") + "]"; + LOG.error(msg); + throw new IOException(msg); + } + } + } + + @SuppressWarnings("unchecked") + protected void internalAddLabelsToNode( + Map> addedLabelsToNode) throws IOException { + // do add labels to nodes + Map> newNMToLabels = + new HashMap>(); + for (Entry> entry : addedLabelsToNode.entrySet()) { + NodeId nodeId = entry.getKey(); + Set labels = entry.getValue(); + + createNodeIfNonExisted(entry.getKey()); + + if (nodeId.getPort() == WILDCARD_PORT) { + NodeType node = nodeCollections.get(nodeId.getHost()); + node.labels.addAll(labels); + newNMToLabels.put(nodeId, node.labels); + } else { + NMType nm = getNMInNodeSet(nodeId); + if (nm.labels == null) { + nm.labels = new HashSet(); + } + nm.labels.addAll(labels); + newNMToLabels.put(nodeId, nm.labels); + } + } + + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new ReplaceLabelsOnNodeEvent(newNMToLabels)); + } + + // show node->labels mappings after the addition + LOG.info("addLabelsToNode:"); + for (Entry> entry : newNMToLabels.entrySet()) { + LOG.info(" NM=" + entry.getKey() + ", labels=[" + + StringUtils.join(entry.getValue().iterator(), ",") + "]"); + } + } + + /** + * add more labels to nodes + * + * @param addedLabelsToNode node -> labels map + */ + public void addLabelsToNode(Map> addedLabelsToNode) + throws IOException { + try { + writeLock.lock(); + checkAddLabelsToNode(addedLabelsToNode); + internalAddLabelsToNode(addedLabelsToNode); + } finally { + writeLock.unlock(); + } + } + + protected void checkRemoveFromClusterNodeLabels( + Collection labelsToRemove) throws IOException { + if (null == labelsToRemove || labelsToRemove.isEmpty()) { + return; + } + + // Check that no label to remove is unknown or null/empty; throws an + // exception if any label to remove doesn't meet the requirement + for (String label : labelsToRemove) { + label = normalizeLabel(label); + if (label == null || 
label.isEmpty()) { + throw new IOException("Label to be removed is null or empty"); + } + + if (!labelCollections.containsKey(label)) { + throw new IOException("Node label=" + label + + " to be removed doesn't exist in the cluster " + + "node labels collection."); + } + } + } + + @SuppressWarnings("unchecked") + protected void internalRemoveFromClusterNodeLabels(Collection labelsToRemove) { + // remove labels from nodes + for (String nodeName : nodeCollections.keySet()) { + NodeType node = nodeCollections.get(nodeName); + if (null != node) { + node.labels.removeAll(labelsToRemove); + for (NMType nm : node.nms.values()) { + if (nm.labels != null) { + nm.labels.removeAll(labelsToRemove); + } + } + } + } + + // remove labels from node labels collection + for (String label : labelsToRemove) { + labelCollections.remove(label); + } + + // create event to remove labels + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new RemoveFromCluserNodeLabelsEvent(labelsToRemove)); + } + + LOG.info("Remove labels: [" + + StringUtils.join(labelsToRemove.iterator(), ",") + "]"); + } + + /** + * Remove multiple node labels from repository + * + * @param labelsToRemove + * node labels to remove + * @throws IOException + */ + public void removeFromClusterNodeLabels(Collection labelsToRemove) + throws IOException { + try { + writeLock.lock(); + + checkRemoveFromClusterNodeLabels(labelsToRemove); + + internalRemoveFromClusterNodeLabels(labelsToRemove); + + } finally { + writeLock.unlock(); + } + } + + protected void checkRemoveLabelsFromNode( + Map> removeLabelsFromNode) throws IOException { + // check that all labels being removed exist + Set knownLabels = labelCollections.keySet(); + for (Entry> entry : removeLabelsFromNode.entrySet()) { + NodeId nodeId = entry.getKey(); + Set labels = entry.getValue(); + + if (!knownLabels.containsAll(labels)) { + String msg = + "Not all labels being removed are contained in the known " + + "label collection, please check" + ", removed labels=[" + + StringUtils.join(labels, ",") + "]"; + LOG.error(msg); + throw new IOException(msg); + } + + Set originalLabels = null; + + boolean nodeExisted = false; + if (WILDCARD_PORT != nodeId.getPort()) { + NMType nm = getNMInNodeSet(nodeId); + if (nm != null) { + originalLabels = nm.labels; + nodeExisted = true; + } + } else { + NodeType node = nodeCollections.get(nodeId.getHost()); + if (null != node) { + originalLabels = node.labels; + nodeExisted = true; + } + } + + if (!nodeExisted) { + String msg = + "Trying to remove labels from NM=" + nodeId + + ", but the NM doesn't exist"; + LOG.error(msg); + throw new IOException(msg); + } + + if (labels == null || labels.isEmpty()) { + continue; + } + + if (!originalLabels.containsAll(labels)) { + String msg = + "Trying to remove labels = [" + StringUtils.join(labels, ",") + + "], but not all labels are contained by NM=" + nodeId; + LOG.error(msg); + throw new IOException(msg); + } + } + } + + @SuppressWarnings("unchecked") + protected void internalRemoveLabelsFromNode(Map> removeLabelsFromNode) { + // do remove labels from nodes + Map> newNMToLabels = + new HashMap>(); + for (Entry> entry : removeLabelsFromNode.entrySet()) { + NodeId nodeId = entry.getKey(); + Set labels = entry.getValue(); + + if (nodeId.getPort() == WILDCARD_PORT) { + NodeType node = nodeCollections.get(nodeId.getHost()); + node.labels.removeAll(labels); + newNMToLabels.put(nodeId, node.labels); + } else { + NMType nm = getNMInNodeSet(nodeId); + if (nm.labels != null) { + nm.labels.removeAll(labels); + newNMToLabels.put(nodeId, 
nm.labels); + } + } + } + + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new ReplaceLabelsOnNodeEvent(newNMToLabels)); + } + + // show node->labels mappings after the removal + LOG.info("removeLabelsFromNode:"); + for (Entry> entry : newNMToLabels.entrySet()) { + LOG.info(" NM=" + entry.getKey() + ", labels=[" + + StringUtils.join(entry.getValue().iterator(), ",") + "]"); + } + } + + /** + * remove labels from nodes; the labels being removed must be contained by + * these nodes + * + * @param removeLabelsFromNode node -> labels map + */ + public void removeLabelsFromNode(Map> removeLabelsFromNode) + throws IOException { + try { + writeLock.lock(); + + checkRemoveLabelsFromNode(removeLabelsFromNode); + + internalRemoveLabelsFromNode(removeLabelsFromNode); + } finally { + writeLock.unlock(); + } + } + + protected void checkReplaceLabelsOnNode( + Map> replaceLabelsToNode) throws IOException { + if (null == replaceLabelsToNode || replaceLabelsToNode.isEmpty()) { + return; + } + + // check that all replacement labels exist + Set knownLabels = labelCollections.keySet(); + for (Entry> entry : replaceLabelsToNode.entrySet()) { + if (!knownLabels.containsAll(entry.getValue())) { + String msg = + "Not all labels being replaced are contained in the known " + + "label collection, please check" + ", new labels=[" + + StringUtils.join(entry.getValue(), ",") + "]"; + LOG.error(msg); + throw new IOException(msg); + } + } + } + + @SuppressWarnings("unchecked") + protected void internalReplaceLabelsOnNode( + Map> replaceLabelsToNode) { + // do replace labels on nodes + Map> newNMToLabels = new HashMap>(); + for (Entry> entry : replaceLabelsToNode.entrySet()) { + NodeId nodeId = entry.getKey(); + Set labels = entry.getValue(); + + // update nodeCollections + createNodeIfNonExisted(entry.getKey()); + if (nodeId.getPort() == WILDCARD_PORT) { + NodeType node = nodeCollections.get(nodeId.getHost()); + node.labels.clear(); + node.labels.addAll(labels); + newNMToLabels.put(nodeId, node.labels); + } else { + NMType nm = getNMInNodeSet(nodeId); + if (nm.labels == null) { + nm.labels = new HashSet(); + } + nm.labels.clear(); + nm.labels.addAll(labels); + newNMToLabels.put(nodeId, nm.labels); + } + } + + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new ReplaceLabelsOnNodeEvent(newNMToLabels)); + } + + // show node->labels mappings after the replacement + LOG.info("setLabelsToNode:"); + for (Entry> entry : newNMToLabels.entrySet()) { + LOG.info(" NM=" + entry.getKey() + ", labels=[" + + StringUtils.join(entry.getValue().iterator(), ",") + "]"); + } + } + + /** + * replace labels on nodes + * + * @param replaceLabelsToNode node -> labels map + */ + public void replaceLabelsOnNode(Map> replaceLabelsToNode) + throws IOException { + + try { + writeLock.lock(); + + checkReplaceLabelsOnNode(replaceLabelsToNode); + + internalReplaceLabelsOnNode(replaceLabelsToNode); + } finally { + writeLock.unlock(); + } + } + + public boolean containsNodeLabel(String label) { + try { + readLock.lock(); + return label != null + && (label.isEmpty() || labelCollections.containsKey(label)); + } finally { + readLock.unlock(); + } + } + + /** + * Get mapping of nodes to labels + * + * @return nodes to labels map + */ + public Map> getNodeLabels() { + try { + readLock.lock(); + Map> nodeToLabels = + new HashMap>(); + for (Entry entry : nodeCollections.entrySet()) { + String host = entry.getKey(); + NodeType node = entry.getValue(); + for (NodeId nodeId : node.nms.keySet()) { + Set nodeLabels = getLabelsByNode(nodeId); + if (nodeLabels == null || 
+          if (nodeLabels == null || nodeLabels.isEmpty()) {
+            continue;
+          }
+          nodeToLabels.put(nodeId, nodeLabels);
+        }
+        if (!node.labels.isEmpty()) {
+          nodeToLabels.put(NodeId.newInstance(host, WILDCARD_PORT),
+              node.labels);
+        }
+      }
+      return Collections.unmodifiableMap(nodeToLabels);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  public Set<String> getLabelsOnNode(NodeId nodeId) {
+    try {
+      readLock.lock();
+      Set<String> nodeLabels = getLabelsByNode(nodeId);
+      return Collections.unmodifiableSet(nodeLabels);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  /**
+   * Get the existing valid labels in the repository
+   *
+   * @return existing valid labels in the repository
+   */
+  public Set<String> getClusterNodeLabels() {
+    try {
+      readLock.lock();
+      Set<String> labels = new HashSet<String>(labelCollections.keySet());
+      labels.remove(NO_LABEL);
+      return Collections.unmodifiableSet(labels);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
+  private void checkLabelName(String label) throws IOException {
+    if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) {
+      throw new IOException("label is null/empty or exceeds "
+          + MAX_LABEL_LENGTH + " characters");
+    }
+    label = label.trim();
+
+    boolean match = LABEL_PATTERN.matcher(label).matches();
+
+    if (!match) {
+      throw new IOException("label name should only contain "
+          + "{0-9, a-z, A-Z, -, _} and should not start with {-,_}"
+          + ", now it is=" + label);
+    }
+  }
+
+  protected String normalizeLabel(String label) {
+    if (label != null) {
+      return label.trim();
+    }
+    return NO_LABEL;
+  }
+
+  private Set<String> normalizeLabels(Set<String> labels) {
+    Set<String> newLabels = new HashSet<String>();
+    for (String label : labels) {
+      newLabels.add(normalizeLabel(label));
+    }
+    return newLabels;
+  }
+
+  protected NMType getNMInNodeSet(NodeId nodeId) {
+    return getNMInNodeSet(nodeId, nodeCollections);
+  }
+
+  protected NMType getNMInNodeSet(NodeId nodeId, Map<String, NodeType> map) {
+    return getNMInNodeSet(nodeId, map, false);
+  }
+
+  protected NMType getNMInNodeSet(NodeId nodeId, Map<String, NodeType> map,
+      boolean checkRunning) {
+    if (WILDCARD_PORT == nodeId.getPort()) {
+      return null;
+    }
+
+    NodeType node = map.get(nodeId.getHost());
+    if (null == node) {
+      return null;
+    }
+    NMType nm = node.nms.get(nodeId);
+    if (null == nm) {
+      return null;
+    }
+    if (checkRunning) {
+      return nm.running ? nm : null;
+    }
+    return nm;
+  }
+
+  protected Set<String> getLabelsByNode(NodeId nodeId) {
+    return getLabelsByNode(nodeId, nodeCollections);
+  }
+
+  protected Set<String> getLabelsByNode(NodeId nodeId,
+      Map<String, NodeType> map) {
+    NodeType node = map.get(nodeId.getHost());
+    if (null == node) {
+      return EMPTY_STRING_SET;
+    }
+    NMType nm = node.nms.get(nodeId);
+    if (null != nm && null != nm.labels) {
+      return nm.labels;
+    } else {
+      return node.labels;
+    }
+  }
+
+  protected void createNodeIfNonExisted(NodeId nodeId) {
+    NodeType node = nodeCollections.get(nodeId.getHost());
+    if (null == node) {
+      node = new NodeType();
+      nodeCollections.put(nodeId.getHost(), node);
+    }
+    if (nodeId.getPort() != WILDCARD_PORT) {
+      NMType nm = node.nms.get(nodeId);
+      if (null == nm) {
+        node.nms.put(nodeId, new NMType());
+      }
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
new file mode 100644
index 0000000..a390ad0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
@@ -0,0 +1,54 @@
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+public abstract class NodeLabelsStore {
+  protected final NodeLabelsManager mgr;
+  protected Configuration conf;
+
+  public NodeLabelsStore(NodeLabelsManager mgr) {
+    this.mgr = mgr;
+  }
+
+  /**
+   * Store node -> label mappings
+   */
+  public abstract void persistNodeToLabelsChanges(
+      Map<NodeId, Set<String>> nodeToLabels) throws IOException;
+
+  /**
+   * Store new labels
+   */
+  public abstract void persistAddingLabels(Set<String> labels)
+      throws IOException;
+
+  /**
+   * Remove labels
+   */
+  public abstract void persistRemovingLabels(Collection<String> labels)
+      throws IOException;
+
+  /**
+   * Recover labels and node-to-labels mappings from the store
+   */
+  public abstract void recover() throws IOException;
+
+  public void init(Configuration conf) throws Exception {
+    this.conf = conf;
+  }
+
+  public void finalize() throws Exception {
+
+  }
+
+  public NodeLabelsManager getNodeLabelsManager() {
+    return mgr;
+  }
+}
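Taken together, the hunks above give the manager its mutation and query surface. The sketch below shows how a caller might drive it end to end; the sketch class, the add-labels call (defined in an earlier hunk, name assumed), and the literal wildcard port 0 are assumptions for illustration, not part of this patch.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.nodelabels.NodeLabelsManager;

import com.google.common.collect.Sets;

public class NodeLabelsManagerUsageSketch {
  // mgr is assumed to be a concrete, initialized NodeLabelsManager subclass.
  public static void demo(NodeLabelsManager mgr) throws Exception {
    // Labels must exist in the cluster collection before replace/remove;
    // the add method comes from an earlier hunk of this patch (name assumed).
    mgr.addToClusterNodeLabels(Sets.newHashSet("GPU"));

    // Port 0 stands in for WILDCARD_PORT here, i.e. the labels apply host-wide.
    Map<NodeId, Set<String>> replace = new HashMap<NodeId, Set<String>>();
    replace.put(NodeId.newInstance("host1", 0), Sets.newHashSet("GPU"));
    mgr.replaceLabelsOnNode(replace);

    // Query side: both methods return unmodifiable views under the read lock.
    Map<NodeId, Set<String>> nodeToLabels = mgr.getNodeLabels();
    Set<String> clusterLabels = mgr.getClusterNodeLabels();

    // Removal validates first, mutates second, then fires a
    // RemoveFromCluserNodeLabelsEvent so a NodeLabelsStore can persist it.
    mgr.removeFromClusterNodeLabels(Arrays.asList("GPU"));
  }
}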
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/AddToClusterNodeLabelsEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/AddToClusterNodeLabelsEvent.java
new file mode 100644
index 0000000..87aa678
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/AddToClusterNodeLabelsEvent.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels.event;
+
+import java.util.Set;
+
+public class AddToClusterNodeLabelsEvent extends NodeLabelsManagerEvent {
+  private Set<String> labels;
+
+  public AddToClusterNodeLabelsEvent(Set<String> labels) {
+    super(NodeLabelsManagerEventType.ADD_LABELS);
+    this.labels = labels;
+  }
+
+  public Set<String> getLabels() {
+    return labels;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelsManagerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelsManagerEvent.java
new file mode 100644
index 0000000..33916ab
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelsManagerEvent.java
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels.event;
+
+import org.apache.hadoop.yarn.event.AbstractEvent;
+
+public class NodeLabelsManagerEvent extends
+    AbstractEvent<NodeLabelsManagerEventType> {
+  public NodeLabelsManagerEvent(NodeLabelsManagerEventType type) {
+    super(type);
+  }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelsManagerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelsManagerEventType.java
new file mode 100644
index 0000000..ad534bc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/NodeLabelsManagerEventType.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels.event;
+
+public enum NodeLabelsManagerEventType {
+  REMOVE_LABELS,
+  ADD_LABELS,
+  STORE_NODE_TO_LABELS
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/RemoveFromCluserNodeLabelsEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/RemoveFromCluserNodeLabelsEvent.java
new file mode 100644
index 0000000..fba3282
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/RemoveFromCluserNodeLabelsEvent.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels.event;
+
+import java.util.Collection;
+
+public class RemoveFromCluserNodeLabelsEvent extends NodeLabelsManagerEvent {
+  private Collection<String> labels;
+
+  public RemoveFromCluserNodeLabelsEvent(Collection<String> labels) {
+    super(NodeLabelsManagerEventType.REMOVE_LABELS);
+    this.labels = labels;
+  }
+
+  public Collection<String> getLabels() {
+    return labels;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/ReplaceLabelsOnNodeEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/ReplaceLabelsOnNodeEvent.java
new file mode 100644
index 0000000..0b9aa6b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/event/ReplaceLabelsOnNodeEvent.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels.event;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+
+public class ReplaceLabelsOnNodeEvent extends NodeLabelsManagerEvent {
+  private Map<NodeId, Set<String>> nodeToLabels;
+
+  public ReplaceLabelsOnNodeEvent(Map<NodeId, Set<String>> nodeToLabels) {
+    super(NodeLabelsManagerEventType.STORE_NODE_TO_LABELS);
+    this.nodeToLabels = nodeToLabels;
+  }
+
+  public Map<NodeId, Set<String>> getNodeToLabels() {
+    return nodeToLabels;
+  }
+}
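The three event types above exist so that label mutations can be persisted asynchronously: the manager mutates its in-memory state under the write lock and then hands an event to the dispatcher. The patch wires the events to a NodeLabelsStore elsewhere; the following is only a sketch of what such a handler could look like, with the class name invented here.

import java.io.IOException;

import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.nodelabels.NodeLabelsStore;
import org.apache.hadoop.yarn.nodelabels.event.AddToClusterNodeLabelsEvent;
import org.apache.hadoop.yarn.nodelabels.event.NodeLabelsManagerEvent;
import org.apache.hadoop.yarn.nodelabels.event.RemoveFromCluserNodeLabelsEvent;
import org.apache.hadoop.yarn.nodelabels.event.ReplaceLabelsOnNodeEvent;

// Hypothetical handler name; the patch registers its own handler elsewhere.
public class StoreForwardingHandler
    implements EventHandler<NodeLabelsManagerEvent> {
  private final NodeLabelsStore store;

  public StoreForwardingHandler(NodeLabelsStore store) {
    this.store = store;
  }

  @Override
  public void handle(NodeLabelsManagerEvent event) {
    try {
      switch (event.getType()) {
      case ADD_LABELS:
        store.persistAddingLabels(
            ((AddToClusterNodeLabelsEvent) event).getLabels());
        break;
      case REMOVE_LABELS:
        store.persistRemovingLabels(
            ((RemoveFromCluserNodeLabelsEvent) event).getLabels());
        break;
      case STORE_NODE_TO_LABELS:
        store.persistNodeToLabelsChanges(
            ((ReplaceLabelsOnNodeEvent) event).getNodeToLabels());
        break;
      }
    } catch (IOException e) {
      // A real handler would surface this differently; rethrow for the sketch.
      throw new RuntimeException(e);
    }
  }
}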
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
index ccffaed..f99de78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
@@ -29,17 +29,28 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetClusterNodeLabelsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
@@ -52,8 +63,18 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
@@ -66,6 +87,10 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
@@ -205,5 +230,75 @@ public UpdateNodeResourceResponse updateNodeResource(
       return null;
     }
   }
-  
+
+  @Override
+  public AddToClusterNodeLabelsResponse addToClusterNodeLabels(
+      AddToClusterNodeLabelsRequest request) throws YarnException, IOException {
+    AddToClusterNodeLabelsRequestProto requestProto =
+        ((AddToClusterNodeLabelsRequestPBImpl) request).getProto();
+    try {
+      return new AddToClusterNodeLabelsResponsePBImpl(proxy.addLabels(null,
+          requestProto));
+    } catch (ServiceException e) {
+      RPCUtil.unwrapAndThrowException(e);
+      return null;
+    }
+  }
+
+  @Override
+  public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels(
+      RemoveFromClusterNodeLabelsRequest request) throws YarnException,
+      IOException {
+    RemoveFromClusterNodeLabelsRequestProto requestProto =
+        ((RemoveFromClusterNodeLabelsRequestPBImpl) request).getProto();
+    try {
+      return new RemoveFromClusterNodeLabelsResponsePBImpl(proxy.removeLabels(
+          null, requestProto));
+    } catch (ServiceException e) {
+      RPCUtil.unwrapAndThrowException(e);
+      return null;
+    }
+  }
+
+  @Override
+  public ReplaceLabelsOnNodeResponse replaceLabelsOnNode(
+      ReplaceLabelsOnNodeRequest request) throws YarnException, IOException {
+    ReplaceLabelsOnNodeRequestProto requestProto =
+        ((ReplaceLabelsOnNodeRequestPBImpl) request).getProto();
+    try {
+      return new ReplaceLabelsOnNodeResponsePBImpl(proxy.setNodeToLabels(
+          null, requestProto));
+    } catch (ServiceException e) {
+      RPCUtil.unwrapAndThrowException(e);
+      return null;
+    }
+  }
+
+  @Override
+  public GetNodeToLabelsResponse getNodeToLabels(GetNodeToLabelsRequest request)
+      throws YarnException, IOException {
+    GetNodeToLabelsRequestProto requestProto =
+        ((GetNodeToLabelsRequestPBImpl) request).getProto();
+    try {
+      return new GetNodeToLabelsResponsePBImpl(proxy.getNodeToLabels(
+          null, requestProto));
+    } catch (ServiceException e) {
+      RPCUtil.unwrapAndThrowException(e);
+      return null;
+    }
+  }
+
+  @Override
+  public GetClusterNodeLabelsResponse getClusterNodeLabels(
+      GetClusterNodeLabelsRequest request) throws YarnException, IOException {
+    GetClusterNodeLabelsRequestProto requestProto =
+        ((GetClusterNodeLabelsRequestPBImpl) request).getProto();
+    try {
+      return new GetClusterNodeLabelsResponsePBImpl(proxy.getLabels(
+          null, requestProto));
+    } catch (ServiceException e) {
+      RPCUtil.unwrapAndThrowException(e);
+      return null;
+    }
+  }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
index d1f71fe..4e7fe20 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
@@ -22,8 +22,14 @@
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetClusterNodeLabelsResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
@@ -36,17 +42,32 @@
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
@@ -59,6 +80,10 @@
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
@@ -204,4 +229,79 @@ public UpdateNodeResourceResponseProto updateNodeResource(RpcController controll
     }
   }
 
+  @Override
+  public AddToClusterNodeLabelsResponseProto addLabels(
+      RpcController controller, AddToClusterNodeLabelsRequestProto proto)
+      throws ServiceException {
+    AddToClusterNodeLabelsRequestPBImpl request =
+        new AddToClusterNodeLabelsRequestPBImpl(proto);
+    try {
+      AddToClusterNodeLabelsResponse response =
+          real.addToClusterNodeLabels(request);
+      return ((AddToClusterNodeLabelsResponsePBImpl) response).getProto();
+    } catch (YarnException e) {
+      throw new ServiceException(e);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public RemoveFromClusterNodeLabelsResponseProto removeLabels(
+      RpcController controller, RemoveFromClusterNodeLabelsRequestProto proto)
+      throws ServiceException {
+    RemoveFromClusterNodeLabelsRequestPBImpl request =
+        new RemoveFromClusterNodeLabelsRequestPBImpl(proto);
+    try {
+      RemoveFromClusterNodeLabelsResponse response =
+          real.removeFromClusterNodeLabels(request);
+      return ((RemoveFromClusterNodeLabelsResponsePBImpl) response).getProto();
+    } catch (YarnException e) {
+      throw new ServiceException(e);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public ReplaceLabelsOnNodeResponseProto setNodeToLabels(
+      RpcController controller, ReplaceLabelsOnNodeRequestProto proto)
+      throws ServiceException {
+    ReplaceLabelsOnNodeRequestPBImpl request =
+        new ReplaceLabelsOnNodeRequestPBImpl(proto);
+    try {
+      ReplaceLabelsOnNodeResponse response = real.replaceLabelsOnNode(request);
+      return ((ReplaceLabelsOnNodeResponsePBImpl) response).getProto();
+    } catch (YarnException e) {
+      throw new ServiceException(e);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetNodeToLabelsResponseProto getNodeToLabels(RpcController controller,
+      GetNodeToLabelsRequestProto proto) throws ServiceException {
+    GetNodeToLabelsRequestPBImpl request =
+        new GetNodeToLabelsRequestPBImpl(proto);
+    try {
+      GetNodeToLabelsResponse response = real.getNodeToLabels(request);
+      return ((GetNodeToLabelsResponsePBImpl) response).getProto();
+    } catch (YarnException e) {
+      throw new ServiceException(e);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetClusterNodeLabelsResponseProto getLabels(RpcController controller,
+      GetClusterNodeLabelsRequestProto proto) throws ServiceException {
+    GetClusterNodeLabelsRequestPBImpl request =
+        new GetClusterNodeLabelsRequestPBImpl(proto);
+    try {
+      GetClusterNodeLabelsResponse response =
+          real.getClusterNodeLabels(request);
+      return ((GetClusterNodeLabelsResponsePBImpl) response).getProto();
+    } catch (YarnException e) {
+      throw new ServiceException(e);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }
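The client and service stubs are symmetric: the client wraps records into protos before calling the proxy, and the service unwraps protos back into records before delegating to the real protocol implementation. A hedged end-to-end sketch follows, assuming an already-created ResourceManagerAdministrationProtocol handle and assuming the abstract ReplaceLabelsOnNodeRequest record exposes a setNodeToLabels setter mirroring the getter shown in this patch.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;

import com.google.common.collect.Sets;

public class ReplaceLabelsRpcSketch {
  // admin is assumed to be an already-created protocol proxy.
  public static void run(ResourceManagerAdministrationProtocol admin)
      throws Exception {
    RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
    ReplaceLabelsOnNodeRequest request =
        factory.newRecordInstance(ReplaceLabelsOnNodeRequest.class);

    Map<NodeId, Set<String>> map = new HashMap<NodeId, Set<String>>();
    map.put(NodeId.newInstance("host1", 0), Sets.newHashSet("GPU"));
    // setNodeToLabels is an assumption about the abstract record's surface.
    request.setNodeToLabels(map);

    // The PB client serializes this into ReplaceLabelsOnNodeRequestProto;
    // the PB service deserializes it and calls the real implementation.
    admin.replaceLabelsOnNode(request);
  }
}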
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java
new file mode 100644
index 0000000..0e5c655
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsRequestPBImpl.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
+
+public class AddToClusterNodeLabelsRequestPBImpl extends
+    AddToClusterNodeLabelsRequest {
+  Set<String> labels;
+  AddToClusterNodeLabelsRequestProto proto = AddToClusterNodeLabelsRequestProto
+      .getDefaultInstance();
+  AddToClusterNodeLabelsRequestProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public AddToClusterNodeLabelsRequestPBImpl() {
+    this.builder = AddToClusterNodeLabelsRequestProto.newBuilder();
+  }
+
+  public AddToClusterNodeLabelsRequestPBImpl(
+      AddToClusterNodeLabelsRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = AddToClusterNodeLabelsRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.labels != null && !this.labels.isEmpty()) {
+      builder.addAllLabels(this.labels);
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto)
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  public AddToClusterNodeLabelsRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void initLabels() {
+    if (this.labels != null) {
+      return;
+    }
+    AddToClusterNodeLabelsRequestProtoOrBuilder p = viaProto ? proto : builder;
+    this.labels = new HashSet<String>();
+    this.labels.addAll(p.getLabelsList());
+  }
+
+  @Override
+  public void setLabels(Set<String> labels) {
+    maybeInitBuilder();
+    // always clear, so stale labels from a previous proto cannot leak back in
+    builder.clearLabels();
+    this.labels = labels;
+  }
+
+  @Override
+  public Set<String> getLabels() {
+    initLabels();
+    return this.labels;
+  }
+
+}
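All of the record implementations below follow the same lazy viaProto/builder idiom shown in this first class: setters invalidate the cached proto, and getProto() merges the local state back into the builder and freezes it. A small round-trip sketch, illustrative only:

import java.util.Set;

import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;

import com.google.common.collect.Sets;

public class PBRoundTripSketch {
  public static void main(String[] args) {
    AddToClusterNodeLabelsRequestPBImpl request =
        new AddToClusterNodeLabelsRequestPBImpl();
    request.setLabels(Sets.newHashSet("GPU", "FPGA"));

    // getProto() merges the local set into the builder and freezes it.
    AddToClusterNodeLabelsRequestProto proto = request.getProto();

    // Rehydrating from the proto yields the same label set.
    Set<String> labels =
        new AddToClusterNodeLabelsRequestPBImpl(proto).getLabels();
    System.out.println(labels);
  }
}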
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsResponsePBImpl.java
new file mode 100644
index 0000000..3d1f71c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddToClusterNodeLabelsResponsePBImpl.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
+
+import com.google.protobuf.TextFormat;
+
+public class AddToClusterNodeLabelsResponsePBImpl extends
+    AddToClusterNodeLabelsResponse {
+
+  AddToClusterNodeLabelsResponseProto proto = AddToClusterNodeLabelsResponseProto
+      .getDefaultInstance();
+  AddToClusterNodeLabelsResponseProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public AddToClusterNodeLabelsResponsePBImpl() {
+    builder = AddToClusterNodeLabelsResponseProto.newBuilder();
+  }
+
+  public AddToClusterNodeLabelsResponsePBImpl(
+      AddToClusterNodeLabelsResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public AddToClusterNodeLabelsResponseProto getProto() {
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetClusterNodeLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetClusterNodeLabelsRequestPBImpl.java
new file mode 100644
index 0000000..9cb4206
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetClusterNodeLabelsRequestPBImpl.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsRequest;
+
+import com.google.protobuf.TextFormat;
+
+public class GetClusterNodeLabelsRequestPBImpl extends
+    GetClusterNodeLabelsRequest {
+
+  GetClusterNodeLabelsRequestProto proto = GetClusterNodeLabelsRequestProto
+      .getDefaultInstance();
+  GetClusterNodeLabelsRequestProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public GetClusterNodeLabelsRequestPBImpl() {
+    builder = GetClusterNodeLabelsRequestProto.newBuilder();
+  }
+
+  public GetClusterNodeLabelsRequestPBImpl(GetClusterNodeLabelsRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public GetClusterNodeLabelsRequestProto getProto() {
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetClusterNodeLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetClusterNodeLabelsResponsePBImpl.java
new file mode 100644
index 0000000..cd56f65
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetClusterNodeLabelsResponsePBImpl.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetClusterNodeLabelsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetClusterNodeLabelsResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsResponse;
+
+public class GetClusterNodeLabelsResponsePBImpl extends
+    GetClusterNodeLabelsResponse {
+  Set<String> labels;
+  GetClusterNodeLabelsResponseProto proto = GetClusterNodeLabelsResponseProto
+      .getDefaultInstance();
+  GetClusterNodeLabelsResponseProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public GetClusterNodeLabelsResponsePBImpl() {
+    this.builder = GetClusterNodeLabelsResponseProto.newBuilder();
+  }
+
+  public GetClusterNodeLabelsResponsePBImpl(
+      GetClusterNodeLabelsResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetClusterNodeLabelsResponseProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.labels != null && !this.labels.isEmpty()) {
+      builder.addAllLabels(this.labels);
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto)
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  public GetClusterNodeLabelsResponseProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void initLabels() {
+    if (this.labels != null) {
+      return;
+    }
+    GetClusterNodeLabelsResponseProtoOrBuilder p = viaProto ? proto : builder;
+    this.labels = new HashSet<String>();
+    this.labels.addAll(p.getLabelsList());
+  }
+
+  @Override
+  public void setLabels(Set<String> labels) {
+    maybeInitBuilder();
+    // always clear, so stale labels from a previous proto cannot leak back in
+    builder.clearLabels();
+    this.labels = labels;
+  }
+
+  @Override
+  public Set<String> getLabels() {
+    initLabels();
+    return this.labels;
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsRequestPBImpl.java
new file mode 100644
index 0000000..057f41b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsRequestPBImpl.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsRequestProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest;
+
+import com.google.protobuf.TextFormat;
+
+public class GetNodeToLabelsRequestPBImpl extends GetNodeToLabelsRequest {
+
+  GetNodeToLabelsRequestProto proto = GetNodeToLabelsRequestProto
+      .getDefaultInstance();
+  GetNodeToLabelsRequestProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public GetNodeToLabelsRequestPBImpl() {
+    builder = GetNodeToLabelsRequestProto.newBuilder();
+  }
+
+  public GetNodeToLabelsRequestPBImpl(GetNodeToLabelsRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public GetNodeToLabelsRequestProto getProto() {
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsResponsePBImpl.java
new file mode 100644
index 0000000..13661b4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsResponsePBImpl.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeIdToLabelsProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse;
+
+import com.google.common.collect.Sets;
+
+public class GetNodeToLabelsResponsePBImpl extends GetNodeToLabelsResponse {
+  GetNodeToLabelsResponseProto proto = GetNodeToLabelsResponseProto
+      .getDefaultInstance();
+  GetNodeToLabelsResponseProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private Map<NodeId, Set<String>> nodeToLabels;
+
+  public GetNodeToLabelsResponsePBImpl() {
+    this.builder = GetNodeToLabelsResponseProto.newBuilder();
+  }
+
+  public GetNodeToLabelsResponsePBImpl(GetNodeToLabelsResponseProto proto) {
+    this.proto = proto;
+    this.viaProto = true;
+  }
+
+  private void initNodeToLabels() {
+    if (this.nodeToLabels != null) {
+      return;
+    }
+    GetNodeToLabelsResponseProtoOrBuilder p = viaProto ? proto : builder;
+    List<NodeIdToLabelsProto> list = p.getNodeToLabelsList();
+    this.nodeToLabels = new HashMap<NodeId, Set<String>>();
+
+    for (NodeIdToLabelsProto c : list) {
+      this.nodeToLabels.put(new NodeIdPBImpl(c.getNodeId()),
+          Sets.newHashSet(c.getLabelsList()));
+    }
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetNodeToLabelsResponseProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void addNodeToLabelsToProto() {
+    maybeInitBuilder();
+    builder.clearNodeToLabels();
+    if (nodeToLabels == null) {
+      return;
+    }
+    Iterable<NodeIdToLabelsProto> iterable =
+        new Iterable<NodeIdToLabelsProto>() {
+          @Override
+          public Iterator<NodeIdToLabelsProto> iterator() {
+            return new Iterator<NodeIdToLabelsProto>() {
+
+              Iterator<Entry<NodeId, Set<String>>> iter = nodeToLabels
+                  .entrySet().iterator();
+
+              @Override
+              public void remove() {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public NodeIdToLabelsProto next() {
+                Entry<NodeId, Set<String>> now = iter.next();
+                return NodeIdToLabelsProto.newBuilder()
+                    .setNodeId(convertToProtoFormat(now.getKey()))
+                    .addAllLabels(now.getValue()).build();
+              }
+
+              @Override
+              public boolean hasNext() {
+                return iter.hasNext();
+              }
+            };
+          }
+        };
+    builder.addAllNodeToLabels(iterable);
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.nodeToLabels != null) {
+      addNodeToLabelsToProto();
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto)
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  public GetNodeToLabelsResponseProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public Map<NodeId, Set<String>> getNodeToLabels() {
+    initNodeToLabels();
+    return this.nodeToLabels;
+  }
+
+  @Override
+  public void setNodeToLabels(Map<NodeId, Set<String>> map) {
+    initNodeToLabels();
+    nodeToLabels.clear();
+    nodeToLabels.putAll(map);
+  }
+
+  private NodeIdProto convertToProtoFormat(NodeId t) {
+    return ((NodeIdPBImpl) t).getProto();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeIdToLabelsPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeIdToLabelsPBImpl.java
new file mode 100644
index 0000000..78caa74
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeIdToLabelsPBImpl.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeIdToLabelsProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeIdToLabelsProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeIdToLabels;
+
+public class NodeIdToLabelsPBImpl extends NodeIdToLabels {
+  private List<String> labels;
+  private NodeId nodeId = null;
+  NodeIdToLabelsProto proto = NodeIdToLabelsProto
+      .getDefaultInstance();
+  NodeIdToLabelsProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public NodeIdToLabelsPBImpl() {
+    this.builder = NodeIdToLabelsProto.newBuilder();
+  }
+
+  public NodeIdToLabelsPBImpl(NodeIdToLabelsProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = NodeIdToLabelsProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.labels != null && !this.labels.isEmpty()) {
+      builder.addAllLabels(this.labels);
+    }
+    if (this.nodeId != null
+        && !((NodeIdPBImpl) nodeId).getProto().equals(
+            builder.getNodeId())) {
+      builder.setNodeId(convertToProtoFormat(this.nodeId));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto)
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  public NodeIdToLabelsProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void initLabels() {
+    if (this.labels != null) {
+      return;
+    }
+    NodeIdToLabelsProtoOrBuilder p = viaProto ? proto : builder;
+    this.labels = new ArrayList<String>();
+    this.labels.addAll(p.getLabelsList());
+  }
+
+  @Override
+  public void setLabels(List<String> labels) {
+    maybeInitBuilder();
+    // always clear, so stale labels from a previous proto cannot leak back in
+    builder.clearLabels();
+    this.labels = labels;
+  }
+
+  @Override
+  public List<String> getLabels() {
+    initLabels();
+    return this.labels;
+  }
+
+  @Override
+  public void setNodeId(NodeId nodeId) {
+    maybeInitBuilder();
+    if (nodeId == null)
+      builder.clearNodeId();
+    this.nodeId = nodeId;
+  }
+
+  @Override
+  public NodeId getNodeId() {
+    NodeIdToLabelsProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.nodeId != null) {
+      return this.nodeId;
+    }
+    if (!p.hasNodeId()) {
+      return null;
+    }
+    this.nodeId = new NodeIdPBImpl(p.getNodeId());
+    return this.nodeId;
+  }
+
+  private NodeIdProto convertToProtoFormat(NodeId t) {
+    return ((NodeIdPBImpl) t).getProto();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+}
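NodeIdToLabelsProto is the wire entry that pairs one NodeId with its labels; the map-valued records above and below stream their maps through it lazily rather than materializing a list. A round-trip sketch for the response record, illustrative only:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsResponsePBImpl;

import com.google.common.collect.Sets;

public class NodeToLabelsRoundTripSketch {
  public static void main(String[] args) {
    GetNodeToLabelsResponsePBImpl response = new GetNodeToLabelsResponsePBImpl();

    Map<NodeId, Set<String>> map = new HashMap<NodeId, Set<String>>();
    map.put(NodeId.newInstance("host1", 8041), Sets.newHashSet("GPU"));
    response.setNodeToLabels(map);

    // getProto() walks the map through NodeIdToLabelsProto entries;
    // rehydrating from the proto yields an equal map.
    Map<NodeId, Set<String>> copy =
        new GetNodeToLabelsResponsePBImpl(response.getProto()).getNodeToLabels();
    System.out.println(copy);
  }
}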
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsRequestPBImpl.java
new file mode 100644
index 0000000..411c532
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsRequestPBImpl.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
+
+public class RemoveFromClusterNodeLabelsRequestPBImpl extends
+    RemoveFromClusterNodeLabelsRequest {
+  Set<String> labels;
+  RemoveFromClusterNodeLabelsRequestProto proto =
+      RemoveFromClusterNodeLabelsRequestProto.getDefaultInstance();
+  RemoveFromClusterNodeLabelsRequestProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public RemoveFromClusterNodeLabelsRequestPBImpl() {
+    this.builder = RemoveFromClusterNodeLabelsRequestProto.newBuilder();
+  }
+
+  public RemoveFromClusterNodeLabelsRequestPBImpl(
+      RemoveFromClusterNodeLabelsRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = RemoveFromClusterNodeLabelsRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.labels != null && !this.labels.isEmpty()) {
+      builder.addAllLabels(this.labels);
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto)
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  public RemoveFromClusterNodeLabelsRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void initLabels() {
+    if (this.labels != null) {
+      return;
+    }
+    RemoveFromClusterNodeLabelsRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    this.labels = new HashSet<String>();
+    this.labels.addAll(p.getLabelsList());
+  }
+
+  @Override
+  public void setLabels(Set<String> labels) {
+    maybeInitBuilder();
+    // always clear, so stale labels from a previous proto cannot leak back in
+    builder.clearLabels();
+    this.labels = labels;
+  }
+
+  @Override
+  public Set<String> getLabels() {
+    initLabels();
+    return this.labels;
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsResponsePBImpl.java
new file mode 100644
index 0000000..43cf948
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsResponsePBImpl.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; + +import com.google.protobuf.TextFormat; + +public class RemoveFromClusterNodeLabelsResponsePBImpl extends + RemoveFromClusterNodeLabelsResponse { + + RemoveFromClusterNodeLabelsResponseProto proto = + RemoveFromClusterNodeLabelsResponseProto.getDefaultInstance(); + RemoveFromClusterNodeLabelsResponseProto.Builder builder = null; + boolean viaProto = false; + + public RemoveFromClusterNodeLabelsResponsePBImpl() { + builder = RemoveFromClusterNodeLabelsResponseProto.newBuilder(); + } + + public RemoveFromClusterNodeLabelsResponsePBImpl( + RemoveFromClusterNodeLabelsResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public RemoveFromClusterNodeLabelsResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java new file mode 100644 index 0000000..6d07cb4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeRequestPBImpl.java @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeIdToLabelsProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
+
+import com.google.common.collect.Sets;
+
+public class ReplaceLabelsOnNodeRequestPBImpl extends
+    ReplaceLabelsOnNodeRequest {
+  ReplaceLabelsOnNodeRequestProto proto = ReplaceLabelsOnNodeRequestProto
+      .getDefaultInstance();
+  ReplaceLabelsOnNodeRequestProto.Builder builder = null;
+  boolean viaProto = false;
+
+  private Map<NodeId, Set<String>> nodeIdToLabels;
+
+  public ReplaceLabelsOnNodeRequestPBImpl() {
+    this.builder = ReplaceLabelsOnNodeRequestProto.newBuilder();
+  }
+
+  public ReplaceLabelsOnNodeRequestPBImpl(ReplaceLabelsOnNodeRequestProto proto) {
+    this.proto = proto;
+    this.viaProto = true;
+  }
+
+  private void initNodeToLabels() {
+    if (this.nodeIdToLabels != null) {
+      return;
+    }
+    ReplaceLabelsOnNodeRequestProtoOrBuilder p = viaProto ? proto : builder;
+    List<NodeIdToLabelsProto> list = p.getNodeToLabelsList();
+    this.nodeIdToLabels = new HashMap<NodeId, Set<String>>();
+
+    for (NodeIdToLabelsProto c : list) {
+      this.nodeIdToLabels.put(new NodeIdPBImpl(c.getNodeId()),
+          Sets.newHashSet(c.getLabelsList()));
+    }
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = ReplaceLabelsOnNodeRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void addNodeToLabelsToProto() {
+    maybeInitBuilder();
+    builder.clearNodeToLabels();
+    if (nodeIdToLabels == null) {
+      return;
+    }
+    Iterable<NodeIdToLabelsProto> iterable =
+        new Iterable<NodeIdToLabelsProto>() {
+          @Override
+          public Iterator<NodeIdToLabelsProto> iterator() {
+            return new Iterator<NodeIdToLabelsProto>() {
+
+              Iterator<Entry<NodeId, Set<String>>> iter = nodeIdToLabels
+                  .entrySet().iterator();
+
+              @Override
+              public void remove() {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public NodeIdToLabelsProto next() {
+                Entry<NodeId, Set<String>> now = iter.next();
+                return NodeIdToLabelsProto.newBuilder()
+                    .setNodeId(convertToProtoFormat(now.getKey()))
+                    .addAllLabels(now.getValue()).build();
+              }
+
+              @Override
+              public boolean hasNext() {
+                return iter.hasNext();
+              }
+            };
+          }
+        };
+    builder.addAllNodeToLabels(iterable);
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.nodeIdToLabels != null) {
+      addNodeToLabelsToProto();
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto)
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  public ReplaceLabelsOnNodeRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public Map<NodeId, Set<String>> getNodeToLabels() {
+    initNodeToLabels();
+    return this.nodeIdToLabels;
+  }
+
+  @Override
+  public void setNodeToLabels(Map<NodeId, Set<String>> map) {
+    initNodeToLabels();
+    nodeIdToLabels.clear();
+    nodeIdToLabels.putAll(map);
+  }
+
+  private NodeIdProto convertToProtoFormat(NodeId t) {
+    return ((NodeIdPBImpl) t).getProto();
+  }
+} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeResponsePBImpl.java new file mode 100644 index 0000000..cd52b61 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReplaceLabelsOnNodeResponsePBImpl.java @@ -0,0 +1,69 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
+
+import com.google.protobuf.TextFormat;
+
+public class ReplaceLabelsOnNodeResponsePBImpl extends
+    ReplaceLabelsOnNodeResponse {
+
+  ReplaceLabelsOnNodeResponseProto proto = ReplaceLabelsOnNodeResponseProto
+      .getDefaultInstance();
+  ReplaceLabelsOnNodeResponseProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public ReplaceLabelsOnNodeResponsePBImpl() {
+    builder = ReplaceLabelsOnNodeResponseProto.newBuilder();
+  }
+
+  public ReplaceLabelsOnNodeResponsePBImpl(
+      ReplaceLabelsOnNodeResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public ReplaceLabelsOnNodeResponseProto getProto() {
+    proto = viaProto ?
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index e9ca76f..7ea614a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -36,15 +36,271 @@ import org.apache.commons.lang.math.LongRange; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.security.proto.SecurityProtos.*; -import org.apache.hadoop.yarn.api.protocolrecords.*; -import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.*; -import org.apache.hadoop.yarn.api.records.*; -import org.apache.hadoop.yarn.api.records.impl.pb.*; -import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.*; -import org.apache.hadoop.yarn.proto.YarnProtos.*; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.*; -import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.*; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; +import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl; +import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerReportResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainerStatusesResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetContainersResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.MoveApplicationAcrossQueuesResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationDeleteResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationSubmissionResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.ReservationUpdateResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainerRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StartContainersResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersRequestPBImpl; +import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.ContainerResourceDecrease; +import org.apache.hadoop.yarn.api.records.ContainerResourceIncrease; +import org.apache.hadoop.yarn.api.records.ContainerResourceIncreaseRequest; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LogAggregationContext; +import org.apache.hadoop.yarn.api.records.NMToken; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.PreemptionContainer; +import org.apache.hadoop.yarn.api.records.PreemptionContract; +import org.apache.hadoop.yarn.api.records.PreemptionMessage; +import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.QueueInfo; +import org.apache.hadoop.yarn.api.records.QueueState; +import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; +import org.apache.hadoop.yarn.api.records.ReservationDefinition; +import org.apache.hadoop.yarn.api.records.ReservationId; +import org.apache.hadoop.yarn.api.records.ReservationRequest; +import org.apache.hadoop.yarn.api.records.ReservationRequests; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; +import org.apache.hadoop.yarn.api.records.ResourceOption; +import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.SerializedException; +import org.apache.hadoop.yarn.api.records.StrictPreemptionContract; +import org.apache.hadoop.yarn.api.records.Token; +import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptReportPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationResourceUsageReportPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerReportPBImpl; +import 
org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceDecreasePBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceIncreasePBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerResourceIncreaseRequestPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContainerPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContractPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionResourceRequestPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.QueueUserACLInfoPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.YarnClusterMetricsPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceDecreaseProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceIncreaseProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerResourceIncreaseRequestProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto; +import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; +import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProto; +import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; +import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto; +import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto; +import 
org.apache.hadoop.yarn.proto.YarnProtos.ResourceBlacklistRequestProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; +import org.apache.hadoop.yarn.proto.YarnProtos.SerializedExceptionProto; +import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto; +import org.apache.hadoop.yarn.proto.YarnProtos.URLProto; +import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetClusterNodeLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetClusterNodeLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeIdToLabelsProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; +import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerReportResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainersResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.MoveApplicationAcrossQueuesResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.NMTokenProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationDeleteResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationSubmissionResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.ReservationUpdateResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto; +import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeIdToLabelsPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshServiceAclsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Ignore; @@ -934,4 +1190,69 @@ public void testReservationDeleteResponsePBImpl() throws Exception { validatePBImplRecord(ReservationDeleteResponsePBImpl.class, ReservationDeleteResponseProto.class); } + + @Test + public void testNodeToLabelsPBImpl() throws Exception { + validatePBImplRecord(NodeIdToLabelsPBImpl.class, NodeIdToLabelsProto.class); + } + + @Test + public void testAddToClusterNodeLabelsRequestPBImpl() throws Exception { + 
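// Round-trip check: validatePBImplRecord (the helper defined earlier in
+ // this test class) populates the record reflectively, converts it to its
+ // proto form and back, and asserts the results are equal; all of the new
+ // node-label records below are exercised the same way.
+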
validatePBImplRecord(AddToClusterNodeLabelsRequestPBImpl.class, + AddToClusterNodeLabelsRequestProto.class); + } + + @Test + public void testAddToClusterNodeLabelsResponsePBImpl() throws Exception { + validatePBImplRecord(AddToClusterNodeLabelsResponsePBImpl.class, + AddToClusterNodeLabelsResponseProto.class); + } + + @Test + public void testRemoveFromClusterNodeLabelsRequestPBImpl() throws Exception { + validatePBImplRecord(RemoveFromClusterNodeLabelsRequestPBImpl.class, + RemoveFromClusterNodeLabelsRequestProto.class); + } + + @Test + public void testRemoveFromClusterNodeLabelsResponsePBImpl() throws Exception { + validatePBImplRecord(RemoveFromClusterNodeLabelsResponsePBImpl.class, + RemoveFromClusterNodeLabelsResponseProto.class); + } + + @Test + public void testGetClusterNodeLabelsRequestPBImpl() throws Exception { + validatePBImplRecord(GetClusterNodeLabelsRequestPBImpl.class, + GetClusterNodeLabelsRequestProto.class); + } + + @Test + public void testGetClusterNodeLabelsResponsePBImpl() throws Exception { + validatePBImplRecord(GetClusterNodeLabelsResponsePBImpl.class, + GetClusterNodeLabelsResponseProto.class); + } + + @Test + public void testReplaceLabelsOnNodeRequestPBImpl() throws Exception { + validatePBImplRecord(ReplaceLabelsOnNodeRequestPBImpl.class, + ReplaceLabelsOnNodeRequestProto.class); + } + + @Test + public void testReplaceLabelsOnNodeResponsePBImpl() throws Exception { + validatePBImplRecord(ReplaceLabelsOnNodeResponsePBImpl.class, + ReplaceLabelsOnNodeResponseProto.class); + } + + @Test + public void testGetNodeToLabelsRequestPBImpl() throws Exception { + validatePBImplRecord(GetNodeToLabelsRequestPBImpl.class, + GetNodeToLabelsRequestProto.class); + } + + @Test + public void testGetNodeToLabelsResponsePBImpl() throws Exception { + validatePBImplRecord(GetNodeToLabelsResponsePBImpl.class, + GetNodeToLabelsResponseProto.class); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyNodeLabelsManager.java new file mode 100644 index 0000000..2e64ca2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyNodeLabelsManager.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.event.InlineDispatcher;
+
+public class DummyNodeLabelsManager extends NodeLabelsManager {
+  Map<NodeId, Set<String>> lastNodeToLabels = null;
+  Collection<String> lastAddedlabels = null;
+  Collection<String> lastRemovedlabels = null;
+
+  @Override
+  public void initNodeLabelStore(Configuration conf) {
+    this.store = new NodeLabelsStore(this) {
+
+      @Override
+      public void recover() throws IOException {
+      }
+
+      @Override
+      public void persistRemovingLabels(Collection<String> labels)
+          throws IOException {
+        lastRemovedlabels = labels;
+      }
+
+      @Override
+      public void persistNodeToLabelsChanges(
+          Map<NodeId, Set<String>> nodeToLabels) throws IOException {
+        lastNodeToLabels = nodeToLabels;
+      }
+
+      @Override
+      public void persistAddingLabels(Set<String> label) throws IOException {
+        lastAddedlabels = label;
+      }
+    };
+  }
+
+  @Override
+  protected void initDispatcher(Configuration conf) {
+    super.dispatcher = new InlineDispatcher();
+  }
+
+  @Override
+  protected void startDispatcher() {
+    // do nothing
+  }
+} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java new file mode 100644 index 0000000..9d5bf75 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/NodeLabelTestBase.java @@ -0,0 +1,76 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.junit.Assert;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
+
+public class NodeLabelTestBase {
+  public static void assertMapEquals(Map<NodeId, Set<String>> m1,
+      ImmutableMap<NodeId, Set<String>> m2) {
+    Assert.assertEquals(m1.size(), m2.size());
+    for (NodeId k : m1.keySet()) {
+      Assert.assertTrue(m2.containsKey(k));
+      assertCollectionEquals(m1.get(k), m2.get(k));
+    }
+  }
+
+  public static void assertMapContains(Map<NodeId, Set<String>> m1,
+      ImmutableMap<NodeId, Set<String>> m2) {
+    for (NodeId k : m2.keySet()) {
+      Assert.assertTrue(m1.containsKey(k));
+      assertCollectionEquals(m1.get(k), m2.get(k));
+    }
+  }
+
+  public static void assertCollectionEquals(Collection<String> c1,
+      Collection<String> c2) {
+    Assert.assertEquals(c1.size(), c2.size());
+    Iterator<String> i1 = c1.iterator();
+    Iterator<String> i2 = c2.iterator();
+    while (i1.hasNext()) {
+      Assert.assertEquals(i1.next(), i2.next());
+    }
+  }
+
+  public static <E> Set<E> toSet(E... elements) {
+    Set<E> set = Sets.newHashSet(elements);
+    return set;
+  }
+
+  public NodeId toNodeId(String str) {
+    if (str.contains(":")) {
+      int idx = str.indexOf(':');
+      NodeId id =
+          NodeId.newInstance(str.substring(0, idx),
+              Integer.valueOf(str.substring(idx + 1)));
+      return id;
+    } else {
+      return NodeId.newInstance(str, NodeLabelsManager.WILDCARD_PORT);
+    }
+  }
+} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java new file mode 100644 index 0000000..9ebebbb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java @@ -0,0 +1,166 @@ +/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.InlineDispatcher;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableMap;
+
+public class TestFileSystemNodeLabelsStore extends NodeLabelTestBase {
+  MockNodeLabelManager mgr = null;
+  Configuration conf = null;
+
+  private static class MockNodeLabelManager extends
+      NodeLabelsManager {
+    @Override
+    protected void initDispatcher(Configuration conf) {
+      super.dispatcher = new InlineDispatcher();
+    }
+
+    @Override
+    protected void startDispatcher() {
+      // do nothing
+    }
+  }
+
+  private FileSystemNodeLabelsStore getStore() {
+    return (FileSystemNodeLabelsStore) mgr.store;
+  }
+
+  @Before
+  public void before() throws IOException {
+    mgr = new MockNodeLabelManager();
+    conf = new Configuration();
+    File tempDir = File.createTempFile("nlb", ".tmp");
+    tempDir.delete();
+    tempDir.mkdirs();
+    tempDir.deleteOnExit();
+    conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_URI,
+        tempDir.getAbsolutePath());
+    mgr.init(conf);
+    mgr.start();
+  }
+
+  @After
+  public void after() throws IOException {
+    getStore().fs.delete(getStore().rootDirPath, true);
+    mgr.stop();
+  }
+
+  @SuppressWarnings({ "unchecked", "rawtypes" })
+  @Test(timeout = 10000)
+  public void testRecoverWithMirror() throws Exception {
+    mgr.addToCluserNodeLabels(toSet("p1", "p2", "p3"));
+    mgr.addToCluserNodeLabels(toSet("p4"));
+    mgr.addToCluserNodeLabels(toSet("p5", "p6"));
+    mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n1"), toSet("p1"),
+        toNodeId("n2"), toSet("p2")));
+    mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"),
+        toNodeId("n4"), toSet("p4"), toNodeId("n5"), toSet("p5"),
+        toNodeId("n6"), toSet("p6"), toNodeId("n7"), toSet("p6")));
+
+    /*
+     * node -> partition p1: n1 p2: n2 p3: n3 p4: n4 p5: n5 p6: n6, n7
+     */
+
+    mgr.removeFromClusterNodeLabels(toSet("p1"));
+    mgr.removeFromClusterNodeLabels(Arrays.asList("p3", "p5"));
+
+    /*
+     * After removed p2: n2 p4: n4 p6: n6, n7
+     */
+    // shutdown mgr and start a new mgr
+    mgr.stop();
+
+    mgr = new MockNodeLabelManager();
+    mgr.init(conf);
+
+    // check variables
+    Assert.assertEquals(3, mgr.getClusterNodeLabels().size());
+    Assert.assertTrue(mgr.getClusterNodeLabels().containsAll(
+        Arrays.asList("p2", "p4", "p6")));
+
+    assertMapContains(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"),
+        toSet("p2"), toNodeId("n4"), toSet("p4"), toNodeId("n6"), toSet("p6"),
+        toNodeId("n7"), toSet("p6")));
+
+    // shutdown mgr and start a new mgr
+    mgr.stop();
+    mgr = new MockNodeLabelManager();
+    mgr.init(conf);
+
+    // check variables
+    Assert.assertEquals(3, mgr.getClusterNodeLabels().size());
+    Assert.assertTrue(mgr.getClusterNodeLabels().containsAll(
+        Arrays.asList("p2", "p4", "p6")));
+
+    assertMapContains(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"),
+        toSet("p2"), toNodeId("n4"), toSet("p4"), toNodeId("n6"), toSet("p6"),
+        toNodeId("n7"), toSet("p6")));
+  }
+
+  @SuppressWarnings({ "unchecked", "rawtypes" })
+  @Test(timeout = 10000)
+  public void testEditlogRecover() throws Exception {
+    mgr.addToCluserNodeLabels(toSet("p1", "p2", "p3"));
+    mgr.addToCluserNodeLabels(toSet("p4"));
+    mgr.addToCluserNodeLabels(toSet("p5", "p6"));
+
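// Same labels and mappings as testRecoverWithMirror above, but the
+    // manager is restarted only once at the end, so recovery has to replay
+    // the edit log entries written by the calls in this test.
+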
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"), + toNodeId("n2"), toSet("p2"))); + mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"), + toNodeId("n4"), toSet("p4"), toNodeId("n5"), toSet("p5"), + toNodeId("n6"), toSet("p6"), toNodeId("n7"), toSet("p6"))); + + /* + * node -> partition p1: n1 p2: n2 p3: n3 p4: n4 p5: n5 p6: n6, n7 + */ + + mgr.removeFromClusterNodeLabels(toSet("p1")); + mgr.removeFromClusterNodeLabels(Arrays.asList("p3", "p5")); + + /* + * After removed p2: n2 p4: n4 p6: n6, n7 + */ + // shutdown mgr and start a new mgr + mgr.stop(); + + mgr = new MockNodeLabelManager(); + mgr.init(conf); + + // check variables + Assert.assertEquals(3, mgr.getClusterNodeLabels().size()); + Assert.assertTrue(mgr.getClusterNodeLabels().containsAll( + Arrays.asList("p2", "p4", "p6"))); + + assertMapContains(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"), + toSet("p2"), toNodeId("n4"), toSet("p4"), toNodeId("n6"), toSet("p6"), + toNodeId("n7"), toSet("p6"))); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelsManager.java new file mode 100644 index 0000000..6a79dd1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelsManager.java @@ -0,0 +1,245 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
+public class TestNodeLabelsManager extends NodeLabelTestBase {
+  DummyNodeLabelsManager mgr = null;
+
+  @Before
+  public void before() {
+    mgr = new DummyNodeLabelsManager();
+    mgr.init(new Configuration());
+    mgr.start();
+  }
+
+  @After
+  public void after() {
+    mgr.stop();
+  }
+
+  @Test(timeout = 5000)
+  public void testAddRemovelabel() throws Exception {
+    // Add some labels
+    mgr.addToCluserNodeLabels(ImmutableSet.of("hello"));
+    assertCollectionEquals(mgr.lastAddedlabels, Arrays.asList("hello"));
+
+    mgr.addToCluserNodeLabels(ImmutableSet.of("world"));
+    mgr.addToCluserNodeLabels(toSet("hello1", "world1"));
+    assertCollectionEquals(mgr.lastAddedlabels,
+        Sets.newHashSet("hello1", "world1"));
+
+    Assert.assertTrue(mgr.getClusterNodeLabels().containsAll(
+        Sets.newHashSet("hello", "world", "hello1", "world1")));
+
+    // try to remove null, empty and non-existent labels, should fail
+    for (String p : Arrays.asList(null, NodeLabelsManager.NO_LABEL, "xx")) {
+      boolean caught = false;
+      try {
+        mgr.removeFromClusterNodeLabels(Arrays.asList(p));
+      } catch (IOException e) {
+        caught = true;
+      }
+      Assert.assertTrue("remove label should fail "
+          + "when label is null/empty/non-existent", caught);
+    }
+
+    // Remove some labels
+    mgr.removeFromClusterNodeLabels(Arrays.asList("hello"));
+    assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("hello"));
+    Assert.assertTrue(mgr.getClusterNodeLabels().containsAll(
+        Arrays.asList("world", "hello1", "world1")));
+
+    mgr.removeFromClusterNodeLabels(Arrays
+        .asList("hello1", "world1", "world"));
+    Assert.assertTrue(mgr.lastRemovedlabels.containsAll(Sets.newHashSet(
+        "hello1", "world1", "world")));
+    Assert.assertTrue(mgr.getClusterNodeLabels().isEmpty());
+  }
+
+  @Test(timeout = 5000)
+  public void testAddlabelWithCase() throws Exception {
+    // Add a label; case is not ignored here
+    mgr.addToCluserNodeLabels(ImmutableSet.of("HeLlO"));
+    assertCollectionEquals(mgr.lastAddedlabels, Arrays.asList("HeLlO"));
+    Assert.assertFalse(mgr.getClusterNodeLabels().containsAll(Arrays.asList("hello")));
+  }
+
+  @Test(timeout = 5000)
+  public void testAddInvalidlabel() throws IOException {
+    boolean caught = false;
+    try {
+      Set<String> set = new HashSet<String>();
+      set.add(null);
+      mgr.addToCluserNodeLabels(set);
+    } catch (IOException e) {
+      caught = true;
+    }
+    Assert.assertTrue("null label should not add to repo", caught);
+
+    caught = false;
+    try {
+      mgr.addToCluserNodeLabels(ImmutableSet.of(NodeLabelsManager.NO_LABEL));
+    } catch (IOException e) {
+      caught = true;
+    }
+
+    Assert.assertTrue("empty label should not add to repo", caught);
+
+    caught = false;
+    try {
+      mgr.addToCluserNodeLabels(ImmutableSet.of("-?"));
+    } catch (IOException e) {
+      caught = true;
+    }
+    Assert.assertTrue("invalid label character should not add to repo", caught);
+
+    caught = false;
+    try {
+      mgr.addToCluserNodeLabels(ImmutableSet.of(StringUtils.repeat("c", 257)));
+    } catch (IOException e) {
+      caught = true;
+    }
+    Assert.assertTrue("too long label should not add to repo", caught);
+
+
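// Label syntax is also restricted: the assertions below expect the
+    // validator to reject names that start with '-' or '_'.
+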
caught = false;
+    try {
+      mgr.addToCluserNodeLabels(ImmutableSet.of("-aaabbb"));
+    } catch (IOException e) {
+      caught = true;
+    }
+    Assert.assertTrue("label cannot start with \"-\"", caught);
+
+    caught = false;
+    try {
+      mgr.addToCluserNodeLabels(ImmutableSet.of("_aaabbb"));
+    } catch (IOException e) {
+      caught = true;
+    }
+    Assert.assertTrue("label cannot start with \"_\"", caught);
+  }
+
+  @SuppressWarnings({ "unchecked", "rawtypes" })
+  @Test(timeout = 5000)
+  public void testAddReplaceRemoveLabelsOnNodes() throws Exception {
+    // set a label on a node, but label doesn't exist
+    boolean caught = false;
+    try {
+      mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("node"), toSet("label")));
+    } catch (IOException e) {
+      caught = true;
+    }
+    Assert.assertTrue("setting a label on a node should fail "
+        + "when the label doesn't exist in the repository", caught);
+
+    // set a label on a node, but node is null or empty
+    caught = false;
+    try {
+      mgr.replaceLabelsOnNode(ImmutableMap.of(
+          toNodeId(NodeLabelsManager.NO_LABEL), toSet("label")));
+    } catch (IOException e) {
+      caught = true;
+    }
+    Assert.assertTrue("trying to add an empty node but succeeded", caught);
+
+    // set node->label one by one
+    mgr.addToCluserNodeLabels(toSet("p1", "p2", "p3"));
+    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
+    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p2")));
+    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2"), toSet("p3")));
+    assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"),
+        toSet("p2"), toNodeId("n2"), toSet("p3")));
+    assertMapEquals(mgr.lastNodeToLabels,
+        ImmutableMap.of(toNodeId("n2"), toSet("p3")));
+
+    // set a bunch of node->label mappings at once
+    mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n3"), toSet("p3"),
+        toNodeId("n1"), toSet("p1")));
+    assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n1"),
+        toSet("p1"), toNodeId("n2"), toSet("p3"), toNodeId("n3"), toSet("p3")));
+    assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of(toNodeId("n3"),
+        toSet("p3"), toNodeId("n1"), toSet("p1")));
+
+    /*
+     * n1: p1
+     * n2: p3
+     * n3: p3
+     */
+
+    // remove label on node
+    mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
+    assertMapEquals(mgr.getNodeLabels(), ImmutableMap.of(toNodeId("n2"),
+        toSet("p3"), toNodeId("n3"), toSet("p3")));
+    assertMapEquals(mgr.lastNodeToLabels,
+        ImmutableMap.of(toNodeId("n1"), NodeLabelsManager.EMPTY_STRING_SET));
+
+    // add label on node
+    mgr.addLabelsToNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"),
+        toNodeId("n2"), toSet("p2")));
+    assertMapEquals(
+        mgr.getNodeLabels(),
+        ImmutableMap.of(toNodeId("n1"), toSet("p1"), toNodeId("n2"),
+            toSet("p2", "p3"), toNodeId("n3"), toSet("p3")));
+    assertMapEquals(mgr.lastNodeToLabels,
+        ImmutableMap.of(toNodeId("n1"), toSet("p1"), toNodeId("n2"),
+            toSet("p2", "p3")));
+
+    // remove labels on node
+    mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"),
+        toNodeId("n2"), toSet("p2", "p3"), toNodeId("n3"), toSet("p3")));
+    Assert.assertEquals(0, mgr.getNodeLabels().size());
+    assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of(toNodeId("n1"),
+        NodeLabelsManager.EMPTY_STRING_SET, toNodeId("n2"),
+        NodeLabelsManager.EMPTY_STRING_SET, toNodeId("n3"),
+        NodeLabelsManager.EMPTY_STRING_SET));
+  }
+
+  @Test(timeout = 5000)
+  public void testRemovelabelWithNodes() throws Exception {
+    mgr.addToCluserNodeLabels(toSet("p1", "p2", "p3"));
+    mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1")));
+
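// Give each of n1..n3 exactly one label; removing labels from the
+    // cluster below should also drop the corresponding node mappings.
+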
mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n2"), toSet("p2"))); + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n3"), toSet("p3"))); + + mgr.removeFromClusterNodeLabels(ImmutableSet.of("p1")); + assertMapEquals(mgr.getNodeLabels(), + ImmutableMap.of(toNodeId("n2"), toSet("p2"), toNodeId("n3"), toSet("p3"))); + assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p1")); + + mgr.removeFromClusterNodeLabels(ImmutableSet.of("p2", "p3")); + Assert.assertTrue(mgr.getNodeLabels().isEmpty()); + Assert.assertTrue(mgr.getClusterNodeLabels().isEmpty()); + assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p2", "p3")); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index 59db66a..f5e8ae1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -338,6 +338,7 @@ public static ApplicationReport newApplicationReport( return report; } + @SuppressWarnings("deprecation") public static ApplicationSubmissionContext newApplicationSubmissionContext( ApplicationId applicationId, String applicationName, String queue, Priority priority, ContainerLaunchContext amContainer, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index 2b7797f..7b57116 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -57,6 +57,12 @@ import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; @@ -69,8 +75,14 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsResponsePBImpl; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent; @@ -200,7 +212,8 @@ void resetLeaderElection() { } private UserGroupInformation checkAccess(String method) throws IOException { - return RMServerUtils.verifyAccess(adminAcl, method, LOG); + return RMServerUtils.verifyAccess(adminAcl, method, + AdminService.class.getName(), LOG); } private UserGroupInformation checkAcls(String method) throws YarnException { @@ -618,4 +631,104 @@ public AccessControlList getAccessControlList() { public Server getServer() { return this.server; } + + @Override + public AddToClusterNodeLabelsResponse addToClusterNodeLabels(AddToClusterNodeLabelsRequest request) + throws YarnException, IOException { + String argName = "addLabels"; + UserGroupInformation user = checkAcls(argName); + + if (!isRMActive()) { + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "ResourceManager is not active. Can not add labels."); + throwStandbyException(); + } + + AddToClusterNodeLabelsResponse response = + recordFactory.newRecordInstance(AddToClusterNodeLabelsResponse.class); + try { + rmContext.getNodeLabelManager().addToCluserNodeLabels(request.getLabels()); + RMAuditLogger + .logSuccess(user.getShortUserName(), argName, "AdminService"); + return response; + } catch (IOException ioe) { + LOG.info("Exception add labels", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", "Exception add label"); + throw RPCUtil.getRemoteException(ioe); + } + } + + @Override + public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels( + RemoveFromClusterNodeLabelsRequest request) throws YarnException, IOException { + String argName = "removeLabels"; + UserGroupInformation user = checkAcls(argName); + + if (!isRMActive()) { + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "ResourceManager is not active. 
Can not remove labels."); + throwStandbyException(); + } + + RemoveFromClusterNodeLabelsResponse response = + recordFactory.newRecordInstance(RemoveFromClusterNodeLabelsResponse.class); + try { + rmContext.getNodeLabelManager().removeFromClusterNodeLabels(request.getLabels()); + RMAuditLogger + .logSuccess(user.getShortUserName(), argName, "AdminService"); + return response; + } catch (IOException ioe) { + LOG.info("Exception remove labels", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", "Exception remove label"); + throw RPCUtil.getRemoteException(ioe); + } + } + + @Override + public ReplaceLabelsOnNodeResponse replaceLabelsOnNode( + ReplaceLabelsOnNodeRequest request) throws YarnException, IOException { + String argName = "setNodeToLabels"; + UserGroupInformation user = checkAcls(argName); + + if (!isRMActive()) { + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "ResourceManager is not active. Can not set node to labels."); + throwStandbyException(); + } + + ReplaceLabelsOnNodeResponse response = + recordFactory.newRecordInstance(ReplaceLabelsOnNodeResponse.class); + try { + rmContext.getNodeLabelManager().replaceLabelsOnNode( + request.getNodeToLabels()); + RMAuditLogger + .logSuccess(user.getShortUserName(), argName, "AdminService"); + return response; + } catch (IOException ioe) { + LOG.info("Exception set node to labels. ", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "Exception set node to labels."); + throw RPCUtil.getRemoteException(ioe); + } + } + + @Override + public GetNodeToLabelsResponse getNodeToLabels(GetNodeToLabelsRequest request) + throws YarnException, IOException { + return GetNodeToLabelsResponsePBImpl.newInstance(rmContext + .getNodeLabelManager().getNodeLabels()); + } + + @Override + public GetClusterNodeLabelsResponse getClusterNodeLabels(GetClusterNodeLabelsRequest request) + throws YarnException, IOException { + return GetClusterNodeLabelsResponsePBImpl.newInstance(rmContext.getNodeLabelManager() + .getClusterNodeLabels()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index 707cf1b..ab7f126 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.AMCommand; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NMToken; @@ -481,11 +482,22 @@ public AllocateResponse allocate(AllocateRequest request) List blacklistRemovals = (blacklistRequest != null) ? 
blacklistRequest.getBlacklistRemovals() : Collections.EMPTY_LIST; - + RMApp app = + this.rmContext.getRMApps().get(appAttemptId.getApplicationId()); + + // set label expression for Resource Requests + ApplicationSubmissionContext asc = app.getApplicationSubmissionContext(); + for (ResourceRequest req : ask) { + if (null == req.getNodeLabelExpression()) { + req.setNodeLabelExpression(asc.getNodeLabelExpression()); + } + } + // sanity check try { RMServerUtils.validateResourceRequests(ask, - rScheduler.getMaximumResourceCapability()); + rScheduler.getMaximumResourceCapability(), app.getQueue(), + rScheduler); } catch (InvalidResourceRequestException e) { LOG.warn("Invalid resource ask by application " + appAttemptId, e); throw e; @@ -498,8 +510,6 @@ public AllocateResponse allocate(AllocateRequest request) throw e; } - RMApp app = - this.rmContext.getRMApps().get(appAttemptId.getApplicationId()); // In the case of work-preserving AM restart, it's possible for the // AM to release containers from the earlier attempt. if (!app.getApplicationSubmissionContext() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 1d672e5..c4ce1c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -343,7 +343,7 @@ private RMAppImpl createAndPopulateNewRMApp( long submitTime, String user) throws YarnException { ApplicationId applicationId = submissionContext.getApplicationId(); - validateResourceRequest(submissionContext); + ResourceRequest amReq = validateAndCreateResourceRequest(submissionContext); // Create RMApp RMAppImpl application = new RMAppImpl(applicationId, rmContext, this.conf, @@ -351,7 +351,7 @@ private RMAppImpl createAndPopulateNewRMApp( submissionContext.getQueue(), submissionContext, this.scheduler, this.masterService, submitTime, submissionContext.getApplicationType(), - submissionContext.getApplicationTags()); + submissionContext.getApplicationTags(), amReq); // Concurrent app submissions with same applicationId will fail here // Concurrent app submissions with different applicationIds will not @@ -373,7 +373,8 @@ private RMAppImpl createAndPopulateNewRMApp( return application; } - private void validateResourceRequest( + @SuppressWarnings("deprecation") + private ResourceRequest validateAndCreateResourceRequest( ApplicationSubmissionContext submissionContext) throws InvalidResourceRequestException { // Validation of the ApplicationSubmissionContext needs to be completed @@ -383,18 +384,40 @@ private void validateResourceRequest( // Check whether AM resource requirements are within required limits if (!submissionContext.getUnmanagedAM()) { - ResourceRequest amReq = BuilderUtils.newResourceRequest( - RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY, - submissionContext.getResource(), 1); + ResourceRequest amReq; + if (submissionContext.getAMContainerResourceRequest() != null) { + amReq = submissionContext.getAMContainerResourceRequest(); + } else { + amReq = + BuilderUtils.newResourceRequest( + 
RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY, + submissionContext.getResource(), 1); + } + + // set label expression for AM container + if (null == amReq.getNodeLabelExpression()) { + amReq.setNodeLabelExpression(submissionContext + .getNodeLabelExpression()); + } + if (null == amReq.getPriority()) { + amReq.setPriority(RMAppAttemptImpl.AM_CONTAINER_PRIORITY); + } + amReq.setNumContainers(1); + try { SchedulerUtils.validateResourceRequest(amReq, - scheduler.getMaximumResourceCapability()); + scheduler.getMaximumResourceCapability(), + submissionContext.getQueue(), scheduler); } catch (InvalidResourceRequestException e) { LOG.warn("RM app submission failed in validating AM resource request" + " for application " + submissionContext.getApplicationId(), e); throw e; } + + return amReq; } + + return null; } private boolean isApplicationInFinalState(RMAppState rmAppState) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index a59965f..c34618d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -108,6 +109,10 @@ void setRMApplicationHistoryWriter( boolean isWorkPreservingRecoveryEnabled(); + DynamicNodeLabelsManager getNodeLabelManager(); + + public void setNodeLabelManager(DynamicNodeLabelsManager mgr); + long getEpoch(); ReservationSystem getReservationSystem(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index 78787ee..1c732be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; 
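[Editor's note] The RMAppManager hunk above encodes a small set of defaulting rules for the AM container request. A condensed, standalone sketch of just those rules follows; this is hedged: the class and method names are illustrative, not part of the patch, and AM_PRIORITY stands in for RMAppAttemptImpl.AM_CONTAINER_PRIORITY (priority 0).

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;

    public final class AmRequestDefaults {
      // Illustrative stand-in for RMAppAttemptImpl.AM_CONTAINER_PRIORITY.
      private static final Priority AM_PRIORITY = Priority.newInstance(0);

      /**
       * Mirrors validateAndCreateResourceRequest: prefer the explicit AM
       * request from the submission context, otherwise synthesize one for
       * ANY host; then fill in the app-level label expression, the AM
       * priority, and a container count of exactly one.
       */
      public static ResourceRequest defaultAmRequest(ResourceRequest explicitReq,
          Resource amResource, String appLabelExpression) {
        ResourceRequest amReq = (explicitReq != null)
            ? explicitReq
            : ResourceRequest.newInstance(AM_PRIORITY, ResourceRequest.ANY,
                amResource, 1);
        // Fall back to the application's label expression when the request
        // carries none of its own.
        if (amReq.getNodeLabelExpression() == null) {
          amReq.setNodeLabelExpression(appLabelExpression);
        }
        if (amReq.getPriority() == null) {
          amReq.setPriority(AM_PRIORITY);
        }
        amReq.setNumContainers(1); // the AM always needs exactly one container
        return amReq;
      }
    }

RMAppManager then hands the resulting request to RMAppImpl, which (per the later hunks in this patch) reuses it for every application attempt.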
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; @@ -91,6 +92,7 @@ private RMApplicationHistoryWriter rmApplicationHistoryWriter; private SystemMetricsPublisher systemMetricsPublisher; private ConfigurationProvider configurationProvider; + private DynamicNodeLabelsManager nodeLabelManager; private long epoch; private Clock systemClock = new SystemClock(); private long schedulerRecoveryStartTime = 0; @@ -406,6 +408,16 @@ void setEpoch(long epoch) { this.epoch = epoch; } + @Override + public DynamicNodeLabelsManager getNodeLabelManager() { + return nodeLabelManager; + } + + @Override + public void setNodeLabelManager(DynamicNodeLabelsManager mgr) { + nodeLabelManager = mgr; + } + public void setSchedulerRecoveryStartAndWaitTime(long waitTime) { this.schedulerRecoveryStartTime = systemClock.getTime(); this.schedulerRecoveryWaitTime = waitTime; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index 29c5953..46cad1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.resource.Resources; @@ -84,9 +85,11 @@ * requested memory/vcore is non-negative and not greater than max */ public static void validateResourceRequests(List ask, - Resource maximumResource) throws InvalidResourceRequestException { + Resource maximumResource, String queueName, YarnScheduler scheduler) + throws InvalidResourceRequestException { for (ResourceRequest resReq : ask) { - SchedulerUtils.validateResourceRequest(resReq, maximumResource); + SchedulerUtils.validateResourceRequest(resReq, maximumResource, + queueName, scheduler); } } @@ -137,12 +140,13 @@ public static void validateBlacklistRequest( * passed {@link AccessControlList} * @param acl the {@link AccessControlList} to check against * @param method the method name to be logged + * @param module, like AdminService or NodeLabelManager * @param LOG the logger to use * @return {@link UserGroupInformation} of the current user * @throws IOException */ public static UserGroupInformation verifyAccess( - AccessControlList acl, String method, final Log LOG) + AccessControlList acl, String method, String module, final Log LOG) throws IOException { UserGroupInformation user; try { @@ -159,7 +163,7 @@ public static UserGroupInformation verifyAccess( " to call '" + method + "'"); RMAuditLogger.logFailure(user.getShortUserName(), method, - acl.toString(), "AdminService", + acl.toString(), module, RMAuditLogger.AuditConstants.UNAUTHORIZED_USER); throw new 
AccessControlException("User " + user.getShortUserName() + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 3e5f138..68f123a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; @@ -320,6 +321,10 @@ protected AMLivelinessMonitor createAMLivelinessMonitor() { return new AMLivelinessMonitor(this.rmDispatcher); } + protected DynamicNodeLabelsManager createNodeLabelManager() { + return new DynamicNodeLabelsManager(); + } + protected DelegationTokenRenewer createDelegationTokenRenewer() { return new DelegationTokenRenewer(); } @@ -399,6 +404,10 @@ protected void serviceInit(Configuration configuration) throws Exception { AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor(); addService(amFinishingMonitor); rmContext.setAMFinishingMonitor(amFinishingMonitor); + + DynamicNodeLabelsManager nlm = createNodeLabelManager(); + addService(nlm); + rmContext.setNodeLabelManager(nlm); boolean isRecoveryEnabled = conf.getBoolean( YarnConfiguration.RECOVERY_ENABLED, @@ -960,7 +969,7 @@ protected void startWepApp() { * instance of {@link RMActiveServices} and initializes it. 
* @throws Exception */ - void createAndInitActiveServices() throws Exception { + protected void createAndInitActiveServices() throws Exception { activeServices = new RMActiveServices(); activeServices.init(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java index 1ebc19f..9e75ca1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java @@ -19,14 +19,16 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; public interface SchedulingEditPolicy { public void init(Configuration config, EventHandler dispatcher, - PreemptableResourceScheduler scheduler); + PreemptableResourceScheduler scheduler, + DynamicNodeLabelsManager labelManager); /** * This method is invoked at regular intervals. Internally the policy is diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java index 1682f7d..9d11365 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java @@ -57,7 +57,8 @@ public synchronized SchedulingEditPolicy getSchedulingEditPolicy() { @SuppressWarnings("unchecked") public void serviceInit(Configuration conf) throws Exception { scheduleEditPolicy.init(conf, rmContext.getDispatcher().getEventHandler(), - (PreemptableResourceScheduler) rmContext.getScheduler()); + (PreemptableResourceScheduler) rmContext.getScheduler(), + rmContext.getNodeLabelManager()); this.monitorInterval = scheduleEditPolicy.getMonitoringInterval(); super.serviceInit(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java index 0f48b0c..e11d3c1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java @@ -33,15 +33,18 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; @@ -125,6 +128,33 @@ private float percentageClusterPreemptionAllowed; private double naturalTerminationFactor; private boolean observeOnly; + private DynamicNodeLabelsManager labelManager; + + /* + * Bookkeeping for label-aware preemption. + * + * While building the queue tree in cloneQueues(), a leaf queue qA's + * + * resToBePreempted = min(guaranteed - current, pending) + * + * and whenever resToBePreempted > 0 it is accumulated into both + * totalResourceToBePreempted and labelToResourceToBePreempted: + * totalResourceToBePreempted += resToBePreempted + * labelToResourceToBePreempted[each label of qA] += resToBePreempted + * + * When trying to preempt a containerX from nodeY, first check + * totalResourceToBePreempted > 0. If it is not > 0, no more resource needs to be + * preempted.
Else: + * if (labelToResourceToBePreempted[any label belongs to nodeY] > 0): + * labelToResourceToBePreempted[label belongs to nodeY] -= containerX.res + * totalResourceToBePreempted -= containerX.res + * mark containerX will be preempted + */ + Resource totalResourceToBePreempted; + Map labelToResourceToBePreempted; + + Resource totalResource; + Map labelToResource; public ProportionalCapacityPreemptionPolicy() { clock = new SystemClock(); @@ -132,20 +162,23 @@ public ProportionalCapacityPreemptionPolicy() { public ProportionalCapacityPreemptionPolicy(Configuration config, EventHandler dispatcher, - CapacityScheduler scheduler) { - this(config, dispatcher, scheduler, new SystemClock()); + CapacityScheduler scheduler, DynamicNodeLabelsManager labelManager) { + this(config, dispatcher, scheduler, new SystemClock(), labelManager); } public ProportionalCapacityPreemptionPolicy(Configuration config, EventHandler dispatcher, - CapacityScheduler scheduler, Clock clock) { - init(config, dispatcher, scheduler); + CapacityScheduler scheduler, Clock clock, + DynamicNodeLabelsManager labelManager) { + init(config, dispatcher, scheduler, labelManager); this.clock = clock; } + @Override public void init(Configuration config, EventHandler disp, - PreemptableResourceScheduler sched) { + PreemptableResourceScheduler sched, + DynamicNodeLabelsManager labelManager) { LOG.info("Preemption monitor:" + this.getClass().getCanonicalName()); assert null == scheduler : "Unexpected duplicate call to init"; if (!(sched instanceof CapacityScheduler)) { @@ -164,20 +197,72 @@ public void init(Configuration config, config.getFloat(TOTAL_PREEMPTION_PER_ROUND, (float) 0.1); observeOnly = config.getBoolean(OBSERVE_ONLY, false); rc = scheduler.getResourceCalculator(); + this.labelManager = labelManager; + labelToResourceToBePreempted = new HashMap(); } @VisibleForTesting public ResourceCalculator getResourceCalculator() { return rc; } + + @VisibleForTesting + public void setNodeLabelManager(DynamicNodeLabelsManager mgr) { + this.labelManager = mgr; + } @Override public void editSchedule(){ + totalResourceToBePreempted = Resource.newInstance(0, 0); + labelToResourceToBePreempted.clear(); + CSQueue root = scheduler.getRootQueue(); Resource clusterResources = Resources.clone(scheduler.getClusterResource()); + + updateTotalResource(clusterResources); containerBasedPreemptOrKill(root, clusterResources); } + + private void updateTotalResource(Resource clusterResource) { + totalResource = Resource.newInstance(0, 0); + labelToResource = new HashMap(); + Map> nodeToLabels = labelManager.getNodeLabels(); + + for (SchedulerNode schedulerNode : scheduler.getSchedulerNodes()) { + Resource nodeTotal = schedulerNode.getTotalResource(); + if (Resources.greaterThan(rc, clusterResource, nodeTotal, + Resources.none())) { + Set labels = + nodeToLabels.get(schedulerNode.getNodeName()); + if (labels != null && !labels.isEmpty()) { + for (String label : labels) { + if (!labelToResource.containsKey(label)) { + labelToResource.put(label, Resource.newInstance(0, 0)); + } + Resources.addTo(labelToResource.get(label), nodeTotal); + } + } else { + if (!labelToResource.containsKey("")) { + labelToResource.put("", Resource.newInstance(0, 0)); + } + Resources.addTo(labelToResource.get(""), nodeTotal); + } + + Resources.addTo(totalResource, nodeTotal); + } + } + } + + private void updateResourceToBePreempted(List queues, + Resource clusterResources) { + for (TempQueue queue : queues) { + // set totalResourceToBePreempted and 
label-to-resource-to-be-preempted + if (queue.leafQueue != null) { + addResourceToBePreempted(queue, clusterResources); + } + } + } /** * This method selects and tracks containers to be preempted. If a container @@ -202,6 +287,7 @@ private void containerBasedPreemptOrKill(CSQueue root, percentageClusterPreemptionAllowed); List queues = recursivelyComputeIdealAssignment(tRoot, totalPreemptionAllowed); + updateResourceToBePreempted(queues, clusterResources); // based on ideal allocation select containers to be preempted from each // queue and each application @@ -358,7 +444,7 @@ private void computeIdealResourceDistribution(ResourceCalculator rc, } } - + /** * Given a set of queues compute the fix-point distribution of unassigned * resources among them. As pending request of a queue are exhausted, the @@ -370,10 +456,13 @@ private void computeIdealResourceDistribution(ResourceCalculator rc, private void computeFixpointAllocation(ResourceCalculator rc, Resource tot_guarant, Collection qAlloc, Resource unassigned, boolean ignoreGuarantee) { + Resource wQassigned = Resource.newInstance(1, 1); + //assign all cluster resources until no more demand, or no resources are left - while (!qAlloc.isEmpty() && Resources.greaterThan(rc, tot_guarant, - unassigned, Resources.none())) { - Resource wQassigned = Resource.newInstance(0, 0); + while (!qAlloc.isEmpty() + && Resources.greaterThan(rc, tot_guarant, unassigned, Resources.none()) + && Resources.greaterThan(rc, tot_guarant, wQassigned, Resources.none())) { + wQassigned = Resource.newInstance(0, 0); // we compute normalizedGuarantees capacity based on currently active // queues @@ -383,21 +472,91 @@ private void computeFixpointAllocation(ResourceCalculator rc, // their share of over-capacity for (Iterator i = qAlloc.iterator(); i.hasNext();) { TempQueue sub = i.next(); - Resource wQavail = - Resources.multiply(unassigned, sub.normalizedGuarantee); - Resource wQidle = sub.offer(wQavail, rc, tot_guarant); - Resource wQdone = Resources.subtract(wQavail, wQidle); - // if the queue returned a value > 0 it means it is fully satisfied - // and it is removed from the list of active queues qAlloc - if (!Resources.greaterThan(rc, tot_guarant, - wQdone, Resources.none())) { + + if (!Double.isInfinite(sub.normalizedGuarantee)) { + Resource wQavail = + Resources.multiply(unassigned, sub.normalizedGuarantee); + Resource wQidle = sub.offer(wQavail, rc, tot_guarant); + Resource wQdone = Resources.subtract(wQavail, wQidle); + + if (sub.children.isEmpty()) { + Resource maxAvailableResource = + duductAvailableResourceAccordingToLabel(sub, + Resources.clone(wQdone), tot_guarant); + wQdone = + Resources.min(rc, tot_guarant, wQdone, maxAvailableResource); + } + Resources.addTo(sub.idealAssigned, wQdone); + + // if the queue returned a value > 0 it means it is fully satisfied + // and it is removed from the list of active queues qAlloc + if (!Resources.greaterThan(rc, tot_guarant, wQdone, Resources.none())) { + i.remove(); + } + Resources.addTo(wQassigned, wQdone); + } else { i.remove(); } - Resources.addTo(wQassigned, wQdone); } Resources.subtractFrom(unassigned, wQassigned); } } + + private Resource duductAvailableResource(String label, Resource maxCap, + Resource clusterResource) { + if (Resources.lessThanOrEqual(rc, clusterResource, totalResource, + Resources.none())) { + return Resources.none(); + } + + if (labelToResource.containsKey(label)) { + if (Resources.greaterThan(rc, clusterResource, + labelToResource.get(label), Resources.none())) { + // deduct-available-res = 
min(maxCap, totalResourceAvailable, + // availableResource[label]) + Resource min = + Resources.clone(Resources.min( + rc, + clusterResource, + Resources.min(rc, clusterResource, + labelToResource.get(label), maxCap), + totalResource)); + if (Resources.greaterThan(rc, clusterResource, min, Resources.none())) { + Resources.subtractFrom(maxCap, min); + Resources.subtractFrom(totalResource, min); + Resources.subtractFrom(labelToResource.get(label), min); + } + + return min; + } + } + + return Resources.none(); + } + + private Resource duductAvailableResourceAccordingToLabel(TempQueue q, + Resource maxCap, Resource clusterResource) { + Resource maxAvailableResource = Resource.newInstance(0, 0); + + if (Resources.lessThanOrEqual(rc, clusterResource, totalResource, + Resources.none())) { + return Resources.none(); + } + + // check if we have empty-label available resource + Resources.addTo(maxAvailableResource, + duductAvailableResource("", maxCap, clusterResource)); + + // check if we have any resource available for labels of the queue + if (q.labels != null) { + for (String label : q.labels) { + Resources.addTo(maxAvailableResource, + duductAvailableResource(label, maxCap, clusterResource)); + } + } + + return maxAvailableResource; + } /** * Computes a normalizedGuaranteed capacity based on active queues @@ -408,18 +567,25 @@ private void computeFixpointAllocation(ResourceCalculator rc, private void resetCapacity(ResourceCalculator rc, Resource clusterResource, Collection queues, boolean ignoreGuar) { Resource activeCap = Resource.newInstance(0, 0); + int availableQueueSize = queues.size(); if (ignoreGuar) { for (TempQueue q : queues) { - q.normalizedGuarantee = (float) 1.0f / ((float) queues.size()); + if (!Double.isInfinite(q.normalizedGuarantee)) { + q.normalizedGuarantee = 1.0f / availableQueueSize; + } } } else { for (TempQueue q : queues) { - Resources.addTo(activeCap, q.guaranteed); + if (!Double.isInfinite(q.normalizedGuarantee)) { + Resources.addTo(activeCap, q.guaranteed); + } } for (TempQueue q : queues) { - q.normalizedGuarantee = Resources.divide(rc, clusterResource, - q.guaranteed, activeCap); + if (!Double.isInfinite(q.normalizedGuarantee)) { + q.normalizedGuarantee = + Resources.divide(rc, clusterResource, q.guaranteed, activeCap); + } } } } @@ -514,6 +680,10 @@ private void preemptAMContainers(Resource clusterResource, maxAMCapacityForThisQueue)) { break; } + if (!possiblePendingRequestOnNode(clusterResource, c.getContainer() + .getNodeId(), c.getContainer().getResource())) { + continue; + } Set contToPrempt = preemptMap.get(c .getApplicationAttemptId()); if (null == contToPrempt) { @@ -578,12 +748,50 @@ private void preemptAMContainers(Resource clusterResource, Resources.addTo(skippedAMSize, c.getContainer().getResource()); continue; } - ret.add(c); - Resources.subtractFrom(rsrcPreempt, c.getContainer().getResource()); + + if (possiblePendingRequestOnNode(clusterResource, c.getContainer() + .getNodeId(), c.getContainer().getResource())) { + ret.add(c); + Resources.subtractFrom(rsrcPreempt, c.getContainer().getResource()); + } } return ret; } + + protected boolean possiblePendingRequestOnNode(Resource clusterResource, + NodeId nodeId, Resource containerRes) { + if (labelManager == null) { + return true; + } + + if (!Resources.greaterThan(rc, clusterResource, totalResourceToBePreempted, + Resources.none())) { + return false; + } + + Set labels = labelManager.getLabelsOnNode(nodeId); + + if (labels != null && !labels.isEmpty()) { + boolean isPossible = false; + // there're some 
labels on this node, so we will check if any of + // labelToResourceToBePreempted[label belongs to the node] > 0 + for (String label : labels) { + Resource res = labelToResourceToBePreempted.get(label); + res = res == null ? Resources.none() : res; + if (Resources.greaterThan(rc, clusterResource, res, Resources.none())) { + Resources.subtractFrom(res, containerRes); + isPossible = true; + } + } + + if (!isPossible) { + return false; + } + } + Resources.subtractFrom(totalResourceToBePreempted, containerRes); + return true; + } /** * Compare by reversed priority order first, and then reversed containerId @@ -617,6 +825,26 @@ public String getPolicyName() { return "ProportionalCapacityPreemptionPolicy"; } + private void addResourceToBePreempted(TempQueue leafQueue, + Resource clusterResources) { + Resource toBePreempted = + Resources.min(rc, clusterResources, + Resources.subtract(leafQueue.idealAssigned, leafQueue.current), + leafQueue.pending); + if (Resources.greaterThan(rc, clusterResources, toBePreempted, + Resources.none())) { + Resources.addTo(totalResourceToBePreempted, toBePreempted); + if (leafQueue.labels != null) { + for (String label : leafQueue.labels) { + if (!labelToResourceToBePreempted.containsKey(label)) { + labelToResourceToBePreempted.put(label, Resource.newInstance(0, 0)); + } + Resources.addTo(labelToResourceToBePreempted.get(label), + toBePreempted); + } + } + } + } /** * This method walks a tree of CSQueue and clones the portion of the state @@ -642,14 +870,28 @@ private TempQueue cloneQueues(CSQueue root, Resource clusterResources) { if (root instanceof LeafQueue) { LeafQueue l = (LeafQueue) root; Resource pending = l.getTotalResourcePending(); + + // it is possible queue's guaranteed resource cannot be satisfied because + // of labels, set min(guaranteed, resourceConsiderLabels) as guaranteed + // resource + if (labelManager != null) { + Resource queueResRespectLabels = + labelManager.getQueueResource(l.getQueueName(), l.getAccessibleLabels(), + clusterResources); + guaranteed = + Resources.min(rc, clusterResources, queueResRespectLabels, + guaranteed); + maxCapacity = + Resources.min(rc, clusterResources, queueResRespectLabels, + maxCapacity); + } ret = new TempQueue(queueName, current, pending, guaranteed, - maxCapacity); - + maxCapacity, l.getAccessibleLabels()); ret.setLeafQueue(l); } else { Resource pending = Resource.newInstance(0, 0); ret = new TempQueue(root.getQueueName(), current, pending, guaranteed, - maxCapacity); + maxCapacity, root.getAccessibleLabels()); for (CSQueue c : root.getChildQueues()) { ret.addChild(cloneQueues(c, clusterResources)); } @@ -695,9 +937,10 @@ public int compare(TempQueue o1, TempQueue o2) { final ArrayList children; LeafQueue leafQueue; + Set labels; TempQueue(String queueName, Resource current, Resource pending, - Resource guaranteed, Resource maxCapacity) { + Resource guaranteed, Resource maxCapacity, Set labels) { this.queueName = queueName; this.current = current; this.pending = pending; @@ -706,8 +949,9 @@ public int compare(TempQueue o1, TempQueue o2) { this.idealAssigned = Resource.newInstance(0, 0); this.actuallyPreempted = Resource.newInstance(0, 0); this.toBePreempted = Resource.newInstance(0, 0); - this.normalizedGuarantee = Float.NaN; + this.normalizedGuarantee = Double.NaN; this.children = new ArrayList(); + this.labels = labels; } public void setLeafQueue(LeafQueue l){ @@ -746,7 +990,6 @@ Resource offer(Resource avail, ResourceCalculator rc, Resources.min(rc, clusterResource, avail, Resources.subtract( 
Resources.add(current, pending), idealAssigned))); Resource remain = Resources.subtract(avail, accepted); - Resources.addTo(idealAssigned, accepted); return remain; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DynamicNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DynamicNodeLabelsManager.java new file mode 100644 index 0000000..9b80676 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DynamicNodeLabelsManager.java @@ -0,0 +1,398 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.nodelabels; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.nodelabels.NodeLabelsManager; +import org.apache.hadoop.yarn.util.resource.Resources; + +import com.google.common.collect.ImmutableSet; + +public class DynamicNodeLabelsManager extends NodeLabelsManager { + + ConcurrentMap queueCollections = + new ConcurrentHashMap(); + + @Override + public void addLabelsToNode(Map> addedLabelsToNode) + throws IOException { + try { + writeLock.lock(); + + checkAddLabelsToNode(addedLabelsToNode); + + // get nodesCollection before edition + Map before = cloneNodeMap(addedLabelsToNode.keySet()); + + super.addLabelsToNode(addedLabelsToNode); + + // get nodesCollection after edition + Map after = cloneNodeMap(addedLabelsToNode.keySet()); + + // update running nodes resources + updateRunningNodes(before, after); + } finally { + writeLock.unlock(); + } + } + + @Override + protected void checkRemoveFromClusterNodeLabels( + Collection labelsToRemove) throws IOException { + + super.checkRemoveFromClusterNodeLabels(labelsToRemove); + + // Check if label to remove doesn't existed or null/empty, will throw + // exception if any of labels to remove doesn't meet requirement + for (String label : labelsToRemove) { + label = normalizeLabel(label); + + // check if any queue contains this label + for (Entry entry : queueCollections.entrySet()) { + String queueName = entry.getKey(); + Set queueLabels = entry.getValue().labels; + if (queueLabels.contains(label)) { + throw new 
IOException("Cannot remove label=" + label + + ", because queue=" + queueName + " is using this label. " + + "Please remove label on queue before remove the label"); + } + } + } + } + + @Override + public void removeFromClusterNodeLabels(Collection labelsToRemove) + throws IOException { + try { + writeLock.lock(); + + checkRemoveFromClusterNodeLabels(labelsToRemove); + + // copy before NMs + Map before = cloneNodeMap(); + + internalRemoveFromClusterNodeLabels(labelsToRemove); + + updateRunningNodes(before, nodeCollections); + } finally { + writeLock.unlock(); + } + } + + @Override + public void + removeLabelsFromNode(Map> removeLabelsFromNode) + throws IOException { + try { + writeLock.lock(); + + checkRemoveLabelsFromNode(removeLabelsFromNode); + + // get nodesCollection before edition + Map before = + cloneNodeMap(removeLabelsFromNode.keySet()); + + internalRemoveLabelsFromNode(removeLabelsFromNode); + + // get nodesCollection before edition + Map after = cloneNodeMap(removeLabelsFromNode.keySet()); + + // update running nodes resources + updateRunningNodes(before, after); + } finally { + writeLock.unlock(); + } + } + + @Override + public void replaceLabelsOnNode(Map> replaceLabelsToNode) + throws IOException { + try { + writeLock.lock(); + + checkReplaceLabelsOnNode(replaceLabelsToNode); + + // get nodesCollection before edition + Map before = cloneNodeMap(replaceLabelsToNode.keySet()); + + internalReplaceLabelsOnNode(replaceLabelsToNode); + + // get nodesCollection after edition + Map after = cloneNodeMap(replaceLabelsToNode.keySet()); + + // update running nodes resources + updateRunningNodes(before, after); + } finally { + writeLock.unlock(); + } + } + + + /* + * Following methods are used for setting if a node is up and running, and it + * will update running nodes resource + */ + public void activateNode(NodeId nodeId, Resource resource) { + try { + writeLock.lock(); + + // save if we have a node before + Map before = cloneNodeMap(ImmutableSet.of(nodeId)); + + createNodeIfNonExisted(nodeId); + NMType nm = getNMInNodeSet(nodeId); + nm.resource = resource; + nm.running = true; + + // get the node after edition + Map after = cloneNodeMap(ImmutableSet.of(nodeId)); + + updateRunningNodes(before, after); + } finally { + writeLock.unlock(); + } + } + + /* + * Following methods are used for setting if a node unregistered to RM + */ + public void deactivateNode(NodeId nodeId) { + try { + writeLock.lock(); + + // save if we have a node before + Map before = cloneNodeMap(ImmutableSet.of(nodeId)); + NMType nm = getNMInNodeSet(nodeId); + if (null != nm) { + // set nm is not running, and its resource = 0 + nm.running = false; + nm.resource = Resource.newInstance(0, 0); + } + + // get the node after edition + Map after = cloneNodeMap(ImmutableSet.of(nodeId)); + + updateRunningNodes(before, after); + } finally { + writeLock.unlock(); + } + } + + public void updateNodeResource(NodeId node, Resource newResource) { + deactivateNode(node); + activateNode(node, newResource); + } + + public void reinitializeQueueLabels(Map> queueToLabels) { + try { + writeLock.lock(); + // clear before set + this.queueCollections.clear(); + + for (Entry> entry : queueToLabels.entrySet()) { + String queue = entry.getKey(); + QueueType q = new QueueType(); + this.queueCollections.put(queue, q); + + Set labels = entry.getValue(); + if (labels.contains(ANY)) { + continue; + } + + q.labels.addAll(labels); + for (NodeType node : nodeCollections.values()) { + for (Entry nentry : node.nms.entrySet()) { + NodeId nodeId = 
nentry.getKey(); + NMType nm = nentry.getValue(); + if (nm.running && isNodeUsableByQueue(getLabelsByNode(nodeId), q)) { + Resources.addTo(q.resource, nm.resource); + } + } + } + } + } finally { + writeLock.unlock(); + } + } + + public Resource getQueueResource(String queueName, Set queueLabels, + Resource clusterResource) { + if (queueLabels.contains(ANY)) { + return clusterResource; + } + QueueType q = queueCollections.get(queueName); + if (null == q) { + return Resources.none(); + } + return q.resource; + } + + private Map cloneNodeMap(Set nodesToCopy) { + Map map = new HashMap(); + for (NodeId nodeId : nodesToCopy) { + if (!map.containsKey(nodeId.getHost())) { + NodeType originalN = nodeCollections.get(nodeId.getHost()); + if (null == originalN) { + continue; + } + NodeType n = originalN.clone(); + n.nms.clear(); + map.put(nodeId.getHost(), n); + } + + NodeType n = map.get(nodeId.getHost()); + if (WILDCARD_PORT == nodeId.getPort()) { + for (Entry entry : nodeCollections + .get(nodeId.getHost()).nms.entrySet()) { + n.nms.put(entry.getKey(), entry.getValue().clone()); + } + } else { + NMType nm = getNMInNodeSet(nodeId); + if (null != nm) { + n.nms.put(nodeId, nm.clone()); + } + } + } + return map; + } + + private void updateRunningNodes(Map before, + Map after) { + // Get NMs in before only + Set allNMs = new HashSet(); + for (Entry entry : before.entrySet()) { + allNMs.addAll(entry.getValue().nms.keySet()); + } + for (Entry entry : after.entrySet()) { + allNMs.addAll(entry.getValue().nms.keySet()); + } + + // traverse all nms + for (NodeId nodeId : allNMs) { + if (getNMInNodeSet(nodeId, before, true) != null) { + NMType oldNM = getNMInNodeSet(nodeId, before, true); + Set oldLabels = getLabelsByNode(nodeId, before); + // no label in the past + if (oldLabels.isEmpty()) { + // update labels + LabelType label = labelCollections.get(NO_LABEL); + Resources.subtractFrom(label.resource, oldNM.resource); + + // update queues, all queue can access this node + for (QueueType q : queueCollections.values()) { + Resources.subtractFrom(q.resource, oldNM.resource); + } + } else { + // update labels + for (String labelName : oldLabels) { + LabelType label = labelCollections.get(labelName); + if (null == label) { + continue; + } + Resources.subtractFrom(label.resource, oldNM.resource); + } + + // update queues, only queue can access this node will be subtract + for (QueueType q : queueCollections.values()) { + if (isNodeUsableByQueue(oldLabels, q)) { + Resources.subtractFrom(q.resource, oldNM.resource); + } + } + } + } + + if (getNMInNodeSet(nodeId, after, true) != null) { + NMType newNM = getNMInNodeSet(nodeId, after, true); + Set newLabels = getLabelsByNode(nodeId, after); + // no label in the past + if (newLabels.isEmpty()) { + // update labels + LabelType label = labelCollections.get(NO_LABEL); + Resources.addTo(label.resource, newNM.resource); + + // update queues, all queue can access this node + for (QueueType q : queueCollections.values()) { + Resources.addTo(q.resource, newNM.resource); + } + } else { + // update labels + for (String labelName : newLabels) { + LabelType label = labelCollections.get(labelName); + Resources.addTo(label.resource, newNM.resource); + } + + // update queues, only queue can access this node will be subtract + for (QueueType q : queueCollections.values()) { + if (isNodeUsableByQueue(newLabels, q)) { + Resources.addTo(q.resource, newNM.resource); + } + } + } + } + } + } + + public Resource getResourceByLabel(String label, Resource clusterResource) { + label = 
normalizeLabel(label); + try { + readLock.lock(); + if (null == labelCollections.get(label)) { + return Resources.none(); + } + return labelCollections.get(label).resource; + } finally { + readLock.unlock(); + } + } + + private boolean isNodeUsableByQueue(Set nodeLabels, QueueType q) { + // node without any labels can be accessed by any queue + if (nodeLabels == null || nodeLabels.isEmpty() + || (nodeLabels.size() == 1 && nodeLabels.contains(NO_LABEL))) { + return true; + } + + for (String label : nodeLabels) { + if (q.labels.contains(label)) { + return true; + } + } + + return false; + } + + private Map cloneNodeMap() { + Set nodesToCopy = new HashSet(); + for (String nodeName : nodeCollections.keySet()) { + nodesToCopy.add(NodeId.newInstance(nodeName, WILDCARD_PORT)); + } + return cloneNodeMap(nodesToCopy); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacitySchedulerPlanFollower.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacitySchedulerPlanFollower.java index 0c0fbc0..126560a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacitySchedulerPlanFollower.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacitySchedulerPlanFollower.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -126,14 +127,18 @@ public synchronized void synchronizePlan(Plan plan) { // create the default reservation queue if it doesnt exist String defReservationQueue = planQueueName + PlanQueue.DEFAULT_QUEUE_SUFFIX; if (scheduler.getQueue(defReservationQueue) == null) { - ReservationQueue defQueue = - new ReservationQueue(scheduler, defReservationQueue, planQueue); try { + ReservationQueue defQueue = + new ReservationQueue(scheduler, defReservationQueue, planQueue); scheduler.addQueue(defQueue); } catch (SchedulerDynamicEditException e) { LOG.warn( "Exception while trying to create default reservation queue for plan: {}", planQueueName, e); + } catch (IOException e) { + LOG.warn( + "Exception while trying to create default reservation queue for plan: {}", + planQueueName, e); } } curReservationNames.add(defReservationQueue); @@ -186,14 +191,18 @@ public synchronized void synchronizePlan(Plan plan) { for (ReservationAllocation res : sortedAllocations) { String currResId = res.getReservationId().toString(); if (curReservationNames.contains(currResId)) { - ReservationQueue resQueue = - new ReservationQueue(scheduler, currResId, planQueue); try { + ReservationQueue resQueue = + new ReservationQueue(scheduler, currResId, planQueue); scheduler.addQueue(resQueue); } catch (SchedulerDynamicEditException e) { LOG.warn( "Exception while trying to activate reservation: {} for plan: {}", currResId, planQueueName, e); + } catch (IOException e) { + LOG.warn( + "Exception while trying to activate reservation: {} for plan: {}", + currResId, planQueueName, e); } } Resource capToAssign = res.getResourcesAtTime(now); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index c0681aa..1994b36 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; @@ -143,6 +144,7 @@ private RMAppEvent eventCausingFinalSaving; private RMAppState targetedFinalState; private RMAppState recoveredFinalState; + private ResourceRequest amReq; Object transitionTodo; @@ -342,7 +344,8 @@ public RMAppImpl(ApplicationId applicationId, RMContext rmContext, Configuration config, String name, String user, String queue, ApplicationSubmissionContext submissionContext, YarnScheduler scheduler, ApplicationMasterService masterService, long submitTime, - String applicationType, Set applicationTags) { + String applicationType, Set applicationTags, + ResourceRequest amReq) { this.systemClock = new SystemClock(); @@ -361,6 +364,7 @@ public RMAppImpl(ApplicationId applicationId, RMContext rmContext, this.startTime = this.systemClock.getTime(); this.applicationType = applicationType; this.applicationTags = applicationTags; + this.amReq = amReq; int globalMaxAppAttempts = conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); @@ -732,7 +736,7 @@ private void createNewAttempt() { // previously failed attempts(which should not include Preempted, // hardware error and NM resync) + 1) equal to the max-attempt // limit. 
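// [Editor's note, hedged: the amReq argument added below is the AM container
// ResourceRequest that RMAppManager.validateAndCreateResourceRequest builds
// and validates earlier in this patch; storing it on the app and handing it
// to every RMAppAttemptImpl lets each attempt reuse the same label-aware AM
// request instead of re-deriving it from the submission context.]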
- maxAppAttempts == (getNumFailedAppAttempts() + 1)); + maxAppAttempts == (getNumFailedAppAttempts() + 1), amReq); attempts.put(appAttemptId, attempt); currentAttempt = attempt; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index fbcb7d7..3fc7f68 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -93,7 +93,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; import org.apache.hadoop.yarn.state.InvalidStateTransitonException; import org.apache.hadoop.yarn.state.MultipleArcTransition; @@ -177,6 +176,7 @@ private Object transitionTodo; private RMAppAttemptMetrics attemptMetrics = null; + private ResourceRequest amReq = null; private static final StateMachineFactory { + @SuppressWarnings("deprecation") @Override public RMAppAttemptState transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java index a423ea5..afbcbc7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java @@ -20,6 +20,7 @@ import java.util.List; +import java.util.Set; import org.apache.hadoop.net.Node; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -135,4 +136,11 @@ * @return containerUpdates accumulated across NM heartbeats. 
*/ public List pullContainerUpdates(); + + /** + * Get the set of labels on this node + * + * @return labels on this node + */ + public Set getNodeLabels(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index c960b50..13d60ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -855,4 +855,12 @@ public int getQueueSize() { public Set getLaunchedContainers() { return this.launchedContainers; } + + @Override + public Set getNodeLabels() { + if (context.getNodeLabelManager() == null) { + return null; + } + return context.getNodeLabelManager().getLabelsOnNode(nodeId); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 8e8d627..c6a322d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -442,6 +442,14 @@ protected void releaseContainers(List containers, public SchedulerNode getSchedulerNode(NodeId nodeId) { return nodes.get(nodeId); } + + public List getSchedulerNodes() { + List snodes = new ArrayList(); + for (N node : nodes.values()) { + snodes.add(node); + } + return snodes; + } @Override public synchronized void moveAllApps(String sourceQueue, String destQueue) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java index 0bc8ca1..a397189 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Evolving; @@ -71,4 +72,22 @@ */ public void recoverContainer(Resource clusterResource, SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer); + + /** + * Get the labels this queue can access:
+ * labels={*} means this queue can access any label + * labels={} means this queue can only access nodes without labels + * labels={a, b, c} means this queue can access label a, b or c + * @return labels + */ + public Set getAccessibleLabels(); + + /** + * Get the default label expression of this queue. It is used when neither + * the ApplicationSubmissionContext nor the ResourceRequest sets a label + * expression. + * + * @return default label expression + */ + public String getDefaultLabelExpression(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index ac37c2f..1dde29a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -17,23 +17,29 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; +import java.io.IOException; import java.util.List; +import java.util.Set; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; +import com.google.common.collect.Sets; + /** * Utilities shared by schedulers.
*/ @@ -190,7 +196,8 @@ public static void normalizeRequest( * request */ public static void validateResourceRequest(ResourceRequest resReq, - Resource maximumResource) throws InvalidResourceRequestException { + Resource maximumResource, String queueName, YarnScheduler scheduler) + throws InvalidResourceRequestException { if (resReq.getCapability().getMemory() < 0 || resReq.getCapability().getMemory() > maximumResource.getMemory()) { throw new InvalidResourceRequestException("Invalid resource request" @@ -209,5 +216,115 @@ public static void validateResourceRequest(ResourceRequest resReq, + resReq.getCapability().getVirtualCores() + ", maxVirtualCores=" + maximumResource.getVirtualCores()); } + + // Get queue from scheduler + QueueInfo queueInfo = null; + try { + queueInfo = scheduler.getQueueInfo(queueName, false, false); + } catch (IOException e) { + // the queue may not be resolvable yet when queue mappings are in effect; + // ignore the missing queueInfo here and move forward + } + + // check labels in the resource request. + String labelExp = resReq.getNodeLabelExpression(); + + // if the queue has a default label expression and the resource request + // does not, use the queue's default + if (labelExp == null && queueInfo != null) { + labelExp = queueInfo.getDefaultNodeLabelExpression(); + resReq.setNodeLabelExpression(labelExp); + } + + if (labelExp != null && !labelExp.trim().isEmpty() && queueInfo != null) { + if (!checkQueueLabelExpression(queueInfo.getNodeLabels(), + labelExp)) { + throw new InvalidResourceRequestException("Invalid resource request" + + ", queue=" + + queueInfo.getQueueName() + + " doesn't have permission to access all labels " + + "in the resource request. labelExpression of resource request=" + + labelExp + + ". Queue labels=" + + (queueInfo.getNodeLabels() == null ?
"" : StringUtils.join(queueInfo + .getNodeLabels().iterator(), ','))); + } + } + } + + public static boolean checkQueueAccessToNode(Set queueLabels, + Set nodeLabels) { + // if queue's label is *, it can access any node + if (queueLabels != null && queueLabels.contains(DynamicNodeLabelsManager.ANY)) { + return true; + } + // any queue can access to a node without label + if (nodeLabels == null || nodeLabels.isEmpty()) { + return true; + } + // a queue can access to a node only if it contains any label of the node + if (queueLabels != null + && Sets.intersection(queueLabels, nodeLabels).size() > 0) { + return true; + } + // sorry, you cannot access + return false; + } + + public static void checkAndThrowIfLabelNotIncluded(DynamicNodeLabelsManager mgr, + Set labels) throws IOException { + if (mgr == null) { + if (labels != null && !labels.isEmpty()) { + throw new IOException("NodeLabelManager is null, please check"); + } + return; + } + + if (labels != null) { + for (String label : labels) { + if (!mgr.containsNodeLabel(label)) { + throw new IOException("NodeLabelManager doesn't include label = " + + label + ", please check."); + } + } + } + } + + public static boolean checkNodeLabelExpression(Set nodeLabels, + String labelExpression) { + // empty label expression can only allocate on node with empty labels + if (labelExpression == null || labelExpression.trim().isEmpty()) { + if (!nodeLabels.isEmpty()) { + return false; + } + } + + if (labelExpression != null) { + for (String str : labelExpression.split("&&")) { + if (!str.trim().isEmpty() + && (nodeLabels == null || !nodeLabels.contains(str.trim()))) { + return false; + } + } + } + return true; + } + + public static boolean checkQueueLabelExpression(Set queueLabels, + String labelExpression) { + if (queueLabels != null && queueLabels.contains(DynamicNodeLabelsManager.ANY)) { + return true; + } + // if label expression is empty, we can allocate container on any node + if (labelExpression == null) { + return true; + } + for (String str : labelExpression.split("&&")) { + if (!str.trim().isEmpty() + && (queueLabels == null || !queueLabels.contains(str.trim()))) { + return false; + } + } + return true; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java new file mode 100644 index 0000000..d8ff831 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java @@ -0,0 +1,444 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import java.io.IOException; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.yarn.api.records.QueueACL; +import org.apache.hadoop.yarn.api.records.QueueInfo; +import org.apache.hadoop.yarn.api.records.QueueState; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.factories.RecordFactory; +import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; +import org.apache.hadoop.yarn.util.resource.ResourceCalculator; +import org.apache.hadoop.yarn.util.resource.Resources; + +import com.google.common.collect.Sets; + +public abstract class AbstractCSQueue implements CSQueue { + + CSQueue parent; + final String queueName; + + float capacity; + float maximumCapacity; + float absoluteCapacity; + float absoluteMaxCapacity; + float absoluteUsedCapacity = 0.0f; + + float usedCapacity = 0.0f; + volatile int numApplications; + volatile int numContainers; + + final Resource minimumAllocation; + final Resource maximumAllocation; + QueueState state; + final QueueMetrics metrics; + + final ResourceCalculator resourceCalculator; + Set labels; + DynamicNodeLabelsManager labelManager; + String defaultLabelExpression; + Resource usedResources = Resources.createResource(0, 0); + QueueInfo queueInfo; + final Comparator queueComparator; + Map absoluteNodeLabelCapacities; + Map nodeLabelCapacities; + Map usedResourcesByLabels = new HashMap(); + Map absoluteMaximumNodeLabelCapacities; + Map maximumNodeLabelCapacities; + + Map acls = + new HashMap(); + boolean reservationsContinueLooking; + + private final RecordFactory recordFactory = + RecordFactoryProvider.getRecordFactory(null); + + public AbstractCSQueue(CapacitySchedulerContext cs, + String queueName, CSQueue parent, CSQueue old) throws IOException { + this.minimumAllocation = cs.getMinimumResourceCapability(); + this.maximumAllocation = cs.getMaximumResourceCapability(); + this.labelManager = cs.getRMContext().getNodeLabelManager(); + this.parent = parent; + this.queueName = queueName; + this.resourceCalculator = cs.getResourceCalculator(); + this.queueComparator = cs.getQueueComparator(); + this.queueInfo = recordFactory.newRecordInstance(QueueInfo.class); + + // must be called after parent and queueName is set + this.metrics = old != null ? 
old.getMetrics() : + QueueMetrics.forQueue(getQueuePath(), parent, + cs.getConfiguration().getEnableUserMetrics(), + cs.getConf()); + + // get labels + this.labels = cs.getConfiguration().getAccessibleLabels(getQueuePath()); + this.defaultLabelExpression = cs.getConfiguration() + .getDefaultNodeLabelExpression(getQueuePath()); + + this.queueInfo.setQueueName(queueName); + + // inherit accessible labels from parent if not set + if (this.labels == null && parent != null) { + this.labels = parent.getAccessibleLabels(); + SchedulerUtils.checkAndThrowIfLabelNotIncluded(labelManager, this.labels); + } + + // inherit default label expression from parent if not set + if (this.defaultLabelExpression == null && parent != null + && this.labels.containsAll(parent.getAccessibleLabels())) { + this.defaultLabelExpression = parent.getDefaultLabelExpression(); + } + + // set capacity by labels + nodeLabelCapacities = + cs.getConfiguration().getNodeLabelCapacities(getQueuePath(), labels); + + // set maximum capacity by labels + maximumNodeLabelCapacities = + cs.getConfiguration().getMaximumNodeLabelCapacities(getQueuePath(), + labels); + } + + @Override + public synchronized float getCapacity() { + return capacity; + } + + @Override + public synchronized float getAbsoluteCapacity() { + return absoluteCapacity; + } + + @Override + public float getAbsoluteMaximumCapacity() { + return absoluteMaxCapacity; + } + + @Override + public synchronized float getAbsoluteUsedCapacity() { + return absoluteUsedCapacity; + } + + @Override + public float getMaximumCapacity() { + return maximumCapacity; + } + + @Override + public synchronized float getUsedCapacity() { + return usedCapacity; + } + + @Override + public synchronized Resource getUsedResources() { + return usedResources; + } + + public synchronized int getNumContainers() { + return numContainers; + } + + public synchronized int getNumApplications() { + return numApplications; + } + + @Override + public synchronized QueueState getState() { + return state; + } + + @Override + public QueueMetrics getMetrics() { + return metrics; + } + + @Override + public String getQueueName() { + return queueName; + } + + @Override + public synchronized CSQueue getParent() { + return parent; + } + + @Override + public synchronized void setParent(CSQueue newParentQueue) { + this.parent = (ParentQueue)newParentQueue; + } + + public Set getAccessibleLabels() { + return labels; + } + + @Override + public boolean hasAccess(QueueACL acl, UserGroupInformation user) { + synchronized (this) { + if (acls.get(acl).isUserAllowed(user)) { + return true; + } + } + + if (parent != null) { + return parent.hasAccess(acl, user); + } + + return false; + } + + @Override + public synchronized void setUsedCapacity(float usedCapacity) { + this.usedCapacity = usedCapacity; + } + + @Override + public synchronized void setAbsoluteUsedCapacity(float absUsedCapacity) { + this.absoluteUsedCapacity = absUsedCapacity; + } + + /** + * Set maximum capacity - used only for testing.
+ * @param maximumCapacity new max capacity + */ + synchronized void setMaxCapacity(float maximumCapacity) { + // Sanity check + CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity); + float absMaxCapacity = + CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent); + CSQueueUtils.checkAbsoluteCapacity(getQueueName(), absoluteCapacity, + absMaxCapacity); + + this.maximumCapacity = maximumCapacity; + this.absoluteMaxCapacity = absMaxCapacity; + } + + @Override + public float getAbsActualCapacity() { + // for now, simply return actual capacity = guaranteed capacity for parent + // queue + return absoluteCapacity; + } + + @Override + public String getDefaultLabelExpression() { + return defaultLabelExpression; + } + + synchronized void setupQueueConfigs(Resource clusterResource, float capacity, + float absoluteCapacity, float maximumCapacity, float absoluteMaxCapacity, + QueueState state, Map acls, + Set labels, String defaultLabelExpression, + Map nodeLabelCapacities, + Map maximumNodeLabelCapacities, boolean continueLooking) + throws IOException { + // Sanity check + CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity); + CSQueueUtils.checkAbsoluteCapacity(getQueueName(), absoluteCapacity, + absoluteMaxCapacity); + + this.capacity = capacity; + this.absoluteCapacity = absoluteCapacity; + + this.maximumCapacity = maximumCapacity; + this.absoluteMaxCapacity = absoluteMaxCapacity; + + this.state = state; + + this.acls = acls; + + // set labels + this.labels = labels; + + // set label expression + this.defaultLabelExpression = defaultLabelExpression; + + // copy node label capacity + this.nodeLabelCapacities = new HashMap(nodeLabelCapacities); + this.maximumNodeLabelCapacities = + new HashMap(maximumNodeLabelCapacities); + + this.queueInfo.setNodeLabels(this.labels); + this.queueInfo.setCapacity(this.capacity); + this.queueInfo.setMaximumCapacity(this.maximumCapacity); + this.queueInfo.setQueueState(this.state); + this.queueInfo.setDefaultNodeLabelExpression(this.defaultLabelExpression); + + // Update metrics + CSQueueUtils.updateQueueStatistics( + resourceCalculator, this, parent, clusterResource, minimumAllocation); + + // Check that the labels of this queue are a subset of its parent's; + // only do this when we are not root + if (parent != null && parent.getParent() != null) { + if (parent.getAccessibleLabels() != null + && !parent.getAccessibleLabels().contains(DynamicNodeLabelsManager.ANY)) { + // if parent isn't "*", child shouldn't be "*" too + if (this.getAccessibleLabels().contains(DynamicNodeLabelsManager.ANY)) { + throw new IOException("Parent's accessible labels are not ANY(*), " + + "but child's accessible labels contain *"); + } else { + Set diff = + Sets.difference(this.getAccessibleLabels(), + parent.getAccessibleLabels()); + if (!diff.isEmpty()) { + throw new IOException("Some labels of the child queue are not a subset " + + "of the parent queue's labels: [" + + StringUtils.join(diff, ",") + "]"); + } + } + } + } + + // calculate absolute capacity by each node label + this.absoluteNodeLabelCapacities = + CSQueueUtils.computeAbsoluteNodeLabelCapacities( + this.nodeLabelCapacities, parent); + + // calculate absolute maximum capacity by each node label + this.absoluteMaximumNodeLabelCapacities = + CSQueueUtils.computeAbsoluteMaximumNodeLabelCapacities( + maximumNodeLabelCapacities, parent); + + // check absoluteMaximumNodeLabelCapacities is valid + CSQueueUtils.checkAbsoluteCapacitiesByLabel(getQueueName(), + absoluteNodeLabelCapacities,
absoluteMaximumNodeLabelCapacities); + + this.reservationsContinueLooking = continueLooking; + } + + @Private + public Resource getMaximumAllocation() { + return maximumAllocation; + } + + @Private + public Resource getMinimumAllocation() { + return minimumAllocation; + } + + synchronized void allocateResource(Resource clusterResource, + Resource resource, Set nodeLabels) { + Resources.addTo(usedResources, resource); + + // Update usedResources by labels + if (nodeLabels == null || nodeLabels.isEmpty()) { + if (!usedResourcesByLabels.containsKey(DynamicNodeLabelsManager.NO_LABEL)) { + usedResourcesByLabels.put(DynamicNodeLabelsManager.NO_LABEL, + Resources.createResource(0)); + } + Resources.addTo(usedResourcesByLabels.get(DynamicNodeLabelsManager.NO_LABEL), + resource); + } else { + for (String label : Sets.intersection(labels, nodeLabels)) { + if (!usedResourcesByLabels.containsKey(label)) { + usedResourcesByLabels.put(label, Resources.createResource(0)); + } + Resources.addTo(usedResourcesByLabels.get(label), resource); + } + } + + ++numContainers; + CSQueueUtils.updateQueueStatistics(resourceCalculator, this, getParent(), + clusterResource, minimumAllocation); + } + + synchronized void releaseResource(Resource clusterResource, + Resource resource, Set nodeLabels) { + // Update queue metrics + Resources.subtractFrom(usedResources, resource); + + // Update usedResources by labels + if (nodeLabels == null || nodeLabels.isEmpty()) { + if (!usedResourcesByLabels.containsKey(DynamicNodeLabelsManager.NO_LABEL)) { + usedResourcesByLabels.put(DynamicNodeLabelsManager.NO_LABEL, + Resources.createResource(0)); + } + Resources.subtractFrom( + usedResourcesByLabels.get(DynamicNodeLabelsManager.NO_LABEL), resource); + } else { + for (String label : Sets.intersection(labels, nodeLabels)) { + if (!usedResourcesByLabels.containsKey(label)) { + usedResourcesByLabels.put(label, Resources.createResource(0)); + } + Resources.subtractFrom(usedResourcesByLabels.get(label), resource); + } + } + + CSQueueUtils.updateQueueStatistics(resourceCalculator, this, getParent(), + clusterResource, minimumAllocation); + --numContainers; + } + + @Private + public float getCapacityByNodeLabel(String label) { + if (StringUtils.equals(label, DynamicNodeLabelsManager.NO_LABEL)) { + return getCapacity(); + } + + if (!nodeLabelCapacities.containsKey(label)) { + return 0; + } else { + return nodeLabelCapacities.get(label); + } + } + + @Private + public float getAbsoluteCapacityByNodeLabel(String label) { + if (StringUtils.equals(label, DynamicNodeLabelsManager.NO_LABEL)) { + return getAbsoluteCapacity(); + } + + if (null == parent) { + return 1; + } + + if (!absoluteNodeLabelCapacities.containsKey(label)) { + return 0; + } else { + return absoluteNodeLabelCapacities.get(label); + } + } + + @Private + public float getAbsoluteMaximumCapacityByNodeLabel(String label) { + if (StringUtils.equals(label, DynamicNodeLabelsManager.NO_LABEL)) { + return getAbsoluteMaximumCapacity(); + } + + return getAbsoluteCapacityByNodeLabel(label); + } + + @Private + public boolean getReservationContinueLooking() { + return reservationsContinueLooking; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java index db893dc..031bbad 100644 ---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java @@ -72,9 +72,18 @@ /** * Get the configured capacity of the queue. - * @return queue capacity + * @return configured queue capacity */ public float getCapacity(); + + /** + * Get the actual capacity of the queue. This may differ from the + * configured capacity under misconfiguration, for example when labels are + * added to the cluster + * + * @return actual queue capacity + */ + public float getAbsActualCapacity(); /** * Get capacity of the parent of the queue as a function of the @@ -259,4 +268,25 @@ public void detachContainer(Resource clusterResource, */ public void attachContainer(Resource clusterResource, FiCaSchedulerApp application, RMContainer container); + + /** + * Get the absolute capacity this queue can use for the given node label + * @param nodeLabel + * @return absolute capacity of this queue for the node label + */ + public float getAbsoluteCapacityByNodeLabel(String nodeLabel); + + /** + * Get the absolute maximum capacity this queue can use for the given node + * label + * @param nodeLabel + * @return absolute maximum capacity of this queue for the node label + */ + public float getAbsoluteMaximumCapacityByNodeLabel(String nodeLabel); + + /** + * Get the configured capacity of this queue for the given node label + * @param nodeLabel + * @return capacity of this queue for the node label + */ + public float getCapacityByNodeLabel(String nodeLabel); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java index 737062b..8769003 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import java.util.HashMap; +import java.util.Map; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.utils.Lock; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; @@ -40,7 +42,7 @@ public static void checkMaxCapacity(String queueName, } } - public static void checkAbsoluteCapacities(String queueName, + public static void checkAbsoluteCapacity(String queueName, float absCapacity, float absMaxCapacity) { if (absMaxCapacity < (absCapacity - EPSILON)) { throw new IllegalArgumentException("Illegal call to setMaxCapacity. 
" @@ -49,6 +51,22 @@ public static void checkAbsoluteCapacities(String queueName, + ")"); } } + + public static void checkAbsoluteCapacitiesByLabel(String queueName, + Map absCapacities, + Map absMaximumCapacities) { + for (String label : absCapacities.keySet()) { + float absCapacity = absCapacities.get(label); + float absMaxCapacity = absMaximumCapacities.get(label); + if (absMaxCapacity < (absCapacity - EPSILON)) { + throw new IllegalArgumentException("Illegal call to setMaxCapacity. " + + "Queue '" + queueName + "' has " + "an absolute capacity (" + + absCapacity + ") greater than " + + "its absolute maximumCapacity (" + absMaxCapacity + ") of label=" + + label); + } + } + } public static float computeAbsoluteMaximumCapacity( float maximumCapacity, CSQueue parent) { @@ -56,6 +74,37 @@ public static float computeAbsoluteMaximumCapacity( (parent == null) ? 1.0f : parent.getAbsoluteMaximumCapacity(); return (parentAbsMaxCapacity * maximumCapacity); } + + public static Map computeAbsoluteNodeLabelCapacities( + Map nodeLabelToCapacities, CSQueue parent) { + if (parent == null) { + return nodeLabelToCapacities; + } + + Map absoluteNodeLabelToCapacities = + new HashMap(); + for (String label : nodeLabelToCapacities.keySet()) { + absoluteNodeLabelToCapacities.put(label, nodeLabelToCapacities.get(label) + * parent.getAbsoluteCapacityByNodeLabel(label)); + } + return absoluteNodeLabelToCapacities; + } + + public static Map computeAbsoluteMaximumNodeLabelCapacities( + Map maximumNodeLabelToCapacities, CSQueue parent) { + if (parent == null) { + return maximumNodeLabelToCapacities; + } + Map absoluteMaximumNodeLabelToCapacities = + new HashMap(); + for (String label : maximumNodeLabelToCapacities.keySet()) { + absoluteMaximumNodeLabelToCapacities.put( + label, + maximumNodeLabelToCapacities.get(label) + * parent.getAbsoluteMaximumCapacityByNodeLabel(label)); + } + return absoluteMaximumNodeLabelToCapacities; + } public static int computeMaxActiveApplications( ResourceCalculator calculator, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 6a3c7dc..2202a8a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -20,7 +20,15 @@ import java.io.IOException; import java.io.InputStream; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -53,8 +61,13 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.*; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; @@ -191,6 +204,7 @@ public Configuration getConf() { private boolean scheduleAsynchronously; private AsyncScheduleThread asyncSchedulerThread; + private DynamicNodeLabelsManager labelManager; /** * EXPERT @@ -275,6 +289,8 @@ private synchronized void initScheduler(Configuration configuration) throws this.applications = new ConcurrentHashMap>(); + this.labelManager = rmContext.getNodeLabelManager(); + initializeQueues(this.conf); scheduleAsynchronously = this.conf.getScheduleAynschronously(); @@ -446,7 +462,7 @@ private void initializeQueues(CapacitySchedulerConfiguration conf) root = parseQueue(this, conf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, noop); - + labelManager.reinitializeQueueLabels(getQueueToLabels()); LOG.info("Initialized root queue " + root); initializeQueueMappings(); } @@ -469,6 +485,16 @@ private void reinitializeQueues(CapacitySchedulerConfiguration conf) // Re-configure queues root.reinitialize(newRoot, clusterResource); initializeQueueMappings(); + + labelManager.reinitializeQueueLabels(getQueueToLabels()); + } + + private Map> getQueueToLabels() { + Map> queueToLabels = new HashMap>(); + for (CSQueue queue : queues.values()) { + queueToLabels.put(queue.getQueueName(), queue.getAccessibleLabels()); + } + return queueToLabels; } /** @@ -511,7 +537,7 @@ private void addNewQueues( @Lock(CapacityScheduler.class) static CSQueue parseQueue( - CapacitySchedulerContext csContext, + CapacitySchedulerContext csContext, CapacitySchedulerConfiguration conf, CSQueue parent, String queueName, Map queues, Map oldQueues, @@ -1077,11 +1103,18 @@ public void handle(SchedulerEvent event) { } private synchronized void addNode(RMNode nodeManager) { + // update this node to node label manager + if (labelManager != null) { + labelManager.activateNode(nodeManager.getNodeID(), + nodeManager.getTotalCapability()); + } + this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager, usePortForNodeName)); Resources.addTo(clusterResource, nodeManager.getTotalCapability()); root.updateClusterResource(clusterResource); int numNodes = numNodeManagers.incrementAndGet(); + LOG.info("Added node " + nodeManager.getNodeAddress() + " clusterResource: " + clusterResource); @@ -1091,6 +1124,11 @@ private synchronized void addNode(RMNode nodeManager) { } private synchronized void removeNode(RMNode nodeInfo) { + // update this node to node label manager + if (labelManager != null) { + labelManager.deactivateNode(nodeInfo.getNodeID()); + } + FiCaSchedulerNode node = nodes.get(nodeInfo.getNodeID()); if (node == null) { return; @@ -1124,6 +1162,7 @@ private synchronized void removeNode(RMNode nodeInfo) { } 
this.nodes.remove(nodeInfo.getNodeID()); + LOG.info("Removed node " + nodeInfo.getNodeAddress() + " clusterResource: " + clusterResource); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index b1f239c..a0363ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -18,7 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.StringTokenizer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -31,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; @@ -83,6 +92,12 @@ public static final String STATE = "state"; @Private + public static final String ACCESSIBLE_LABELS = "accessible-labels"; + + @Private + public static final String DEFAULT_NODE_LABEL_EXPRESSION = + "default-node-label-expression"; + public static final String RESERVE_CONT_LOOK_ALL_NODES = PREFIX + "reservations-continue-look-all-nodes"; @@ -268,6 +283,10 @@ private String getQueuePrefix(String queue) { return queueName; } + private String getNodeLabelPrefix(String queue, String label) { + return getQueuePrefix(queue) + ACCESSIBLE_LABELS + DOT + label + DOT; + } + public int getMaximumSystemApplications() { int maxApplications = getInt(MAXIMUM_SYSTEM_APPLICATIONS, DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS); @@ -343,6 +362,15 @@ public void setMaximumCapacity(String queue, float maxCapacity) { ", maxCapacity=" + maxCapacity); } + public void setCapacityByLabel(String queue, String label, float capacity) { + setFloat(getNodeLabelPrefix(queue, label) + CAPACITY, capacity); + } + + public void setMaximumCapacityByLabel(String queue, String label, + float capacity) { + setFloat(getNodeLabelPrefix(queue, label) + MAXIMUM_CAPACITY, capacity); + } + public int getUserLimit(String queue) { int userLimit = getInt(getQueuePrefix(queue) + USER_LIMIT, DEFAULT_USER_LIMIT); @@ -372,6 +400,97 @@ public QueueState getState(String queue) { QueueState.valueOf(state.toUpperCase()) : QueueState.RUNNING; } + public void setAccessibleLabels(String queue, Set labels) { + if (labels == null) { + return; + } + String str = StringUtils.join(",", labels); + set(getQueuePrefix(queue) + ACCESSIBLE_LABELS, str); + } + + public Set 
getAccessibleLabels(String queue) { + String labelStr = get(getQueuePrefix(queue) + ACCESSIBLE_LABELS); + if (labelStr == null) { + return queue.equals(ROOT) ? DynamicNodeLabelsManager.EMPTY_STRING_SET : null; + } else { + Set set = new HashSet(); + for (String str : labelStr.split(",")) { + if (!str.trim().isEmpty()) { + set.add(str.trim()); + } + } + // if the labels contain "*", keep only ANY + if (set.contains(DynamicNodeLabelsManager.ANY)) { + set.clear(); + set.add(DynamicNodeLabelsManager.ANY); + } + return Collections.unmodifiableSet(set); + } + } + + public Map getNodeLabelCapacities(String queue, + Set labels) { + Map nodeLabelCapacities = new HashMap(); + + if (labels == null) { + return nodeLabelCapacities; + } + + for (String label : labels) { + // the capacity of every label on the root queue is always 1 + if (org.apache.commons.lang.StringUtils.equals(ROOT, queue)) { + nodeLabelCapacities.put(label, 1.0f); + continue; + } + float capacity = + getFloat(getNodeLabelPrefix(queue, label) + CAPACITY, UNDEFINED); + if (capacity < MINIMUM_CAPACITY_VALUE + || capacity > MAXIMUM_CAPACITY_VALUE) { + throw new IllegalArgumentException("Illegal capacity of " + + capacity + " for label=" + label + " in queue=" + queue); + } + LOG.debug("CSConf - getCapacityOfLabel: prefix=" + + getNodeLabelPrefix(queue, label) + ", capacity=" + capacity); + + nodeLabelCapacities.put(label, capacity / 100f); + } + return nodeLabelCapacities; + } + + public Map getMaximumNodeLabelCapacities(String queue, + Set labels) { + Map maximumNodeLabelCapacities = new HashMap(); + if (labels == null) { + return maximumNodeLabelCapacities; + } + + for (String label : labels) { + float maxCapacity = + getFloat(getNodeLabelPrefix(queue, label) + MAXIMUM_CAPACITY, + UNDEFINED); + maxCapacity = (maxCapacity == DEFAULT_MAXIMUM_CAPACITY_VALUE) ? + MAXIMUM_CAPACITY_VALUE : maxCapacity; + if (maxCapacity < MINIMUM_CAPACITY_VALUE + || maxCapacity > MAXIMUM_CAPACITY_VALUE) { + throw new IllegalArgumentException("Illegal maximum capacity of " + + maxCapacity + " for label=" + label + " in queue=" + queue); + } + LOG.debug("CSConf - getMaximumCapacityOfLabel: prefix=" + + getNodeLabelPrefix(queue, label) + ", maxCapacity=" + maxCapacity); + + maximumNodeLabelCapacities.put(label, maxCapacity / 100f); + } + return maximumNodeLabelCapacities; + } + + public String getDefaultNodeLabelExpression(String queue) { + return get(getQueuePrefix(queue) + DEFAULT_NODE_LABEL_EXPRESSION); + } + + public void setDefaultNodeLabelExpression(String queue, String exp) { + set(getQueuePrefix(queue) + DEFAULT_NODE_LABEL_EXPRESSION, exp); + } + /* * Returns whether we should continue to look at all heart beating nodes even * after the reservation limit was hit. 
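
For reference, the getters above read per-label capacities as percentages and return fractions, from keys built as queue-prefix + "accessible-labels" (+ "." + label + "." + capacity key). Assuming the standard yarn.scheduler.capacity. queue prefix, a queue root.a with a gpu label would be configured under yarn.scheduler.capacity.root.a.accessible-labels and yarn.scheduler.capacity.root.a.accessible-labels.gpu.capacity. A minimal sketch using the setters added in this patch; the queue path root.a and the label gpu are illustrative only, and the snippet assumes the patched classes are on the classpath:

```java
import java.util.Collections;

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

// Sketch only: exercises the label-related setters added above.
public class LabelConfigExample {
  public static void main(String[] args) {
    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    // root.a may access the "gpu" label (in addition to unlabeled nodes)
    conf.setAccessibleLabels("root.a", Collections.singleton("gpu"));
    // Capacities are written as percentages; the getters above divide by
    // 100, so 50 here is read back as the fraction 0.5f.
    conf.setCapacityByLabel("root.a", "gpu", 50);
    conf.setMaximumCapacityByLabel("root.a", "gpu", 80);
    // Requests from root.a that carry no expression default to "gpu"
    conf.setDefaultNodeLabelExpression("root.a", "gpu");
  }
}
```
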
The node heart beating in could diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index f0cff71..5e3acf9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -24,12 +24,14 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -52,36 +54,31 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.server.utils.Lock; import org.apache.hadoop.yarn.server.utils.Lock.NoLock; -import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Sets; @Private @Unstable -public class LeafQueue implements CSQueue { +public class LeafQueue extends AbstractCSQueue { private static final Log LOG = LogFactory.getLog(LeafQueue.class); - private final String queueName; - private CSQueue parent; - private float capacity; - private float absoluteCapacity; - private float maximumCapacity; - private float absoluteMaxCapacity; private float absoluteUsedCapacity = 0.0f; private int userLimit; private float userLimitFactor; @@ -95,10 +92,6 @@ private int maxActiveApplicationsPerUser; private int nodeLocalityDelay; - - private Resource usedResources = 
Resources.createResource(0, 0); - private float usedCapacity = 0.0f; - private volatile int numContainers; Set activeApplications; Map applicationAttemptMap = @@ -106,20 +99,9 @@ Set pendingApplications; - private final Resource minimumAllocation; - private final Resource maximumAllocation; private final float minimumAllocationFactor; private Map users = new HashMap(); - - private final QueueMetrics metrics; - - private QueueInfo queueInfo; - - private QueueState state; - - private Map acls = - new HashMap(); private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); @@ -127,27 +109,18 @@ private CapacitySchedulerContext scheduler; private final ActiveUsersManager activeUsersManager; - - private final ResourceCalculator resourceCalculator; + + // cache last cluster resource to compute actual capacity + private Resource lastClusterResource = Resources.none(); private boolean reservationsContinueLooking; public LeafQueue(CapacitySchedulerContext cs, - String queueName, CSQueue parent, CSQueue old) { + String queueName, CSQueue parent, CSQueue old) throws IOException { + super(cs, queueName, parent, old); this.scheduler = cs; - this.queueName = queueName; - this.parent = parent; - - this.resourceCalculator = cs.getResourceCalculator(); - // must be after parent and queueName are initialized - this.metrics = old != null ? old.getMetrics() : - QueueMetrics.forQueue(getQueuePath(), parent, - cs.getConfiguration().getEnableUserMetrics(), - cs.getConf()); this.activeUsersManager = new ActiveUsersManager(metrics); - this.minimumAllocation = cs.getMinimumResourceCapability(); - this.maximumAllocation = cs.getMaximumResourceCapability(); this.minimumAllocationFactor = Resources.ratio(resourceCalculator, Resources.subtract(maximumAllocation, minimumAllocation), @@ -165,7 +138,8 @@ public LeafQueue(CapacitySchedulerContext cs, float userLimitFactor = cs.getConfiguration().getUserLimitFactor(getQueuePath()); - int maxApplications = cs.getConfiguration().getMaximumApplicationsPerQueue(getQueuePath()); + int maxApplications = + cs.getConfiguration().getMaximumApplicationsPerQueue(getQueuePath()); if (maxApplications < 0) { int maxSystemApps = cs.getConfiguration().getMaximumSystemApplications(); maxApplications = (int)(maxSystemApps * absoluteCapacity); @@ -185,12 +159,10 @@ public LeafQueue(CapacitySchedulerContext cs, resourceCalculator, cs.getClusterResource(), this.minimumAllocation, maxAMResourcePerQueuePercent, absoluteCapacity); - int maxActiveApplicationsPerUser = - CSQueueUtils.computeMaxActiveApplicationsPerUser(maxActiveAppsUsingAbsCap, userLimit, - userLimitFactor); + int maxActiveApplicationsPerUser = + CSQueueUtils.computeMaxActiveApplicationsPerUser( + maxActiveAppsUsingAbsCap, userLimit, userLimitFactor); - this.queueInfo = recordFactory.newRecordInstance(QueueInfo.class); - this.queueInfo.setQueueName(queueName); this.queueInfo.setChildQueues(new ArrayList()); QueueState state = cs.getConfiguration().getState(getQueuePath()); @@ -198,14 +170,13 @@ public LeafQueue(CapacitySchedulerContext cs, Map acls = cs.getConfiguration().getAcls(getQueuePath()); - setupQueueConfigs( - cs.getClusterResource(), - capacity, absoluteCapacity, - maximumCapacity, absoluteMaxCapacity, - userLimit, userLimitFactor, + setupQueueConfigs(cs.getClusterResource(), capacity, absoluteCapacity, + maximumCapacity, absoluteMaxCapacity, userLimit, userLimitFactor, maxApplications, maxAMResourcePerQueuePercent, maxApplicationsPerUser, - maxActiveApplications, maxActiveApplicationsPerUser, 
state, acls, - cs.getConfiguration().getNodeLocalityDelay(), + maxActiveApplications, maxActiveApplicationsPerUser, state, acls, cs + .getConfiguration().getNodeLocalityDelay(), labels, + defaultLabelExpression, this.nodeLabelCapacities, + this.maximumNodeLabelCapacities, cs.getConfiguration().getReservationContinueLook()); if(LOG.isDebugEnabled()) { @@ -219,7 +190,7 @@ public LeafQueue(CapacitySchedulerContext cs, new TreeSet(applicationComparator); this.activeApplications = new TreeSet(applicationComparator); } - + // externalized in a method to allow overriding protected float getCapacityFromConf() { return (float)scheduler.getConfiguration().getCapacity(getQueuePath()) / 100; @@ -234,19 +205,22 @@ protected synchronized void setupQueueConfigs( int maxApplicationsPerUser, int maxActiveApplications, int maxActiveApplicationsPerUser, QueueState state, Map acls, int nodeLocalityDelay, - boolean continueLooking) - { + Set labels, String defaultLabelExpression, + Map capacitiesByLabel, + Map maximumCapacitiesByLabel, boolean continueLooking) + throws IOException { + super.setupQueueConfigs(clusterResource, capacity, absoluteCapacity, + maximumCapacity, absoluteMaxCapacity, state, acls, labels, + defaultLabelExpression, capacitiesByLabel, maximumCapacitiesByLabel, + continueLooking); // Sanity check CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity); float absCapacity = getParent().getAbsoluteCapacity() * capacity; - CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absCapacity, absoluteMaxCapacity); + CSQueueUtils.checkAbsoluteCapacity(getQueueName(), absCapacity, + absoluteMaxCapacity); - this.capacity = capacity; this.absoluteCapacity = absCapacity; - this.maximumCapacity = maximumCapacity; - this.absoluteMaxCapacity = absoluteMaxCapacity; - this.userLimit = userLimit; this.userLimitFactor = userLimitFactor; @@ -256,14 +230,20 @@ protected synchronized void setupQueueConfigs( this.maxActiveApplications = maxActiveApplications; this.maxActiveApplicationsPerUser = maxActiveApplicationsPerUser; - - this.state = state; - this.acls = acls; - - this.queueInfo.setCapacity(this.capacity); - this.queueInfo.setMaximumCapacity(this.maximumCapacity); - this.queueInfo.setQueueState(this.state); + if (!SchedulerUtils.checkQueueLabelExpression(this.labels, + this.defaultLabelExpression)) { + throw new IOException("Invalid default label expression of queue=" + + queueInfo.getQueueName() + + ": queue doesn't have permission to access all labels " + + "in its default label expression. Default label expression=" + + (this.defaultLabelExpression == null ? "" + : this.defaultLabelExpression) + + ". Queue labels=" + + (queueInfo.getNodeLabels() == null ? 
"" : StringUtils.join(queueInfo + .getNodeLabels().iterator(), ','))); + } this.nodeLocalityDelay = nodeLocalityDelay; this.reservationsContinueLooking = continueLooking; @@ -272,11 +252,14 @@ protected synchronized void setupQueueConfigs( for (Map.Entry e : acls.entrySet()) { aclsString.append(e.getKey() + ":" + e.getValue().getAclString()); } - - // Update metrics - CSQueueUtils.updateQueueStatistics( - resourceCalculator, this, getParent(), clusterResource, - minimumAllocation); + + StringBuilder labelStrBuilder = new StringBuilder(); + if (labels != null) { + for (String s : labels) { + labelStrBuilder.append(s); + labelStrBuilder.append(","); + } + } LOG.info("Initializing " + queueName + "\n" + "capacity = " + capacity + @@ -331,50 +314,12 @@ protected synchronized void setupQueueConfigs( " [= configuredState ]" + "\n" + "acls = " + aclsString + " [= configuredAcls ]" + "\n" + + "nodeLocalityDelay = " + nodeLocalityDelay + "\n" + + "labels=" + labelStrBuilder.toString() + "\n" + "nodeLocalityDelay = " + nodeLocalityDelay + "\n" + "reservationsContinueLooking = " + reservationsContinueLooking + "\n"); } - - @Override - public synchronized float getCapacity() { - return capacity; - } - - @Override - public synchronized float getAbsoluteCapacity() { - return absoluteCapacity; - } - - @Override - public synchronized float getMaximumCapacity() { - return maximumCapacity; - } - - @Override - public synchronized float getAbsoluteMaximumCapacity() { - return absoluteMaxCapacity; - } - - @Override - public synchronized float getAbsoluteUsedCapacity() { - return absoluteUsedCapacity; - } - - @Override - public synchronized CSQueue getParent() { - return parent; - } - - @Override - public synchronized void setParent(CSQueue newParentQueue) { - this.parent = (ParentQueue)newParentQueue; - } - - @Override - public String getQueueName() { - return queueName; - } @Override public String getQueuePath() { @@ -385,22 +330,6 @@ public String getQueuePath() { * Used only by tests. */ @Private - public Resource getMinimumAllocation() { - return minimumAllocation; - } - - /** - * Used only by tests. - */ - @Private - public Resource getMaximumAllocation() { - return maximumAllocation; - } - - /** - * Used only by tests. - */ - @Private public float getMinimumAllocationFactor() { return minimumAllocationFactor; } @@ -435,45 +364,9 @@ public ActiveUsersManager getActiveUsersManager() { } @Override - public synchronized float getUsedCapacity() { - return usedCapacity; - } - - @Override - public synchronized Resource getUsedResources() { - return usedResources; - } - - @Override public List getChildQueues() { return null; } - - @Override - public synchronized void setUsedCapacity(float usedCapacity) { - this.usedCapacity = usedCapacity; - } - - @Override - public synchronized void setAbsoluteUsedCapacity(float absUsedCapacity) { - this.absoluteUsedCapacity = absUsedCapacity; - } - - /** - * Set maximum capacity - used only for testing. - * @param maximumCapacity new max capacity - */ - synchronized void setMaxCapacity(float maximumCapacity) { - // Sanity check - CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity); - float absMaxCapacity = - CSQueueUtils.computeAbsoluteMaximumCapacity( - maximumCapacity, getParent()); - CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absMaxCapacity); - - this.maximumCapacity = maximumCapacity; - this.absoluteMaxCapacity = absMaxCapacity; - } /** * Set user limit - used only for testing. 
@@ -567,11 +460,6 @@ public int getNodeLocalityDelay() { return nodeLocalityDelay; } - @Private - boolean getReservationContinueLooking() { - return reservationsContinueLooking; - } - public String toString() { return queueName + ": " + "capacity=" + capacity + ", " + @@ -582,6 +470,11 @@ public String toString() { "numApps=" + getNumApplications() + ", " + "numContainers=" + getNumContainers(); } + + @VisibleForTesting + public synchronized void setNodeLabelManager(DynamicNodeLabelsManager mgr) { + this.labelManager = mgr; + } @VisibleForTesting public synchronized User getUser(String userName) { @@ -631,6 +524,10 @@ public synchronized void reinitialize( newlyParsedLeafQueue.getMaximumActiveApplicationsPerUser(), newlyParsedLeafQueue.state, newlyParsedLeafQueue.acls, newlyParsedLeafQueue.getNodeLocalityDelay(), + newlyParsedLeafQueue.labels, + newlyParsedLeafQueue.defaultLabelExpression, + newlyParsedLeafQueue.nodeLabelCapacities, + newlyParsedLeafQueue.maximumNodeLabelCapacities, newlyParsedLeafQueue.reservationsContinueLooking); // queue metrics are updated, more resource may be available @@ -639,19 +536,6 @@ public synchronized void reinitialize( } @Override - public boolean hasAccess(QueueACL acl, UserGroupInformation user) { - // Check if the leaf-queue allows access - synchronized (this) { - if (acls.get(acl).isUserAllowed(user)) { - return true; - } - } - - // Check if parent-queue allows access - return getParent().hasAccess(acl, user); - } - - @Override public void submitApplicationAttempt(FiCaSchedulerApp application, String userName) { // Careful! Locking order is important! @@ -747,7 +631,8 @@ private synchronized void activateApplications() { } } - private synchronized void addApplicationAttempt(FiCaSchedulerApp application, User user) { + private synchronized void addApplicationAttempt(FiCaSchedulerApp application, + User user) { // Accept user.submitApplication(); pendingApplications.add(application); @@ -783,7 +668,8 @@ public void finishApplicationAttempt(FiCaSchedulerApp application, String queue) getParent().finishApplicationAttempt(application, queue); } - public synchronized void removeApplicationAttempt(FiCaSchedulerApp application, User user) { + public synchronized void removeApplicationAttempt( + FiCaSchedulerApp application, User user) { boolean wasActive = activeApplications.remove(application); if (!wasActive) { pendingApplications.remove(application); @@ -828,6 +714,12 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, + " #applications=" + activeApplications.size()); } + // if our queue cannot access this node, just return + if (!SchedulerUtils.checkQueueAccessToNode(labels, + labelManager.getLabelsOnNode(node.getNodeID()))) { + return NULL_ASSIGNMENT; + } + // Check for reserved resources RMContainer reservedContainer = node.getReservedContainer(); if (reservedContainer != null) { @@ -888,7 +780,8 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, required); // Check queue max-capacity limit - if (!assignToQueue(clusterResource, required, application, true)) { + if (!assignToQueue(clusterResource, required, + labelManager.getLabelsOnNode(node.getNodeID()), application, true)) { return NULL_ASSIGNMENT; } @@ -920,7 +813,8 @@ public synchronized CSAssignment assignContainers(Resource clusterResource, // Book-keeping // Note: Update headroom to account for current allocation too... 
-      allocateResource(clusterResource, application, assigned);
+      allocateResource(clusterResource, application, assigned,
+          labelManager.getLabelsOnNode(node.getNodeID()));
 
       // Don't reset scheduling opportunities for non-local assignments
       // otherwise the app will be delayed for each non-local assignment.
@@ -971,26 +865,38 @@ public synchronized CSAssignment assignContainers(Resource clusterResource,
 
     return new CSAssignment(Resources.none(), NodeType.NODE_LOCAL);
   }
-
-  @Private
-  protected synchronized boolean assignToQueue(Resource clusterResource,
-      Resource required, FiCaSchedulerApp application,
+  synchronized boolean assignToQueue(Resource clusterResource,
+      Resource required, Set nodeLabels, FiCaSchedulerApp application,
       boolean checkReservations) {
-
-    Resource potentialTotalResource = Resources.add(usedResources, required);
-    // Check how of the cluster's absolute capacity we are currently using...
-    float potentialNewCapacity = Resources.divide(resourceCalculator,
-        clusterResource, potentialTotalResource, clusterResource);
-    if (potentialNewCapacity > absoluteMaxCapacity) {
+    // Get the labels this queue can access on this node (nodeLabels AND
+    // queueLabels); compute the intersection only after the null/empty
+    // guard so a null nodeLabels set cannot cause an NPE
+    Set labelCanAccess = new HashSet();
+    if (null == nodeLabels || nodeLabels.isEmpty()) {
+      // Any queue can always access any node without label
+      labelCanAccess.add(DynamicNodeLabelsManager.NO_LABEL);
+    } else {
+      labelCanAccess.addAll(Sets.intersection(labels, nodeLabels));
+    }
+
+    boolean canAssign = false;
+    for (String label : labelCanAccess) {
+      if (!usedResourcesByLabels.containsKey(label)) {
+        usedResourcesByLabels.put(label, Resources.createResource(0));
+      }
+
+      Resource potentialTotalCapacity =
+          Resources.add(usedResourcesByLabels.get(label), required);
+
+      float potentialNewCapacity =
+          Resources.divide(resourceCalculator, clusterResource,
+              potentialTotalCapacity,
+              labelManager.getResourceByLabel(label, clusterResource));
 
       // if enabled, check to see if could we potentially use this node instead
       // of a reserved node if the application has reserved containers
       if (this.reservationsContinueLooking && checkReservations) {
-
         float potentialNewWithoutReservedCapacity = Resources.divide(
             resourceCalculator, clusterResource,
-            Resources.subtract(potentialTotalResource,
-                application.getCurrentReservation()),
+            Resources.subtract(potentialTotalCapacity,
+                application.getCurrentReservation()),
             clusterResource);
 
         if (potentialNewWithoutReservedCapacity <= absoluteMaxCapacity) {
@@ -1013,33 +919,41 @@ protected synchronized boolean assignToQueue(Resource clusterResource,
          // we could potentially use this node instead of reserved node
          return true;
        }
      }
+
+      // otherwise, if usage under any accessible label stays within its
+      // limit, we can allocate on this node
+      if (potentialNewCapacity <= getAbsoluteMaximumCapacityByNodeLabel(label)) {
+        canAssign = true;
+        break;
+      }
+
       if (LOG.isDebugEnabled()) {
         LOG.debug(getQueueName()
-          + " usedResources: " + usedResources
+          + ": check assign to queue, label=" + label
+          + " usedResources: " + usedResourcesByLabels.get(label)
           + " clusterResources: " + clusterResource
           + " currentCapacity "
           + Resources.divide(resourceCalculator, clusterResource,
-            usedResources, clusterResource) + " required " + required
+            usedResourcesByLabels.get(label),
+            labelManager.getResourceByLabel(label, clusterResource))
           + " potentialNewCapacity: " + potentialNewCapacity + " ( "
           + " max-capacity: " + absoluteMaxCapacity + ")");
       }
-      return false;
     }
-    return true;
+
+    return canAssign;
   }
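For intuition about the loop above: usage and limits are now expressed per label partition, so the queue's potential capacity under a label is (used-under-label + required) divided by the resources carried by that label, compared against the queue's per-label absolute maximum. A tiny, self-contained rendering of that arithmetic (longs in place of Resource objects; the class and method names are illustrative, not from the patch):

// Sketch of the per-label headroom test in assignToQueue: a queue may take
// the container if, under at least one label shared by queue and node, the
// resulting usage stays within its absolute max capacity for that label.
public class LabelCapacityCheckSketch {

  static boolean withinLabelLimit(long usedUnderLabel, long required,
      long labelTotal, float absMaxCapacityForLabel) {
    float potentialNewCapacity = (float) (usedUnderLabel + required) / labelTotal;
    return potentialNewCapacity <= absMaxCapacityForLabel;
  }

  public static void main(String[] args) {
    long gb = 1L << 30;
    // 40 GB used under "gpu", 8 GB requested, 64 GB total gpu-labeled
    // resources, queue capped at 80% of the gpu partition.
    System.out.println(withinLabelLimit(40 * gb, 8 * gb, 64 * gb, 0.8f)); // true  (0.75)
    System.out.println(withinLabelLimit(48 * gb, 8 * gb, 64 * gb, 0.8f)); // false (0.875)
  }
}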
  @Lock({LeafQueue.class, FiCaSchedulerApp.class})
  private Resource computeUserLimitAndSetHeadroom(
-      FiCaSchedulerApp application, Resource clusterResource, Resource required) {
-
+      FiCaSchedulerApp application, Resource clusterResource, Resource required) {
     String user = application.getUser();
-    /**
-     * Headroom is min((userLimit, queue-max-cap) - consumed)
+    /**
+     * Headroom = min(userLimit, queue-max-cap, max-capacity-consider-label) -
+     * consumed
      */
     Resource userLimit =                          // User limit
@@ -1058,11 +972,21 @@ private Resource computeUserLimitAndSetHeadroom(
         absoluteMaxAvailCapacity, minimumAllocation);
 
-    Resource userConsumed = getUser(user).getConsumedResources();
-    Resource headroom =
+    // Max capacity this queue can access, considering labels only.
+    Resource maxCapacityConsiderLabel =
+        labelManager == null ? clusterResource : labelManager.getQueueResource(
+            queueName, labels, clusterResource);
+    maxCapacityConsiderLabel =
+        Resources.roundDown(resourceCalculator, maxCapacityConsiderLabel,
+            minimumAllocation);
+    Resource userConsumed = getUser(user).getConsumedResources();
+
+    Resource headroom =
       Resources.subtract(
-        Resources.min(resourceCalculator, clusterResource,
-            userLimit, queueMaxCap),
+        Resources.min(resourceCalculator, clusterResource,
+            Resources.min(resourceCalculator, clusterResource, userLimit,
+                queueMaxCap),
+            maxCapacityConsiderLabel),
         userConsumed);
 
     if (LOG.isDebugEnabled()) {
@@ -1191,7 +1115,8 @@ protected synchronized boolean assignToUser(Resource clusterResource,
     return true;
   }
 
-  boolean needContainers(FiCaSchedulerApp application, Priority priority, Resource required) {
+  boolean needContainers(FiCaSchedulerApp application, Priority priority,
+      Resource required) {
     int requiredContainers = application.getTotalRequiredResources(priority);
     int reservedContainers = application.getNumReservedContainers(priority);
     int starvation = 0;
@@ -1221,10 +1146,9 @@ boolean needContainers(FiCaSchedulerApp application, Priority priority, Resource
     return (((starvation + requiredContainers) - reservedContainers) > 0);
   }
 
-  private CSAssignment assignContainersOnNode(Resource clusterResource,
-      FiCaSchedulerNode node, FiCaSchedulerApp application,
-      Priority priority, RMContainer reservedContainer, boolean needToUnreserve) {
-
+  private CSAssignment assignContainersOnNode(Resource clusterResource,
+      FiCaSchedulerNode node, FiCaSchedulerApp application, Priority priority,
+      RMContainer reservedContainer, boolean needToUnreserve) {
     Resource assigned = Resources.none();
 
     // Data-local
@@ -1331,8 +1255,9 @@ protected boolean checkLimitsToReserve(Resource clusterResource,
     Resource userLimit = computeUserLimitAndSetHeadroom(application,
         clusterResource, capability);
 
-    // Check queue max-capacity limit
-    if (!assignToQueue(clusterResource, capability, application, false)) {
+    // Check queue max-capacity limit,
+    // TODO: Consider reservation on labels
+    if (!assignToQueue(clusterResource, capability, null, application, false)) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("was going to reserve but hit queue limit");
       }
@@ -1479,6 +1404,20 @@ private Resource assignContainer(Resource clusterResource, FiCaSchedulerNode nod
         + " request=" + request + " type=" + type
         + " needToUnreserve= " + needToUnreserve);
     }
+
+    // check if the resource request can access the label
+    if (!SchedulerUtils.checkNodeLabelExpression(
+        labelManager.getLabelsOnNode(node.getNodeID()),
+        request.getNodeLabelExpression())) {
+      // this is a reserved container, but we cannot allocate it now because
+      // the node's labels no longer match the request; this can happen when
+      // node labels change, so we should un-reserve this container.
+      if (rmContainer != null) {
+        unreserve(application, priority, node, rmContainer);
+      }
+      return Resources.none();
+    }
+
     Resource capability = request.getCapability();
     Resource available = node.getAvailableResource();
     Resource totalResource = node.getTotalResource();
@@ -1658,8 +1597,9 @@ public void completedContainer(Resource clusterResource,
     // Book-keeping
     if (removed) {
-      releaseResource(clusterResource,
-          application, container.getResource());
+      releaseResource(clusterResource, application,
+          container.getResource(),
+          labelManager.getLabelsOnNode(node.getNodeID()));
       LOG.info("completedContainer" +
           " container=" + container +
           " queue=" + this +
@@ -1675,14 +1615,11 @@ public void completedContainer(Resource clusterResource,
     }
   }
 
-  synchronized void allocateResource(Resource clusterResource,
-      SchedulerApplicationAttempt application, Resource resource) {
-    // Update queue metrics
-    Resources.addTo(usedResources, resource);
-    CSQueueUtils.updateQueueStatistics(
-        resourceCalculator, this, getParent(), clusterResource, minimumAllocation);
-    ++numContainers;
-
+  synchronized void allocateResource(Resource clusterResource,
+      SchedulerApplicationAttempt application, Resource resource,
+      Set nodeLabels) {
+    super.allocateResource(clusterResource, resource, nodeLabels);
+
     // Update user metrics
     String userName = application.getUser();
     User user = getUser(userName);
@@ -1703,14 +1640,9 @@ synchronized void allocateResource(Resource clusterResource,
   }
 
   synchronized void releaseResource(Resource clusterResource,
-      FiCaSchedulerApp application, Resource resource) {
-    // Update queue metrics
-    Resources.subtractFrom(usedResources, resource);
-    CSQueueUtils.updateQueueStatistics(
-        resourceCalculator, this, getParent(), clusterResource,
-        minimumAllocation);
-    --numContainers;
-
+      FiCaSchedulerApp application, Resource resource, Set nodeLabels) {
+    super.releaseResource(clusterResource, resource, nodeLabels);
+
     // Update user metrics
     String userName = application.getUser();
     User user = getUser(userName);
@@ -1724,6 +1656,8 @@ synchronized void releaseResource(Resource clusterResource,
 
   @Override
   public synchronized void updateClusterResource(Resource clusterResource) {
+    lastClusterResource = clusterResource;
+
     // Update queue properties
     maxActiveApplications =
         CSQueueUtils.computeMaxActiveApplications(
@@ -1756,11 +1690,6 @@ public synchronized void updateClusterResource(Resource clusterResource) {
       }
     }
   }
-
-  @Override
-  public QueueMetrics getMetrics() {
-    return metrics;
-  }
 
   @VisibleForTesting
   public static class User {
@@ -1820,7 +1749,8 @@ public void recoverContainer(Resource clusterResource,
     // Careful! Locking order is important!
synchronized (this) { allocateResource(clusterResource, attempt, rmContainer.getContainer() - .getResource()); + .getResource(), labelManager.getLabelsOnNode(rmContainer + .getContainer().getNodeId())); } getParent().recoverContainer(clusterResource, attempt, rmContainer); } @@ -1858,7 +1788,8 @@ public void attachContainer(Resource clusterResource, FiCaSchedulerApp application, RMContainer rmContainer) { if (application != null) { allocateResource(clusterResource, application, rmContainer.getContainer() - .getResource()); + .getResource(), labelManager.getLabelsOnNode(rmContainer + .getContainer().getNodeId())); LOG.info("movedContainer" + " container=" + rmContainer.getContainer() + " resource=" + rmContainer.getContainer().getResource() + " queueMoveIn=" + this + " usedCapacity=" + getUsedCapacity() @@ -1874,7 +1805,8 @@ public void detachContainer(Resource clusterResource, FiCaSchedulerApp application, RMContainer rmContainer) { if (application != null) { releaseResource(clusterResource, application, rmContainer.getContainer() - .getResource()); + .getResource(), labelManager.getLabelsOnNode(rmContainer.getContainer() + .getNodeId())); LOG.info("movedContainer" + " container=" + rmContainer.getContainer() + " resource=" + rmContainer.getContainer().getResource() + " queueMoveOut=" + this + " usedCapacity=" + getUsedCapacity() @@ -1885,6 +1817,24 @@ public void detachContainer(Resource clusterResource, } } + @Override + public float getAbsActualCapacity() { + if (Resources.lessThanOrEqual(resourceCalculator, lastClusterResource, + lastClusterResource, Resources.none())) { + return absoluteCapacity; + } + + Resource resourceRespectLabels = + labelManager == null ? lastClusterResource : labelManager + .getQueueResource(queueName, labels, lastClusterResource); + float absActualCapacity = + Resources.divide(resourceCalculator, lastClusterResource, + resourceRespectLabels, lastClusterResource); + + return absActualCapacity > absoluteCapacity ? 
absoluteCapacity + : absActualCapacity; + } + public void setCapacity(float capacity) { this.capacity = capacity; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 011c99c..98335cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -21,8 +21,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -46,77 +46,36 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; -import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; +import com.google.common.collect.Sets; + @Private @Evolving -public class ParentQueue implements CSQueue { +public class ParentQueue extends AbstractCSQueue { private static final Log LOG = LogFactory.getLog(ParentQueue.class); - private CSQueue parent; - private final String queueName; - - private float capacity; - private float maximumCapacity; - private float absoluteCapacity; - private float absoluteMaxCapacity; - private float absoluteUsedCapacity = 0.0f; - - private float usedCapacity = 0.0f; - - protected final Set childQueues; - private final Comparator queueComparator; - - private Resource usedResources = Resources.createResource(0, 0); - + protected final Set childQueues; private final boolean rootQueue; - - private final Resource minimumAllocation; - - private volatile int numApplications; - private volatile int numContainers; - - private QueueState state; - - private final QueueMetrics metrics; - - private QueueInfo queueInfo; - - private Map acls = - new HashMap(); private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); - private final ResourceCalculator resourceCalculator; - - private boolean reservationsContinueLooking; - public 
ParentQueue(CapacitySchedulerContext cs, - String queueName, CSQueue parent, CSQueue old) { - minimumAllocation = cs.getMinimumResourceCapability(); - - this.parent = parent; - this.queueName = queueName; - this.rootQueue = (parent == null); - this.resourceCalculator = cs.getResourceCalculator(); + String queueName, CSQueue parent, CSQueue old) throws IOException { + super(cs, queueName, parent, old); - // must be called after parent and queueName is set - this.metrics = old != null ? old.getMetrics() : - QueueMetrics.forQueue(getQueuePath(), parent, - cs.getConfiguration().getEnableUserMetrics(), - cs.getConf()); + this.rootQueue = (parent == null); float rawCapacity = cs.getConfiguration().getCapacity(getQueuePath()); @@ -141,17 +100,14 @@ public ParentQueue(CapacitySchedulerContext cs, Map acls = cs.getConfiguration().getAcls(getQueuePath()); - - this.queueInfo = recordFactory.newRecordInstance(QueueInfo.class); - this.queueInfo.setQueueName(queueName); + this.queueInfo.setChildQueues(new ArrayList()); - setupQueueConfigs(cs.getClusterResource(), - capacity, absoluteCapacity, - maximumCapacity, absoluteMaxCapacity, state, acls, + setupQueueConfigs(cs.getClusterResource(), capacity, absoluteCapacity, + maximumCapacity, absoluteMaxCapacity, state, acls, labels, + defaultLabelExpression, nodeLabelCapacities, maximumNodeLabelCapacities, cs.getConfiguration().getReservationContinueLook()); - this.queueComparator = cs.getQueueComparator(); this.childQueues = new TreeSet(queueComparator); LOG.info("Initialized parent-queue " + queueName + @@ -159,41 +115,29 @@ public ParentQueue(CapacitySchedulerContext cs, ", fullname=" + getQueuePath()); } - protected synchronized void setupQueueConfigs( - Resource clusterResource, - float capacity, float absoluteCapacity, - float maximumCapacity, float absoluteMaxCapacity, + synchronized void setupQueueConfigs(Resource clusterResource, float capacity, + float absoluteCapacity, float maximumCapacity, float absoluteMaxCapacity, QueueState state, Map acls, - boolean continueLooking - ) { - // Sanity check - CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity); - CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absoluteMaxCapacity); - - this.capacity = capacity; - this.absoluteCapacity = absoluteCapacity; - - this.maximumCapacity = maximumCapacity; - this.absoluteMaxCapacity = absoluteMaxCapacity; - - this.state = state; - - this.acls = acls; - - this.queueInfo.setCapacity(this.capacity); - this.queueInfo.setMaximumCapacity(this.maximumCapacity); - this.queueInfo.setQueueState(this.state); - - this.reservationsContinueLooking = continueLooking; - - StringBuilder aclsString = new StringBuilder(); + Set labels, String defaultLabelExpression, + Map nodeLabelCapacities, + Map maximumCapacitiesByLabel, boolean continueLooking) + throws IOException { + super.setupQueueConfigs(clusterResource, capacity, absoluteCapacity, + maximumCapacity, absoluteMaxCapacity, state, acls, labels, + defaultLabelExpression, nodeLabelCapacities, maximumCapacitiesByLabel, + continueLooking); + StringBuilder aclsString = new StringBuilder(); for (Map.Entry e : acls.entrySet()) { aclsString.append(e.getKey() + ":" + e.getValue().getAclString()); } - // Update metrics - CSQueueUtils.updateQueueStatistics( - resourceCalculator, this, parent, clusterResource, minimumAllocation); + StringBuilder labelStrBuilder = new StringBuilder(); + if (labels != null) { + for (String s : labels) { + labelStrBuilder.append(s); + labelStrBuilder.append(","); + } + } 
LOG.info(queueName + ", capacity=" + capacity + @@ -201,13 +145,13 @@ protected synchronized void setupQueueConfigs( ", maxCapacity=" + maximumCapacity + ", asboluteMaxCapacity=" + absoluteMaxCapacity + ", state=" + state + - ", acls=" + aclsString + + ", acls=" + aclsString + + ", labels=" + labelStrBuilder.toString() + "\n" + ", reservationsContinueLooking=" + reservationsContinueLooking); } private static float PRECISION = 0.0005f; // 0.05% precision void setChildQueues(Collection childQueues) { - // Validate float childCapacities = 0; for (CSQueue queue : childQueues) { @@ -221,6 +165,21 @@ void setChildQueues(Collection childQueues) { " capacity of " + childCapacities + " for children of queue " + queueName); } + // check label capacities + for (String nodeLabel : labelManager.getClusterNodeLabels()) { + float capacityByLabel = getCapacityByNodeLabel(nodeLabel); + // check children's labels + float sum = 0; + for (CSQueue queue : childQueues) { + sum += queue.getCapacityByNodeLabel(nodeLabel); + } + if ((capacityByLabel > 0 && Math.abs(1.0f - sum) > PRECISION) + || (capacityByLabel == 0) && (sum > 0)) { + throw new IllegalArgumentException("Illegal" + " capacity of " + + sum + " for children of queue " + queueName + + " for label=" + nodeLabel); + } + } this.childQueues.clear(); this.childQueues.addAll(childQueues); @@ -228,21 +187,6 @@ void setChildQueues(Collection childQueues) { LOG.debug("setChildQueues: " + getChildQueuesToPrint()); } } - - @Override - public synchronized CSQueue getParent() { - return parent; - } - - @Override - public synchronized void setParent(CSQueue newParentQueue) { - this.parent = (ParentQueue)newParentQueue; - } - - @Override - public String getQueueName() { - return queueName; - } @Override public String getQueuePath() { @@ -251,65 +195,6 @@ public String getQueuePath() { } @Override - public synchronized float getCapacity() { - return capacity; - } - - @Override - public synchronized float getAbsoluteCapacity() { - return absoluteCapacity; - } - - @Override - public float getAbsoluteMaximumCapacity() { - return absoluteMaxCapacity; - } - - @Override - public synchronized float getAbsoluteUsedCapacity() { - return absoluteUsedCapacity; - } - - @Override - public float getMaximumCapacity() { - return maximumCapacity; - } - - @Override - public ActiveUsersManager getActiveUsersManager() { - // Should never be called since all applications are submitted to LeafQueues - return null; - } - - @Override - public synchronized float getUsedCapacity() { - return usedCapacity; - } - - @Override - public synchronized Resource getUsedResources() { - return usedResources; - } - - @Override - public synchronized List getChildQueues() { - return new ArrayList(childQueues); - } - - public synchronized int getNumContainers() { - return numContainers; - } - - public synchronized int getNumApplications() { - return numApplications; - } - - @Override - public synchronized QueueState getState() { - return state; - } - - @Override public synchronized QueueInfo getQueueInfo( boolean includeChildQueues, boolean recursive) { queueInfo.setCurrentCapacity(usedCapacity); @@ -391,6 +276,10 @@ public synchronized void reinitialize( newlyParsedParentQueue.absoluteMaxCapacity, newlyParsedParentQueue.state, newlyParsedParentQueue.acls, + newlyParsedParentQueue.labels, + newlyParsedParentQueue.defaultLabelExpression, + newlyParsedParentQueue.nodeLabelCapacities, + newlyParsedParentQueue.maximumNodeLabelCapacities, newlyParsedParentQueue.reservationsContinueLooking); // Re-configure 
existing child queues and add new ones @@ -434,21 +323,6 @@ public synchronized void reinitialize( } return queuesMap; } - - @Override - public boolean hasAccess(QueueACL acl, UserGroupInformation user) { - synchronized (this) { - if (acls.get(acl).isUserAllowed(user)) { - return true; - } - } - - if (parent != null) { - return parent.hasAccess(acl, user); - } - - return false; - } @Override public void submitApplication(ApplicationId applicationId, String user, @@ -532,30 +406,6 @@ public synchronized void removeApplication(ApplicationId applicationId, " leaf-queue of parent: " + getQueueName() + " #applications: " + getNumApplications()); } - - @Override - public synchronized void setUsedCapacity(float usedCapacity) { - this.usedCapacity = usedCapacity; - } - - @Override - public synchronized void setAbsoluteUsedCapacity(float absUsedCapacity) { - this.absoluteUsedCapacity = absUsedCapacity; - } - - /** - * Set maximum capacity - used only for testing. - * @param maximumCapacity new max capacity - */ - synchronized void setMaxCapacity(float maximumCapacity) { - // Sanity check - CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity); - float absMaxCapacity = CSQueueUtils.computeAbsoluteMaximumCapacity(maximumCapacity, parent); - CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absMaxCapacity); - - this.maximumCapacity = maximumCapacity; - this.absoluteMaxCapacity = absMaxCapacity; - } @Override public synchronized CSAssignment assignContainers( @@ -571,7 +421,8 @@ public synchronized CSAssignment assignContainers( boolean localNeedToUnreserve = false; // Are we over maximum-capacity for this queue? - if (!assignToQueue(clusterResource)) { + if (!assignToQueue(clusterResource, + labelManager.getLabelsOnNode(node.getNodeID()))) { // check to see if we could if we unreserve first localNeedToUnreserve = assignToQueueIfUnreserve(clusterResource); if (!localNeedToUnreserve) { @@ -589,7 +440,8 @@ public synchronized CSAssignment assignContainers( resourceCalculator, clusterResource, assignedToChild.getResource(), Resources.none())) { // Track resource utilization for the parent-queue - allocateResource(clusterResource, assignedToChild.getResource()); + allocateResource(clusterResource, assignedToChild.getResource(), + labelManager.getLabelsOnNode(node.getNodeID())); // Track resource utilization in this pass of the scheduler Resources.addTo(assignment.getResource(), assignedToChild.getResource()); @@ -628,22 +480,37 @@ public synchronized CSAssignment assignContainers( return assignment; } - private synchronized boolean assignToQueue(Resource clusterResource) { - // Check how of the cluster's absolute capacity we are currently using... 
-    float currentCapacity =
-      Resources.divide(
-          resourceCalculator, clusterResource,
-          usedResources, clusterResource);
+  private synchronized boolean assignToQueue(Resource clusterResource,
+      Set nodeLabels) {
+    // Guard against a null/empty node-label set before intersecting, as in
+    // the LeafQueue version of this check
+    Set labelCanAccess = new HashSet();
+    if (nodeLabels == null || nodeLabels.isEmpty()) {
+      // Any queue can always access any node without label
+      labelCanAccess.add(DynamicNodeLabelsManager.NO_LABEL);
+    } else {
+      labelCanAccess.addAll(Sets.intersection(labels, nodeLabels));
+    }
 
-    if (currentCapacity >= absoluteMaxCapacity) {
-      LOG.info(getQueueName() +
-          " used=" + usedResources +
-          " current-capacity (" + currentCapacity + ") " +
-          " >= max-capacity (" + absoluteMaxCapacity + ")");
-      return false;
+    boolean canAssign = false;
+    for (String label : labelCanAccess) {
+      if (!usedResourcesByLabels.containsKey(label)) {
+        usedResourcesByLabels.put(label, Resources.createResource(0));
+      }
+      float currentLabelUsedCapacity =
+          Resources.divide(resourceCalculator, clusterResource,
+              usedResourcesByLabels.get(label),
+              labelManager.getResourceByLabel(label, clusterResource));
+      // if usage under any accessible label stays within its limit, we can
+      // allocate on this node
+      if (currentLabelUsedCapacity >= getAbsoluteMaximumCapacityByNodeLabel(label)) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(getQueueName() +
+              " used=" + usedResourcesByLabels.get(label) +
+              " current-capacity (" + currentLabelUsedCapacity + ") " +
+              " >= max-capacity (" +
+              getAbsoluteMaximumCapacityByNodeLabel(label) + ")");
+        }
+      } else {
+        canAssign = true;
+      }
     }
-    return true;
-
+
+    return canAssign;
   }
 
@@ -749,8 +616,8 @@ public void completedContainer(Resource clusterResource,
     // Careful! Locking order is important!
     // Book keeping
     synchronized (this) {
-      releaseResource(clusterResource,
-          rmContainer.getContainer().getResource());
+      releaseResource(clusterResource, rmContainer.getContainer()
+          .getResource(), labelManager.getLabelsOnNode(node.getNodeID()));
 
       LOG.info("completedContainer" +
           " queue=" + getQueueName() +
@@ -787,27 +654,6 @@ public void completedContainer(Resource clusterResource,
     }
   }
 
-  @Private
-  boolean getReservationContinueLooking() {
-    return reservationsContinueLooking;
-  }
-
-  synchronized void allocateResource(Resource clusterResource,
-      Resource resource) {
-    Resources.addTo(usedResources, resource);
-    CSQueueUtils.updateQueueStatistics(
-        resourceCalculator, this, parent, clusterResource, minimumAllocation);
-    ++numContainers;
-  }
-
-  synchronized void releaseResource(Resource clusterResource,
-      Resource resource) {
-    Resources.subtractFrom(usedResources, resource);
-    CSQueueUtils.updateQueueStatistics(
-        resourceCalculator, this, parent, clusterResource, minimumAllocation);
-    --numContainers;
-  }
-
   @Override
   public synchronized void updateClusterResource(Resource clusterResource) {
     // Update all children
@@ -821,10 +667,9 @@ public synchronized void updateClusterResource(Resource clusterResource) {
   }
 
   @Override
-  public QueueMetrics getMetrics() {
-    return metrics;
+  public synchronized List getChildQueues() {
+    return new ArrayList(childQueues);
   }
-
   @Override
   public void recoverContainer(Resource clusterResource,
@@ -834,12 +679,20 @@ public void recoverContainer(Resource clusterResource,
     }
     // Careful! Locking order is important!
synchronized (this) { - allocateResource(clusterResource,rmContainer.getContainer().getResource()); + allocateResource(clusterResource, rmContainer.getContainer() + .getResource(), labelManager.getLabelsOnNode(rmContainer + .getContainer().getNodeId())); } if (parent != null) { parent.recoverContainer(clusterResource, attempt, rmContainer); } } + + @Override + public ActiveUsersManager getActiveUsersManager() { + // Should never be called since all applications are submitted to LeafQueues + return null; + } @Override public void collectSchedulerApplications( @@ -854,7 +707,8 @@ public void attachContainer(Resource clusterResource, FiCaSchedulerApp application, RMContainer rmContainer) { if (application != null) { allocateResource(clusterResource, rmContainer.getContainer() - .getResource()); + .getResource(), labelManager.getLabelsOnNode(rmContainer + .getContainer().getNodeId())); LOG.info("movedContainer" + " queueMoveIn=" + getQueueName() + " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + " used=" + usedResources + " cluster=" @@ -870,7 +724,9 @@ public void attachContainer(Resource clusterResource, public void detachContainer(Resource clusterResource, FiCaSchedulerApp application, RMContainer rmContainer) { if (application != null) { - releaseResource(clusterResource, rmContainer.getContainer().getResource()); + releaseResource(clusterResource, + rmContainer.getContainer().getResource(), + labelManager.getLabelsOnNode(rmContainer.getContainer().getNodeId())); LOG.info("movedContainer" + " queueMoveOut=" + getQueueName() + " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + " used=" + usedResources + " cluster=" @@ -882,6 +738,13 @@ public void detachContainer(Resource clusterResource, } } + @Override + public float getAbsActualCapacity() { + // for now, simply return actual capacity = guaranteed capacity for parent + // queue + return absoluteCapacity; + } + public Map getACLs() { return acls; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PlanQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PlanQueue.java index b87744d..d0251a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PlanQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/PlanQueue.java @@ -19,6 +19,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import org.apache.hadoop.yarn.api.records.Resource; @@ -47,7 +49,7 @@ private boolean showReservationsAsQueues; public PlanQueue(CapacitySchedulerContext cs, String queueName, - CSQueue parent, CSQueue old) { + CSQueue parent, CSQueue old) throws IOException { super(cs, queueName, parent, old); this.schedulerContext = cs; @@ -99,11 +101,14 @@ public synchronized void reinitialize(CSQueue newlyParsedQueue, } // Set new configs + // TODO: add support for node labels setupQueueConfigs(clusterResource, newlyParsedParentQueue.getCapacity(), 
newlyParsedParentQueue.getAbsoluteCapacity(), newlyParsedParentQueue.getMaximumCapacity(), newlyParsedParentQueue.getAbsoluteMaximumCapacity(), newlyParsedParentQueue.getState(), newlyParsedParentQueue.getACLs(), + new HashSet(), null, + new HashMap(), new HashMap(), newlyParsedParentQueue.getReservationContinueLooking()); updateQuotas(newlyParsedParentQueue.userLimit, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ReservationQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ReservationQueue.java index 8e61821..c4424b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ReservationQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ReservationQueue.java @@ -42,7 +42,7 @@ private int maxSystemApps; public ReservationQueue(CapacitySchedulerContext cs, String queueName, - PlanQueue parent) { + PlanQueue parent) throws IOException { super(cs, queueName, parent, null); maxSystemApps = cs.getConfiguration().getMaximumSystemApplications(); // the following parameters are common to all reservation in the plan diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index d4e043d..11027b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -270,4 +271,16 @@ public String toString() { return String.format("[%s, demand=%s, running=%s, share=%s, w=%s]", getName(), getDemand(), getResourceUsage(), fairShare, getWeights()); } + + @Override + public Set getAccessibleLabels() { + // TODO, add implementation for FS + return null; + } + + @Override + public String getDefaultLabelExpression() { + // TODO, add implementation for FS + return null; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index ea21c2b..612f2f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -25,6 +25,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentSkipListMap; import org.apache.commons.logging.Log; @@ -187,6 +188,18 @@ public void recoverContainer(Resource clusterResource, updateAppHeadRoom(schedulerAttempt); updateAvailableResourcesMetrics(); } + + @Override + public Set getAccessibleLabels() { + // TODO add implementation for FIFO scheduler + return null; + } + + @Override + public String getDefaultLabelExpression() { + // TODO add implementation for FIFO scheduler + return null; + } }; public FifoScheduler() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java index a53ad98..976a41e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java @@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.A; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL; @@ -49,8 +50,10 @@ static final float Q_STATS_POS = Q_MAX_WIDTH + 0.05f; static final String Q_END = "left:101%"; static final String Q_GIVEN = "left:0%;background:none;border:1px dashed rgba(0,0,0,0.25)"; + static final String Q_ACTUAL = "left:0%;background:none;border:1px dashed rgba(255,0,0,0.8)"; static final String Q_OVER = "background:rgba(255, 140, 0, 0.8)"; static final String Q_UNDER = "background:rgba(50, 205, 50, 0.8)"; + static final String Q_MAX_LESS_ACTUAL = "background: rgba(255, 255, 0, 0.3)"; @RequestScoped static class CSQInfo { @@ -120,7 +123,9 @@ protected void render(Block html) { _("Configured Max Capacity:", percent(lqinfo.getMaxCapacity() / 100)). _("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%"). _("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor())). - _r("Active users: ", activeUserList.toString()); + _("Active users: ", activeUserList.toString()). + _("Actual Absolute Capacity:", percent(lqinfo.getAbsActualCapacity() / 100)). + _r("Labels Can Access:", StringUtils.join(",", lqinfo.getLabels().getLabels())); html._(InfoBlock.class); @@ -147,18 +152,31 @@ public void render(Block html) { float absCap = info.getAbsoluteCapacity() / 100; float absMaxCap = info.getAbsoluteMaxCapacity() / 100; float absUsedCap = info.getAbsoluteUsedCapacity() / 100; - LI> li = ul. - li(). - a(_Q).$style(width(absMaxCap * Q_MAX_WIDTH)). - $title(join("Absolute Capacity:", percent(absCap))). - span().$style(join(Q_GIVEN, ";font-size:1px;", width(absCap/absMaxCap))). - _('.')._(). 
- span().$style(join(width(absUsedCap/absMaxCap), - ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : Q_UNDER)). - _('.')._(). - span(".q", info.getQueuePath().substring(5))._(). - span().$class("qstats").$style(left(Q_STATS_POS)). - _(join(percent(used), " used"))._(); + float absActualCap = info.getAbsActualCapacity() / 100; + + A>> a = ul. + li().a(_Q).$style(width(absMaxCap * Q_MAX_WIDTH)). + $title(join("Absolute Capacity:", percent(absCap))); + + if (absActualCap < absCap) { + a = a.span().$style(join(width(1), + ";font-size:1px;left:0%;", Q_MAX_LESS_ACTUAL)). + _('.')._(); + } + + LI> li = a. + span().$style(join(absActualCap < absCap ? Q_ACTUAL :Q_GIVEN, + ";font-size:1px;", + (absActualCap < absCap ? + width(absActualCap / absMaxCap) : + width(absCap / absMaxCap)))). + _('.')._(). + span().$style(join(width(absUsedCap/absMaxCap), + ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : Q_UNDER)). + _('.')._(). + span(".q", info.getQueuePath().substring(5))._(). + span().$class("qstats").$style(left(Q_STATS_POS)). + _(join(percent(used), " used"))._(); csqinfo.qinfo = info; if (info.getQueues() == null) { @@ -209,12 +227,16 @@ public void render(Block html) { span().$style("font-weight: bold")._("Legend:")._(). span().$class("qlegend ui-corner-all").$style(Q_GIVEN). _("Capacity")._(). + span().$class("qlegend ui-corner-all").$style(Q_ACTUAL). + _("Actual Capacity (< Capacity)")._(). span().$class("qlegend ui-corner-all").$style(Q_UNDER). _("Used")._(). span().$class("qlegend ui-corner-all").$style(Q_OVER). _("Used (over capacity)")._(). span().$class("qlegend ui-corner-all ui-state-default"). _("Max Capacity")._(). + span().$class("qlegend ui-corner-all").$style(Q_MAX_LESS_ACTUAL). + _("Max Capacity (< Capacity)")._(). _(). li(). a(_Q).$style(width(Q_MAX_WIDTH)). diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index f10e255..b4447ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -66,6 +66,7 @@ protected void render(Block html) { TBODY> tbody = html.table("#nodes"). thead(). tr(). + th(".label", "Labels"). th(".rack", "Rack"). th(".state", "Node State"). th(".nodeaddress", "Node Address"). @@ -113,6 +114,7 @@ protected void render(Block html) { int usedMemory = (int)info.getUsedMemory(); int availableMemory = (int)info.getAvailableMemory(); TR>> row = tbody.tr(). + td(StringUtils.join(",", info.getLabels())). td(info.getRack()). td(info.getState()). 
td(info.getNodeId()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 24a90bd..28886ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -132,6 +132,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelNamesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.BadRequestException; @@ -714,6 +719,160 @@ public Response updateAppState(AppState targetState, return Response.status(Status.OK).entity(ret).build(); } + + @GET + @Path("/labels/all-node-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public LabelNamesInfo getLabels(@Context HttpServletRequest hsr) + throws AuthorizationException, IOException { + init(); + + LabelNamesInfo ret = + new LabelNamesInfo(rm.getRMContext().getNodeLabelManager().getClusterNodeLabels()); + + return ret; + } + + @GET + @Path("/labels/nodes-to-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public NodesToLabelsInfo getNodesToLabels(@Context HttpServletRequest hsr, + @QueryParam("labels") Set labelsQuery) + throws AuthorizationException, IOException { + init(); + + NodesToLabelsInfo nodesToLabelsInfo = new NodesToLabelsInfo(); + + Map> nodesToLabels = + rm.getRMContext().getNodeLabelManager().getNodeLabels(); + + boolean filterLabels = false; + if (labelsQuery != null && !labelsQuery.isEmpty()) { + filterLabels = true; + } + + for (Map.Entry> nlEntry : nodesToLabels.entrySet()) { + Set nodeLabels = nlEntry.getValue(); + if (filterLabels) { + Set labelIntersect = new HashSet(nodeLabels); + labelIntersect.retainAll(labelsQuery); + if (!labelIntersect.isEmpty()) { + nodesToLabelsInfo.add(new NodeToLabelsInfo(nlEntry.getKey().toString(), + labelIntersect)); + } + } else { + nodesToLabelsInfo.add(new NodeToLabelsInfo(nlEntry.getKey().toString(), + nlEntry.getValue())); + } + } + + return nodesToLabelsInfo; + } + + @POST + @Path("/labels/add-node-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public Response addLabels(final LabelNamesInfo newLabels, + @Context HttpServletRequest hsr) + throws Exception { + init(); + + UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true); + if (callerUGI == null) { + String msg = "Unable to obtain user name, user not 
authenticated"; + throw new AuthorizationException(msg); + } + if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) { + String msg = "User not authorized for this action " + + callerUGI.getShortUserName(); + throw new AuthorizationException(msg); + } + + rm.getRMContext().getNodeLabelManager() + .addToCluserNodeLabels(new HashSet(newLabels.getLabels())); + + return Response.status(Status.OK).build(); + + } + + @POST + @Path("/labels/remove-node-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public Response removeLabels(final LabelNamesInfo oldLabels, + @Context HttpServletRequest hsr) + throws Exception { + init(); + + UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true); + if (callerUGI == null) { + String msg = "Unable to obtain user name, user not authenticated"; + throw new AuthorizationException(msg); + } + if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) { + String msg = "User not authorized for this action " + + callerUGI.getShortUserName(); + throw new AuthorizationException(msg); + } + + rm.getRMContext().getNodeLabelManager() + .removeFromClusterNodeLabels(new HashSet(oldLabels.getLabels())); + + return Response.status(Status.OK).build(); + + } + + @POST + @Path("/labels/set-node-to-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public Response addLabels(NodesToLabelsInfo newNodesToLabelsInfo, + @Context HttpServletRequest hsr) + throws Exception { + init(); + + final Map> newNodeToLabels = + new HashMap>(); + + for (NodeToLabelsInfo nodeToLabelsInfo : + newNodesToLabelsInfo.getNodeToLabelsInfos()) { + //It's a list, the same node could be specified > once + Set labels = newNodeToLabels.get(nodeToLabelsInfo.getNode()); + if (labels == null) { + labels = new HashSet(); + String id = nodeToLabelsInfo.getNode(); + String hostName; + int port; + if (id.contains(":")) { + hostName = id.substring(0, id.indexOf(':')); + port = Integer.valueOf(id.substring(id.indexOf(':') + 1)); + } else { + hostName = id; + port = 0; + } + NodeId nodeId = NodeId.newInstance(hostName, port); + newNodeToLabels.put(nodeId, labels); + } + labels.addAll(nodeToLabelsInfo.getLabels()); + } + + UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true); + if (callerUGI == null) { + String msg = "Unable to obtain user name, user not authenticated"; + throw new AuthorizationException(msg); + } + + if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) { + String msg = "User not authorized for this action " + + callerUGI.getShortUserName(); + throw new AuthorizationException(msg); + } + + rm.getRMContext().getNodeLabelManager() + .replaceLabelsOnNode(newNodeToLabels); + + + return Response.status(Status.OK).build(); + + } protected Response killApp(RMApp app, UserGroupInformation callerUGI, HttpServletRequest hsr) throws IOException, InterruptedException { @@ -964,7 +1123,9 @@ protected ApplicationSubmissionContext createAppSubmissionContext( newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(), createAppSubmissionContextResource(newApp), newApp.getApplicationType(), - newApp.getKeepContainersAcrossApplicationAttempts()); + newApp.getKeepContainersAcrossApplicationAttempts(), + newApp.getAppLabelExpression(), + newApp.getAMContainerLabelExpression()); appContext.setApplicationTags(newApp.getApplicationTags()); return appContext; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java index f7233e6..d8d93e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java @@ -71,6 +71,12 @@ @XmlElementWrapper(name = "application-tags") @XmlElement(name = "tag") Set tags; + + @XmlElement(name = "app-label-expression") + String appLabelExpression; + + @XmlElement(name = "am-container-label-expression") + String amContainerLabelExpression; public ApplicationSubmissionContextInfo() { applicationId = ""; @@ -83,6 +89,8 @@ public ApplicationSubmissionContextInfo() { keepContainers = false; applicationType = ""; tags = new HashSet(); + appLabelExpression = ""; + amContainerLabelExpression = ""; } public String getApplicationId() { @@ -132,6 +140,14 @@ public boolean getKeepContainersAcrossApplicationAttempts() { public Set getApplicationTags() { return tags; } + + public String getAppLabelExpression() { + return appLabelExpression; + } + + public String getAMContainerLabelExpression() { + return amContainerLabelExpression; + } public void setApplicationId(String applicationId) { this.applicationId = applicationId; @@ -182,5 +198,12 @@ public void setApplicationType(String applicationType) { public void setApplicationTags(Set tags) { this.tags = tags; } + + public void setAppLabelExpression(String appLabelExpression) { + this.appLabelExpression = appLabelExpression; + } + public void setAMContainerLabelExpression(String labelExpression) { + this.amContainerLabelExpression = labelExpression; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java index d5c84d8..fdfed87 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; +import java.util.Set; + import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @@ -49,6 +51,8 @@ protected QueueState state; protected CapacitySchedulerQueueInfoList queues; protected ResourceInfo resourcesUsed; + protected float absActualCapacity; + protected LabelNamesInfo labels = new LabelNamesInfo(); private boolean hideReservationQueues = false; CapacitySchedulerQueueInfo() { @@ 
-71,6 +75,14 @@ queueName = q.getQueueName(); state = q.getState(); resourcesUsed = new ResourceInfo(q.getUsedResources()); + absActualCapacity = cap(q.getAbsActualCapacity(), 0f, 1f) * 100; + + // add labels + Set labelSet = q.getAccessibleLabels(); + if (labelSet != null) { + labels = new LabelNamesInfo(labelSet); + } + if(q instanceof PlanQueue && !((PlanQueue)q).showReservationsAsQueues()) { hideReservationQueues = true; @@ -100,6 +112,10 @@ public float getAbsoluteMaxCapacity() { public float getAbsoluteUsedCapacity() { return absoluteUsedCapacity; } + + public float getAbsActualCapacity() { + return absActualCapacity; + } public int getNumApplications() { return numApplications; @@ -127,6 +143,10 @@ public CapacitySchedulerQueueInfoList getQueues() { public ResourceInfo getResourcesUsed() { return resourcesUsed; } + + public LabelNamesInfo getLabels() { + return labels; + } /** * Limit a value to a specified range. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelInfo.java new file mode 100644 index 0000000..bd1f926 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelInfo.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.*; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "labelInfo") +@XmlAccessorType(XmlAccessType.FIELD) +public class LabelInfo { + + protected String labelName; + protected ArrayList activeNodes = new ArrayList(); + protected ArrayList inactiveNodes = new ArrayList(); + + public LabelInfo() { + } // JAXB needs this + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java new file mode 100644 index 0000000..1468b78 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.*; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "labelNamesInfo") +@XmlAccessorType(XmlAccessType.FIELD) +public class LabelNamesInfo { + + protected ArrayList label = new ArrayList(); + + public LabelNamesInfo() { + } // JAXB needs this + + public LabelNamesInfo(Set labelSet) { + label.addAll(labelSet); + } + + public ArrayList getLabels() { + return label; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java new file mode 100644 index 0000000..481fe40 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java
new file mode 100644
index 0000000..1468b78
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.*;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "labelNamesInfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class LabelNamesInfo {
+
+  protected ArrayList<String> label = new ArrayList<String>();
+
+  public LabelNamesInfo() {
+  } // JAXB needs this
+
+  public LabelNamesInfo(Set<String> labelSet) {
+    label.addAll(labelSet);
+  }
+
+  public ArrayList<String> getLabels() {
+    return label;
+  }
+
+}
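LabelNamesInfo copies the Set as-is, so REST output order follows the Set's iteration order (NodeInfo below sorts its labels; this class does not). A usage sketch, not part of the patch:

    import java.util.Set;
    import com.google.common.collect.ImmutableSet;

    public class LabelNamesDemo {
      public static void main(String[] args) {
        Set<String> accessible = ImmutableSet.of("gpu", "large-mem");
        LabelNamesInfo names = new LabelNamesInfo(accessible);
        System.out.println(names.getLabels()); // [gpu, large-mem]
      }
    }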
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java
new file mode 100644
index 0000000..481fe40
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.*;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "labelsInfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class LabelsInfo {
+
+  protected ArrayList<LabelInfo> labels = new ArrayList<LabelInfo>();
+
+  public LabelsInfo() {
+  } // JAXB needs this
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
index 73a2db1..1eb8856 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
@@ -18,6 +18,10 @@
 package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
 
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Set;
+
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
@@ -45,6 +49,7 @@ protected long availMemoryMB;
   protected long usedVirtualCores;
   protected long availableVirtualCores;
+  protected ArrayList<String> labels = new ArrayList<String>();
 
   public NodeInfo() {
   } // JAXB needs this
@@ -70,6 +75,13 @@ public NodeInfo(RMNode ni, ResourceScheduler sched) {
     this.lastHealthUpdate = ni.getLastHealthReportTime();
     this.healthReport = String.valueOf(ni.getHealthReport());
     this.version = ni.getNodeManagerVersion();
+
+    // add labels
+    Set<String> labelSet = ni.getNodeLabels();
+    if (labelSet != null) {
+      labels.addAll(labelSet);
+      Collections.sort(labels);
+    }
   }
 
   public String getRack() {
@@ -123,5 +135,9 @@ public long getUsedVirtualCores() {
   public long getAvailableVirtualCores() {
     return this.availableVirtualCores;
   }
+
+  public ArrayList<String> getLabels() {
+    return this.labels;
+  }
 }
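NodeInfo copies the node's label Set into an ArrayList and sorts it, which keeps the REST output deterministic no matter which Set implementation the RMNode returns. The same normalization in isolation (demo class hypothetical):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Set;
    import com.google.common.collect.ImmutableSet;

    public class NodeLabelSortDemo {
      public static void main(String[] args) {
        Set<String> nodeLabels = ImmutableSet.of("ssd", "gpu");
        ArrayList<String> labels = new ArrayList<String>(nodeLabels);
        Collections.sort(labels);
        System.out.println(labels); // [gpu, ssd]
      }
    }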
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsInfo.java
new file mode 100644
index 0000000..527d12c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsInfo.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.*;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "nodeToLabelsInfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class NodeToLabelsInfo {
+
+  protected String node;
+  protected ArrayList<String> labels = new ArrayList<String>();
+
+  public NodeToLabelsInfo() {
+  } // JAXB needs this
+
+  public NodeToLabelsInfo(String node) {
+    this.node = node;
+  }
+
+  public NodeToLabelsInfo(String node, Set<String> labels) {
+    this.node = node;
+    this.labels.addAll(labels);
+  }
+
+  public String getNode() {
+    return node;
+  }
+
+  public ArrayList<String> getLabels() {
+    return labels;
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesToLabelsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesToLabelsInfo.java
new file mode 100644
index 0000000..3c2eb08
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesToLabelsInfo.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.*;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "nodesToLabelsInfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class NodesToLabelsInfo {
+
+  protected ArrayList<NodeToLabelsInfo> nodeToLabelsInfos =
+      new ArrayList<NodeToLabelsInfo>();
+
+  public NodesToLabelsInfo() {
+  } // JAXB needs this
+
+  public ArrayList<NodeToLabelsInfo> getNodeToLabelsInfos() {
+    return nodeToLabelsInfos;
+  }
+
+  public void add(NodeToLabelsInfo nodeToLabelInfo) {
+    nodeToLabelsInfos.add(nodeToLabelInfo);
+  }
+
+}
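A sketch of how a web-service method might assemble the aggregate DAO; the snapshot map is hypothetical, while the constructors and the add()/getNodeToLabelsInfos() calls are the ones defined above:

    import java.util.Map;
    import java.util.Set;
    import com.google.common.collect.ImmutableMap;
    import com.google.common.collect.ImmutableSet;

    public class NodesToLabelsDemo {
      public static void main(String[] args) {
        Map<String, Set<String>> snapshot = ImmutableMap.of(
            "host1:8041", ImmutableSet.of("gpu"),
            "host2:8041", ImmutableSet.<String>of());
        NodesToLabelsInfo result = new NodesToLabelsInfo();
        for (Map.Entry<String, Set<String>> e : snapshot.entrySet()) {
          result.add(new NodeToLabelsInfo(e.getKey(), e.getValue()));
        }
        System.out.println(result.getNodeToLabelsInfos().size()); // 2
      }
    }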
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
index ce5dd96..76ede39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
@@ -147,6 +147,7 @@ public Resource getUsedResources() {
     return used;
   }
 
+  @SuppressWarnings("deprecation")
   public synchronized void submit() throws IOException, YarnException {
     ApplicationSubmissionContext context = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
     context.setApplicationId(this.applicationId);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 91e1905..7af9966 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -135,34 +135,52 @@ public AllocateResponse schedule() throws Exception {
   public void addContainerToBeReleased(ContainerId containerId) {
     releases.add(containerId);
   }
+
   public AllocateResponse allocate(
       String host, int memory, int numContainers,
       List<ContainerId> releases) throws Exception {
-    List<ResourceRequest> reqs = createReq(new String[]{host}, memory, 1, numContainers);
+    return allocate(host, memory, numContainers, releases, null);
+  }
+
+  public AllocateResponse allocate(
+      String host, int memory, int numContainers,
+      List<ContainerId> releases, String labelExpression) throws Exception {
+    List<ResourceRequest> reqs =
+        createReq(new String[] { host }, memory, 1, numContainers,
+            labelExpression);
     return allocate(reqs, releases);
   }
-  
+
   public List<ResourceRequest> createReq(String[] hosts, int memory, int priority,
       int containers) throws Exception {
+    return createReq(hosts, memory, priority, containers, null);
+  }
+
+  public List<ResourceRequest> createReq(String[] hosts, int memory, int priority,
+      int containers, String labelExpression) throws Exception {
     List<ResourceRequest> reqs = new ArrayList<ResourceRequest>();
     for (String host : hosts) {
       ResourceRequest hostReq = createResourceReq(host, memory, priority,
-          containers);
+          containers, labelExpression);
       reqs.add(hostReq);
       ResourceRequest rackReq = createResourceReq("/default-rack", memory,
-          priority, containers);
+          priority, containers, labelExpression);
       reqs.add(rackReq);
     }
     ResourceRequest offRackReq = createResourceReq(ResourceRequest.ANY, memory,
-        priority, containers);
+        priority, containers, labelExpression);
     reqs.add(offRackReq);
     return reqs;
-  }
-  
+  }
+
   public ResourceRequest createResourceReq(String resource, int memory, int priority,
       int containers) throws Exception {
+    return createResourceReq(resource, memory, priority, containers, null);
+  }
+
+  public ResourceRequest createResourceReq(String resource, int memory, int priority,
+      int containers, String labelExpression) throws Exception {
     ResourceRequest req = Records.newRecord(ResourceRequest.class);
     req.setResourceName(resource);
     req.setNumContainers(containers);
@@ -172,6 +190,9 @@ public ResourceRequest createResourceReq(String resource, int memory, int priori
     Resource capability = Records.newRecord(Resource.class);
     capability.setMemory(memory);
     req.setCapability(capability);
+    if (labelExpression != null) {
+      req.setNodeLabelExpression(labelExpression);
+    }
     return req;
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
index 79f9098..228f200 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java
@@ -20,6 +20,7 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -202,7 +203,11 @@ public String getHealthReport() {
     public long getLastHealthReportTime() {
       return lastHealthReportTime;
     }
-
+
+    @Override
+    public Set<String> getNodeLabels() {
+      return null;
+    }
   };
 
   private static RMNode buildRMNode(int rack, final Resource perNode,
       NodeState state, String httpAddr) {
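The MockAM overloads above thread an optional label expression into each ResourceRequest. A standalone sketch of the request they end up building (assumes the ResourceRequest#setNodeLabelExpression setter introduced elsewhere in this patch series; the values are illustrative):

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.api.records.ResourceRequest;
    import org.apache.hadoop.yarn.util.Records;

    public class LabelRequestDemo {
      public static void main(String[] args) {
        ResourceRequest req = Records.newRecord(ResourceRequest.class);
        req.setResourceName(ResourceRequest.ANY);
        req.setPriority(Priority.newInstance(1));
        req.setNumContainers(2);
        req.setCapability(Resource.newInstance(1024, 1));
        req.setNodeLabelExpression("gpu"); // only nodes labelled "gpu"
        System.out.println(req);
      }
    }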
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 4f5fdeb..b8378a8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -183,27 +183,43 @@ public MockAM waitForNewAMToLaunchAndRegister(ApplicationId appId, int attemptSi
     return launchAndRegisterAM(app, this, nm);
   }
 
-  public void waitForState(MockNM nm, ContainerId containerId,
+  public boolean waitForState(MockNM nm, ContainerId containerId,
       RMContainerState containerState) throws Exception {
+    // default is to wait up to 30,000 ms
+    return waitForState(nm, containerId, containerState, 30 * 1000);
+  }
+
+  public boolean waitForState(MockNM nm, ContainerId containerId,
+      RMContainerState containerState, int timeoutMillisecs) throws Exception {
     RMContainer container = getResourceScheduler().getRMContainer(containerId);
     int timeoutSecs = 0;
-    while(container == null && timeoutSecs++ < 100) {
+    while(container == null && timeoutSecs++ < timeoutMillisecs / 100) {
       nm.nodeHeartbeat(true);
       container = getResourceScheduler().getRMContainer(containerId);
       System.out.println("Waiting for container " + containerId
          + " to be allocated.");
       Thread.sleep(100);
+
+      if (timeoutMillisecs <= timeoutSecs * 100) {
+        return false;
+      }
     }
     Assert.assertNotNull("Container shouldn't be null", container);
-    timeoutSecs = 0;
-    while (!containerState.equals(container.getState()) && timeoutSecs++ < 40) {
+    while (!containerState.equals(container.getState())
+        && timeoutSecs++ < timeoutMillisecs / 100) {
       System.out.println("Container : " + containerId
          + " State is : " + container.getState()
          + " Waiting for state : " + containerState);
       nm.nodeHeartbeat(true);
-      Thread.sleep(300);
+      Thread.sleep(100);
+
+      if (timeoutMillisecs <= timeoutSecs * 100) {
+        return false;
+      }
     }
+
     System.out.println("Container State is : " + container.getState());
     Assert.assertEquals("Container state is not correct (timedout)",
         containerState, container.getState());
+    return true;
   }
 
   // get new application id
@@ -301,6 +317,7 @@ public RMApp submitApp(int masterMemory, String name, String user,
         isAppIdProvided, applicationId, 0, null);
   }
 
+  @SuppressWarnings("deprecation")
   public RMApp submitApp(int masterMemory,
       LogAggregationContext logAggregationContext) throws Exception {
     return submitApp(masterMemory, "", UserGroupInformation.getCurrentUser()
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
index 58258ac..9f54de8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -34,9 +35,11 @@
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -150,7 +153,7 @@ protected void submitApplication(
this.rmContext.getScheduler(), this.rmContext.getApplicationMasterService(), submitTime, submissionContext.getApplicationType(), - submissionContext.getApplicationTags()); + submissionContext.getApplicationTags(), null); this.rmContext.getRMApps().put(submissionContext.getApplicationId(), application); //Do not send RMAppEventType.START event diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 333d0cf..e146611 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; + import static org.mockito.Matchers.isA; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; @@ -37,7 +38,6 @@ import java.util.concurrent.ConcurrentMap; import org.junit.Assert; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service; import org.apache.hadoop.yarn.MockApps; @@ -207,6 +207,7 @@ protected void addToCompletedApps(TestRMAppManager appMonitor, RMContext rmConte private ApplicationSubmissionContext asContext; private ApplicationId appId; + @SuppressWarnings("deprecation") @Before public void setUp() { long now = System.currentTimeMillis(); @@ -540,6 +541,7 @@ public void testRMAppSubmitDuplicateApplicationId() throws Exception { Assert.assertEquals("app state doesn't match", RMAppState.FINISHED, app.getState()); } + @SuppressWarnings("deprecation") @Test (timeout = 30000) public void testRMAppSubmitInvalidResourceRequest() throws Exception { asContext.setResource(Resources.createResource( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java index a288c57..5b20149 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java @@ -22,6 +22,7 @@ import static org.mockito.Mockito.when; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyString; + import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedExceptionAction; @@ -30,7 +31,6 @@ import java.util.Map; import org.junit.Assert; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -179,6 +179,7 @@ public void testApplicationACLs() throws Exception { 
verifyAdministerQueueUserAccess(); } + @SuppressWarnings("deprecation") private ApplicationId submitAppAndGetAppId(AccessControlList viewACL, AccessControlList modifyACL) throws Exception { SubmitApplicationRequest submitRequest = recordFactory diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 954e21d..a891094 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -44,13 +44,12 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CyclicBarrier; -import org.junit.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; @@ -87,7 +86,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; -import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -100,10 +98,11 @@ import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.ReservationDefinition; import org.apache.hadoop.yarn.api.records.ReservationId; +import org.apache.hadoop.yarn.api.records.ReservationRequest; import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter; import org.apache.hadoop.yarn.api.records.ReservationRequests; import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.api.records.ReservationRequest; +import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; @@ -138,10 +137,10 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.Records; -import org.apache.hadoop.yarn.util.SystemClock; import org.apache.hadoop.yarn.util.UTCClock; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; @@ -333,7 +332,7 @@ public void handle(Event event) { mock(ApplicationSubmissionContext.class); YarnConfiguration config = new YarnConfiguration(); RMAppAttemptImpl rmAppAttemptImpl = new RMAppAttemptImpl(attemptId, - rmContext, yarnScheduler, null, asContext, config, false); + rmContext, 
yarnScheduler, null, asContext, config, false, null); ApplicationResourceUsageReport report = rmAppAttemptImpl .getApplicationResourceUsageReport(); assertEquals(report, RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT); @@ -1061,6 +1060,7 @@ private SubmitApplicationRequest mockSubmitAppRequest(ApplicationId appId, return mockSubmitAppRequest(appId, name, queue, tags, false); } + @SuppressWarnings("deprecation") private SubmitApplicationRequest mockSubmitAppRequest(ApplicationId appId, String name, String queue, Set tags, boolean unmanaged) { @@ -1150,26 +1150,32 @@ private RMAppImpl getRMApp(RMContext rmContext, YarnScheduler yarnScheduler, final long memorySeconds, final long vcoreSeconds) { ApplicationSubmissionContext asContext = mock(ApplicationSubmissionContext.class); when(asContext.getMaxAppAttempts()).thenReturn(1); - RMAppImpl app = spy(new RMAppImpl(applicationId3, rmContext, config, null, - null, queueName, asContext, yarnScheduler, null, - System.currentTimeMillis(), "YARN", null) { - @Override - public ApplicationReport createAndGetApplicationReport( - String clientUserName, boolean allowAccess) { - ApplicationReport report = super.createAndGetApplicationReport( - clientUserName, allowAccess); - ApplicationResourceUsageReport usageReport = - report.getApplicationResourceUsageReport(); - usageReport.setMemorySeconds(memorySeconds); - usageReport.setVcoreSeconds(vcoreSeconds); - report.setApplicationResourceUsageReport(usageReport); - return report; - } - }); + + RMAppImpl app = + spy(new RMAppImpl(applicationId3, rmContext, config, null, null, + queueName, asContext, yarnScheduler, null, + System.currentTimeMillis(), "YARN", null, + BuilderUtils.newResourceRequest( + RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY, + Resource.newInstance(1024, 1), 1)){ + @Override + public ApplicationReport createAndGetApplicationReport( + String clientUserName, boolean allowAccess) { + ApplicationReport report = super.createAndGetApplicationReport( + clientUserName, allowAccess); + ApplicationResourceUsageReport usageReport = + report.getApplicationResourceUsageReport(); + usageReport.setMemorySeconds(memorySeconds); + usageReport.setVcoreSeconds(vcoreSeconds); + report.setApplicationResourceUsageReport(usageReport); + return report; + } + }); + ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(123456, 1), 1); RMAppAttemptImpl rmAppAttemptImpl = spy(new RMAppAttemptImpl(attemptId, - rmContext, yarnScheduler, null, asContext, config, false)); + rmContext, yarnScheduler, null, asContext, config, false, null)); Container container = Container.newInstance( ContainerId.newInstance(attemptId, 1), null, "", null, null, null); RMContainerImpl containerimpl = spy(new RMContainerImpl(container, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java index a0c2b01..7c62cfd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -28,6 +28,7 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.mock;
@@ -37,12 +38,17 @@ import static org.mockito.Mockito.when;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Comparator;
 import java.util.Deque;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.NavigableSet;
+import java.util.Queue;
 import java.util.Random;
+import java.util.Set;
 import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
@@ -51,15 +57,18 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Priority;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
@@ -74,6 +83,11 @@
 import org.junit.rules.TestName;
 import org.mockito.ArgumentCaptor;
 import org.mockito.ArgumentMatcher;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import com.google.common.collect.ImmutableSet;
 
 public class TestProportionalCapacityPreemptionPolicy {
 
@@ -100,6 +114,8 @@ ApplicationId.newInstance(TS, 4), 0);
   final ArgumentCaptor<ContainerPreemptEvent> evtCaptor = ArgumentCaptor
       .forClass(ContainerPreemptEvent.class);
+  DynamicNodeLabelsManager labelManager = mock(DynamicNodeLabelsManager.class);
+  private Map<NodeId, Set<String>> nodeToLabels;
 
   @Rule public TestName name = new TestName();
 
@@ -572,6 +588,239 @@ public void testAMResourcePercentForSkippedAMContainers() {
     setAMContainer = false;
   }
 
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testIgnoreBecauseQueueCannotAccessSomeLabels() {
+    int[][] qData = new int[][]{
+        //   /    A    B    C
+        {  100,  40,  40,  20 },   // abs
+        {  100, 100, 100, 100 },   // maxCap
+        {  100,  10,  60,  30 },   // used
+        {    0,  30,   0,   0 },   // pending
+        {    0,   0,   0,   0 },   // reserved
+        {    3,   1,   1,   1 },   // apps
+        {   -1,   1,   1,   1 },   // req granularity
+        {    3,   0,   0,   0 },   // subqueues
+    };
+
+    DynamicNodeLabelsManager labelManager = mock(DynamicNodeLabelsManager.class);
+    when(
+        labelManager.getQueueResource(any(String.class),
any(Set.class), + any(Resource.class))).thenReturn(Resource.newInstance(10, 0), + Resource.newInstance(100, 0), Resource.newInstance(10, 0)); + ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); + policy.setNodeLabelManager(labelManager); + policy.editSchedule(); + // don't correct imbalances without demand + verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class)); + } + + @SuppressWarnings({ "rawtypes" }) + @Test + public void testPreemptContainerRespectLabels() { + /* + * A: yellow + * B: blue + * C: green, yellow + * D: red + * E: green + * + * All node has labels, so C should only preempt container from A/E + */ + int[][] qData = new int[][]{ + // / A B C D E + { 100, 20, 20, 20, 20, 20 }, // abs + { 100, 100, 100, 100, 100, 100 }, // maxCap + { 100, 25, 25, 0, 25, 25 }, // used + { 0, 0, 0, 20, 0, 0 }, // pending + { 0, 0, 0, 0, 0, 0 }, // reserved + { 5, 1, 1, 1, 1, 1 }, // apps + { -1, 1, 1, 1, 1, 1 }, // req granularity + { 5, 0, 0, 0, 0, 0 }, // subqueues + }; + + Set[] queueLabels = new Set[6]; + queueLabels[1] = ImmutableSet.of("yellow"); + queueLabels[2] = ImmutableSet.of("blue"); + queueLabels[3] = ImmutableSet.of("yellow", "green"); + queueLabels[4] = ImmutableSet.of("red"); + queueLabels[5] = ImmutableSet.of("green"); + + String[] hostnames = new String[] { "host1", "host2", "host3", "host4" }; + Set[] nodeLabels = new Set[4]; + nodeLabels[0] = ImmutableSet.of("yellow", "green"); + nodeLabels[1] = ImmutableSet.of("blue"); + nodeLabels[2] = ImmutableSet.of("red"); + nodeLabels[3] = ImmutableSet.of("yellow", "green"); + Resource[] nodeResources = + new Resource[] { + Resource.newInstance(25, 0), + Resource.newInstance(25, 0), + Resource.newInstance(25, 0), + Resource.newInstance(25, 0) }; + + Queue containerHosts = new LinkedList(); + addContainerHosts(containerHosts, "host1", 25); + addContainerHosts(containerHosts, "host2", 25); + addContainerHosts(containerHosts, "host3", 25); + addContainerHosts(containerHosts, "host4", 25); + + // build policy and run + ProportionalCapacityPreemptionPolicy policy = + buildPolicy(qData, queueLabels, hostnames, nodeLabels, nodeResources, + containerHosts); + policy.editSchedule(); + + // B,D don't have expected labels, will not preempt resource from them + verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appB))); + verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appD))); + + // A,E have expected resource, preempt resource from them + verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appA))); + verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appE))); + } + + @SuppressWarnings({ "rawtypes" }) + @Test + public void + testPreemptContainerRespectLabelsInHierarchyQueuesWithAvailableRes() { + /* + * A-E: (x) + * F: (y) + * + * All node has labels, so C should only preempt container from B/F + * + * Queue structure: + * root + * / \ + * A F + * / \ + * B E + * / \ + * C D + */ + int[][] qData = new int[][] { + // / A B C D E F + { 100, 60, 30, 15, 15, 30, 40 }, // abs + { 100, 100, 100, 100, 100, 100, 100 }, // maxCap + { 65, 65, 65, 10, 55, 0, 0 }, // used + { 0, 0, 0, 0, 0, 30, 0 }, // pending + { 0, 0, 0, 0, 0, 0, 0 }, // reserved + { 4, 3, 2, 1, 1, 1, 0 }, // apps + { -1, 1, 1, 1, 1, 1, 1 }, // req granularity + { 2, 2, 2, 0, 0, 0, 0 }, // subqueues + }; + + Set[] queueLabels = new Set[7]; + queueLabels[1] = ImmutableSet.of("x"); + queueLabels[2] = ImmutableSet.of("x"); + queueLabels[3] = ImmutableSet.of("x"); + queueLabels[4] = ImmutableSet.of("x"); + 
queueLabels[5] = ImmutableSet.of("x"); + queueLabels[6] = ImmutableSet.of("y"); + + String[] hostnames = new String[] { "host1", "host2", "host3" }; + Set[] nodeLabels = new Set[3]; + nodeLabels[0] = ImmutableSet.of("x"); + nodeLabels[1] = ImmutableSet.of("x"); + nodeLabels[2] = ImmutableSet.of("y"); + Resource[] nodeResources = + new Resource[] { Resource.newInstance(30, 0), + Resource.newInstance(40, 0), Resource.newInstance(30, 0) }; + + Queue containerHosts = new LinkedList(); + addContainerHosts(containerHosts, "host1", 30); + addContainerHosts(containerHosts, "host2", 35); + + // build policy and run + ProportionalCapacityPreemptionPolicy policy = + buildPolicy(qData, queueLabels, hostnames, nodeLabels, nodeResources, + containerHosts); + policy.editSchedule(); + + // B,D don't have expected labels, will not preempt resource from them + verify(mDisp, times(0)).handle(argThat(new IsPreemptionRequestFor(appA))); + + // A,E have expected resource, preempt resource from them + // because of real->integer, it is possible preempted 23 or 25 containers + // from B + verify(mDisp, times(25)).handle(argThat(new IsPreemptionRequestFor(appB))); + } + + @SuppressWarnings({ "rawtypes" }) + @Test + public void testPreemptContainerRespectLabelsInHierarchyQueues() { + /* + * A: + * B: yellow + * C: blue + * D: green, yellow + * E: + * F: green + * + * All node has labels, so C should only preempt container from B/F + * + * Queue structure: + * root + * / | \ + * A D E + * / \ \ + * B C F + */ + int[][] qData = new int[][] { + // / A B C D E F + { 100, 50, 25, 25, 25, 25, 25 }, // abs + { 100, 100, 100, 100, 100, 100, 100 }, // maxCap + { 100, 60, 30, 30, 0, 40, 40 }, // used + { 0, 0, 0, 0, 25, 0, 0 }, // pending + { 0, 0, 0, 0, 0, 0, 0 }, // reserved + { 4, 2, 1, 1, 1, 1, 1 }, // apps + { -1, 1, 1, 1, 1, 1, 1 }, // req granularity + { 3, 2, 0, 0, 0, 1, 0 }, // subqueues + }; + + Set[] queueLabels = new Set[7]; + queueLabels[2] = ImmutableSet.of("yellow"); // B + queueLabels[3] = ImmutableSet.of("blue"); // C + queueLabels[4] = ImmutableSet.of("yellow", "green"); // D + queueLabels[6] = ImmutableSet.of("green"); // F + + String[] hostnames = new String[] { "host1", "host2", "host3" }; + Set[] nodeLabels = new Set[3]; + nodeLabels[0] = ImmutableSet.of("blue"); + nodeLabels[1] = ImmutableSet.of("yellow", "green"); + nodeLabels[2] = ImmutableSet.of("yellow", "green"); + Resource[] nodeResources = + new Resource[] { Resource.newInstance(30, 0), + Resource.newInstance(40, 0), Resource.newInstance(30, 0) }; + + Queue containerHosts = new LinkedList(); + addContainerHosts(containerHosts, "host2", 30); + addContainerHosts(containerHosts, "host1", 30); + addContainerHosts(containerHosts, "host2", 10); + addContainerHosts(containerHosts, "host3", 30); + + // build policy and run + ProportionalCapacityPreemptionPolicy policy = + buildPolicy(qData, queueLabels, hostnames, nodeLabels, nodeResources, + containerHosts); + policy.editSchedule(); + + // B,D don't have expected labels, will not preempt resource from them + verify(mDisp, times(0)).handle(argThat(new IsPreemptionRequestFor(appB))); + + // A,E have expected resource, preempt resource from them + verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appA))); + verify(mDisp, times(15)).handle(argThat(new IsPreemptionRequestFor(appD))); + } + + private void addContainerHosts(Queue containerHosts, String host, + int times) { + for (int i = 0; i < times; i++) { + containerHosts.offer(host); + } + } + static class IsPreemptionRequestFor extends 
ArgumentMatcher { private final ApplicationAttemptId appAttId; @@ -594,20 +843,68 @@ public String toString() { return appAttId.toString(); } } - + ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData) { + return buildPolicy(qData, null, null, null, null, null); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData, Set[] labels, + String[] hostnames, Set[] nodeLabels, Resource[] nodeResources, + Queue containerHosts) { + nodeToLabels = new HashMap>(); + ProportionalCapacityPreemptionPolicy policy = - new ProportionalCapacityPreemptionPolicy(conf, mDisp, mCS, mClock); - ParentQueue mRoot = buildMockRootQueue(rand, qData); + new ProportionalCapacityPreemptionPolicy(conf, mDisp, mCS, mClock, + labelManager); + ParentQueue mRoot = buildMockRootQueue(rand, labels, containerHosts, qData); when(mCS.getRootQueue()).thenReturn(mRoot); Resource clusterResources = Resource.newInstance(leafAbsCapacities(qData[0], qData[7]), 0); when(mCS.getClusterResource()).thenReturn(clusterResources); + // by default, queue's resource equals clusterResource when no label exists + when( + labelManager.getQueueResource(any(String.class), any(Set.class), + any(Resource.class))).thenReturn(clusterResources); + Mockito.doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + NodeId nodeId = (NodeId) invocation.getArguments()[0]; + return nodeToLabels.get(nodeId); + } + }).when(labelManager).getLabelsOnNode(any(NodeId.class)); + when(labelManager.getNodeLabels()).thenReturn(nodeToLabels); + + // mock scheduler node + if (hostnames == null) { + SchedulerNode node = mock(SchedulerNode.class); + when(node.getNodeName()).thenReturn("mock_host"); + when(node.getTotalResource()).thenReturn(clusterResources); + when(mCS.getSchedulerNodes()).thenReturn(Arrays.asList(node)); + } else { + List schedulerNodes = new ArrayList(); + + for (int i = 0; i < hostnames.length; i++) { + String hostname = hostnames[i]; + Set nLabels = nodeLabels[i]; + Resource res = nodeResources[i]; + + SchedulerNode node = mock(SchedulerNode.class); + when(node.getNodeName()).thenReturn(hostname); + when(node.getTotalResource()).thenReturn(res); + nodeToLabels.put(NodeId.newInstance(hostname, 1), nLabels); + schedulerNodes.add(node); + } + when(mCS.getSchedulerNodes()).thenReturn(schedulerNodes); + } + return policy; } - ParentQueue buildMockRootQueue(Random r, int[]... queueData) { + @SuppressWarnings({ "rawtypes", "unchecked" }) + ParentQueue buildMockRootQueue(Random r, Set[] queueLabels, + Queue containerHosts, int[]... queueData) { int[] abs = queueData[0]; int[] maxCap = queueData[1]; int[] used = queueData[2]; @@ -617,14 +914,30 @@ ParentQueue buildMockRootQueue(Random r, int[]... 
queueData) { int[] gran = queueData[6]; int[] queues = queueData[7]; - return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues); + return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues, + queueLabels, containerHosts); } - + ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, int[] pending, int[] reserved, int[] apps, int[] gran, int[] queues) { + return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues, + null, null); + } + + @SuppressWarnings("unchecked") + ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, + int[] pending, int[] reserved, int[] apps, int[] gran, int[] queues, + Set[] queueLabels, Queue containerLabels) { + if (queueLabels == null) { + queueLabels = new Set[abs.length]; + for (int i = 0; i < queueLabels.length; i++) { + queueLabels[i] = null; + } + } + float tot = leafAbsCapacities(abs, queues); Deque pqs = new LinkedList(); - ParentQueue root = mockParentQueue(null, queues[0], pqs); + ParentQueue root = mockParentQueue(null, queues[0], pqs, queueLabels[0]); when(root.getQueueName()).thenReturn("/"); when(root.getAbsoluteUsedCapacity()).thenReturn(used[0] / tot); when(root.getAbsoluteCapacity()).thenReturn(abs[0] / tot); @@ -635,9 +948,11 @@ ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, final ParentQueue p = pqs.removeLast(); final String queueName = "queue" + ((char)('A' + i - 1)); if (queues[i] > 0) { - q = mockParentQueue(p, queues[i], pqs); + q = mockParentQueue(p, queues[i], pqs, queueLabels[i]); } else { - q = mockLeafQueue(p, tot, i, abs, used, pending, reserved, apps, gran); + q = + mockLeafQueue(p, tot, i, abs, used, pending, reserved, apps, gran, + queueLabels[i], containerLabels); } when(q.getParent()).thenReturn(p); when(q.getQueueName()).thenReturn(queueName); @@ -650,7 +965,7 @@ ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, } ParentQueue mockParentQueue(ParentQueue p, int subqueues, - Deque pqs) { + Deque pqs, Set labels) { ParentQueue pq = mock(ParentQueue.class); List cqs = new ArrayList(); when(pq.getChildQueues()).thenReturn(cqs); @@ -663,11 +978,16 @@ ParentQueue mockParentQueue(ParentQueue p, int subqueues, return pq; } - LeafQueue mockLeafQueue(ParentQueue p, float tot, int i, int[] abs, - int[] used, int[] pending, int[] reserved, int[] apps, int[] gran) { + LeafQueue mockLeafQueue(ParentQueue p, float tot, int i, int[] abs, + int[] used, int[] pending, int[] reserved, int[] apps, int[] gran, + Set queueLabels, + Queue containerLabels) { LeafQueue lq = mock(LeafQueue.class); when(lq.getTotalResourcePending()).thenReturn( Resource.newInstance(pending[i], 0)); + if (queueLabels != null) { + when(lq.getAccessibleLabels()).thenReturn(queueLabels); + } // consider moving where CapacityScheduler::comparator accessible NavigableSet qApps = new TreeSet( new Comparator() { @@ -683,7 +1003,8 @@ public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) { int aPending = pending[i] / apps[i]; int aReserve = reserved[i] / apps[i]; for (int a = 0; a < apps[i]; ++a) { - qApps.add(mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i])); + qApps.add(mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i], + containerLabels)); ++appAlloc; } } @@ -696,7 +1017,7 @@ public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) { } FiCaSchedulerApp mockApp(int qid, int id, int used, int pending, int reserved, - int gran) { + int gran, Queue containerHosts) { FiCaSchedulerApp app = mock(FiCaSchedulerApp.class); ApplicationId appId = ApplicationId.newInstance(TS, id); 
@@ -715,23 +1036,35 @@ FiCaSchedulerApp mockApp(int qid, int id, int used, int pending, int reserved, List cLive = new ArrayList(); for (int i = 0; i < used; i += gran) { - if(setAMContainer && i == 0){ - cLive.add(mockContainer(appAttId, cAlloc, unit, 0)); - }else{ - cLive.add(mockContainer(appAttId, cAlloc, unit, 1)); + if (setAMContainer && i == 0) { + cLive.add(mockContainer(appAttId, cAlloc, unit, 0, + containerHosts == null ? null : containerHosts.remove())); + } else { + cLive.add(mockContainer(appAttId, cAlloc, unit, 1, + containerHosts == null ? null : containerHosts.remove())); } ++cAlloc; } when(app.getLiveContainers()).thenReturn(cLive); return app; } - + RMContainer mockContainer(ApplicationAttemptId appAttId, int id, Resource r, int priority) { + return mockContainer(appAttId, id, r, priority, null); + } + + RMContainer mockContainer(ApplicationAttemptId appAttId, int id, + Resource r, int priority, String host) { ContainerId cId = ContainerId.newInstance(appAttId, id); Container c = mock(Container.class); when(c.getResource()).thenReturn(r); when(c.getPriority()).thenReturn(Priority.create(priority)); + if (host != null) { + when(c.getNodeId()).thenReturn(NodeId.newInstance(host, 1)); + } else { + when(c.getNodeId()).thenReturn(NodeId.newInstance("mock_host", 1)); + } RMContainer mC = mock(RMContainer.class); when(mC.getContainerId()).thenReturn(cId); when(mC.getContainer()).thenReturn(c); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DummyDynamicNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DummyDynamicNodeLabelsManager.java new file mode 100644 index 0000000..f1115c7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/DummyDynamicNodeLabelsManager.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.nodelabels; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.event.InlineDispatcher; +import org.apache.hadoop.yarn.nodelabels.NodeLabelsStore; + +public class DummyDynamicNodeLabelsManager extends DynamicNodeLabelsManager { + Map> lastNodeToLabels = null; + Collection lastAddedlabels = null; + Collection lastRemovedlabels = null; + + @Override + public void initNodeLabelStore(Configuration conf) { + this.store = new NodeLabelsStore(this) { + + @Override + public void recover() throws IOException { + // do nothing + } + + @Override + public void persistRemovingLabels(Collection labels) + throws IOException { + // do nothing + } + + @Override + public void persistNodeToLabelsChanges( + Map> nodeToLabels) throws IOException { + // do nothing + } + + @Override + public void persistAddingLabels(Set label) throws IOException { + // do nothing + } + }; + } + + @Override + protected void initDispatcher(Configuration conf) { + super.dispatcher = new InlineDispatcher(); + } + + @Override + protected void startDispatcher() { + // do nothing + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestDynamicNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestDynamicNodeLabelsManager.java new file mode 100644 index 0000000..d4b3e0c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestDynamicNodeLabelsManager.java @@ -0,0 +1,367 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.nodelabels; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.nodelabels.NodeLabelTestBase; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; + +public class TestDynamicNodeLabelsManager extends NodeLabelTestBase { + private final Resource EMPTY_RESOURCE = Resource.newInstance(0, 0); + private final Resource SMALL_NODE = Resource.newInstance(100, 0); + private final Resource LARGE_NODE = Resource.newInstance(1000, 0); + + DummyDynamicNodeLabelsManager mgr = null; + + @Before + public void before() { + mgr = new DummyDynamicNodeLabelsManager(); + mgr.init(new Configuration()); + mgr.start(); + } + + @After + public void after() { + mgr.stop(); + } + + @Test(timeout = 5000) + public void testNodeActiveDeactiveUpdate() throws Exception { + mgr.addToCluserNodeLabels(toSet("p1", "p2", "p3")); + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"), + toNodeId("n2"), toSet("p2"), toNodeId("n3"), toSet("p3"))); + + Assert.assertEquals(mgr.getResourceByLabel("p1", null), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceByLabel("p2", null), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceByLabel("p3", null), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceByLabel(DynamicNodeLabelsManager.NO_LABEL, null), + EMPTY_RESOURCE); + + // active two NM to n1, one large and one small + mgr.activateNode(NodeId.newInstance("n1", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n1", 2), LARGE_NODE); + Assert.assertEquals(mgr.getResourceByLabel("p1", null), + Resources.add(SMALL_NODE, LARGE_NODE)); + + // change the large NM to small, check if resource updated + mgr.updateNodeResource(NodeId.newInstance("n1", 2), SMALL_NODE); + Assert.assertEquals(mgr.getResourceByLabel("p1", null), + Resources.multiply(SMALL_NODE, 2)); + + // deactive one NM, and check if resource updated + mgr.deactivateNode(NodeId.newInstance("n1", 1)); + Assert.assertEquals(mgr.getResourceByLabel("p1", null), SMALL_NODE); + + // continus deactive, check if resource updated + mgr.deactivateNode(NodeId.newInstance("n1", 2)); + Assert.assertEquals(mgr.getResourceByLabel("p1", null), EMPTY_RESOURCE); + + // Add two NM to n1 back + mgr.activateNode(NodeId.newInstance("n1", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n1", 2), LARGE_NODE); + + // And remove p1, now the two NM should come to default label, + mgr.removeFromClusterNodeLabels(ImmutableSet.of("p1")); + Assert.assertEquals(mgr.getResourceByLabel(DynamicNodeLabelsManager.NO_LABEL, null), + Resources.add(SMALL_NODE, LARGE_NODE)); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Test(timeout = 5000) + public void testUpdateNodeLabelWithActiveNode() throws Exception { + mgr.addToCluserNodeLabels(toSet("p1", "p2", "p3")); + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p1"), + toNodeId("n2"), toSet("p2"), toNodeId("n3"), toSet("p3"))); + + // active two NM to n1, one large and one small + mgr.activateNode(NodeId.newInstance("n1", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n2", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n3", 
1), SMALL_NODE); + + // change label of n1 to p2 + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("n1"), toSet("p2"))); + Assert.assertEquals(mgr.getResourceByLabel("p1", null), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceByLabel("p2", null), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getResourceByLabel("p3", null), SMALL_NODE); + + // add more labels + mgr.addToCluserNodeLabels(toSet("p4", "p5", "p6")); + mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n4"), toSet("p1"), + toNodeId("n5"), toSet("p2"), toNodeId("n6"), toSet("p3"), + toNodeId("n7"), toSet("p4"), toNodeId("n8"), toSet("p5"))); + + // now node -> label is, + // p1 : n4 + // p2 : n1, n2, n5 + // p3 : n3, n6 + // p4 : n7 + // p5 : n8 + // no-label : n9 + + // active these nodes + mgr.activateNode(NodeId.newInstance("n4", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n5", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n6", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n7", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n8", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("n9", 1), SMALL_NODE); + + // check varibles + Assert.assertEquals(mgr.getResourceByLabel("p1", null), SMALL_NODE); + Assert.assertEquals(mgr.getResourceByLabel("p2", null), + Resources.multiply(SMALL_NODE, 3)); + Assert.assertEquals(mgr.getResourceByLabel("p3", null), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getResourceByLabel("p4", null), + Resources.multiply(SMALL_NODE, 1)); + Assert.assertEquals(mgr.getResourceByLabel("p5", null), + Resources.multiply(SMALL_NODE, 1)); + Assert.assertEquals(mgr.getResourceByLabel(DynamicNodeLabelsManager.NO_LABEL, null), + Resources.multiply(SMALL_NODE, 1)); + + // change a bunch of nodes -> labels + // n4 -> p2 + // n7 -> empty + // n5 -> p1 + // n8 -> empty + // n9 -> p1 + // + // now become: + // p1 : n5, n9 + // p2 : n1, n2, n4 + // p3 : n3, n6 + // p4 : [ ] + // p5 : [ ] + // no label: n8, n7 + mgr.replaceLabelsOnNode((Map) ImmutableMap.of(toNodeId("n4"), toSet("p2"), + toNodeId("n7"), DynamicNodeLabelsManager.EMPTY_STRING_SET, toNodeId("n5"), + toSet("p1"), toNodeId("n8"), DynamicNodeLabelsManager.EMPTY_STRING_SET, + toNodeId("n9"), toSet("p1"))); + + // check varibles + Assert.assertEquals(mgr.getResourceByLabel("p1", null), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getResourceByLabel("p2", null), + Resources.multiply(SMALL_NODE, 3)); + Assert.assertEquals(mgr.getResourceByLabel("p3", null), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getResourceByLabel("p4", null), + Resources.multiply(SMALL_NODE, 0)); + Assert.assertEquals(mgr.getResourceByLabel("p5", null), + Resources.multiply(SMALL_NODE, 0)); + Assert.assertEquals(mgr.getResourceByLabel("", null), + Resources.multiply(SMALL_NODE, 2)); + } + + @Test(timeout=5000) + public void testGetQueueResource() throws Exception { + Resource clusterResource = Resource.newInstance(9999, 1); + + /* + * Node->Labels: + * host1 : red, blue + * host2 : blue, yellow + * host3 : yellow + * host4 : + */ + mgr.addToCluserNodeLabels(toSet("red", "blue", "yellow")); + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("host1"), + toSet("red", "blue"))); + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("host2"), + toSet("blue", "yellow"))); + mgr.replaceLabelsOnNode(ImmutableMap.of(toNodeId("host3"), toSet("yellow"))); + + // active two NM to n1, one large and one small + mgr.activateNode(NodeId.newInstance("host1", 1), SMALL_NODE); + 
mgr.activateNode(NodeId.newInstance("host2", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("host3", 1), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("host4", 1), SMALL_NODE); + + // reinitialize queue + Set q1Label = toSet("red", "blue"); + Set q2Label = toSet("blue", "yellow"); + Set q3Label = toSet("yellow"); + Set q4Label = DynamicNodeLabelsManager.EMPTY_STRING_SET; + Set q5Label = toSet(DynamicNodeLabelsManager.ANY); + + Map> queueToLabels = new HashMap>(); + queueToLabels.put("Q1", q1Label); + queueToLabels.put("Q2", q2Label); + queueToLabels.put("Q3", q3Label); + queueToLabels.put("Q4", q4Label); + queueToLabels.put("Q5", q5Label); + + mgr.reinitializeQueueLabels(queueToLabels); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + mgr.removeLabelsFromNode(ImmutableMap.of(toNodeId("host1"), toSet("red"), + toNodeId("host2"), toSet("blue", "yellow"))); + mgr.addLabelsToNode(ImmutableMap.of(toNodeId("host3"), toSet("red"))); + /* + * Check resource after changes some labels + * Node->Labels: + * host1 : blue (was: red, blue) + * host2 : (was: blue, yellow) + * host3 : red, yellow (was: yellow) + * host4 : + */ + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Check resource after deactive/active some nodes + * Node->Labels: + * (deactived) host1 : blue + * host2 : + * (deactived and then actived) host3 : red, yellow + * host4 : + */ + mgr.deactivateNode(NodeId.newInstance("host1", 1)); + mgr.deactivateNode(NodeId.newInstance("host3", 1)); + mgr.activateNode(NodeId.newInstance("host3", 1), SMALL_NODE); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Check resource after refresh queue: + * Q1: blue + * Q2: red, blue + * Q3: red + * Q4: + * Q5: ANY + */ + q1Label = toSet("blue"); + q2Label = toSet("blue", "red"); + q3Label = toSet("red"); + q4Label = DynamicNodeLabelsManager.EMPTY_STRING_SET; + q5Label = toSet(DynamicNodeLabelsManager.ANY); + + queueToLabels.clear(); + queueToLabels.put("Q1", q1Label); + 
queueToLabels.put("Q2", q2Label); + queueToLabels.put("Q3", q3Label); + queueToLabels.put("Q4", q4Label); + queueToLabels.put("Q5", q5Label); + + mgr.reinitializeQueueLabels(queueToLabels); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Active NMs in nodes already have NM + * Node->Labels: + * host2 : + * host3 : red, yellow (3 NMs) + * host4 : (2 NMs) + */ + mgr.activateNode(NodeId.newInstance("host3", 2), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("host3", 3), SMALL_NODE); + mgr.activateNode(NodeId.newInstance("host4", 2), SMALL_NODE); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 6), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 6), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Deactive NMs in nodes already have NMs + * Node->Labels: + * host2 : + * host3 : red, yellow (2 NMs) + * host4 : (0 NMs) + */ + mgr.deactivateNode(NodeId.newInstance("host3", 3)); + mgr.deactivateNode(NodeId.newInstance("host4", 2)); + mgr.deactivateNode(NodeId.newInstance("host4", 1)); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index 457f21e..6a66385 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import 
org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.DrainDispatcher; @@ -63,6 +64,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; @@ -73,6 +75,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -254,7 +257,7 @@ protected RMApp createNewTestApp(ApplicationSubmissionContext submissionContext) RMApp application = new RMAppImpl(applicationId, rmContext, conf, name, user, queue, submissionContext, scheduler, masterService, - System.currentTimeMillis(), "YARN", null); + System.currentTimeMillis(), "YARN", null, null); testAppStartState(applicationId, user, name, queue, application); this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(), @@ -914,6 +917,7 @@ public void testAppsRecoveringStates() throws Exception { } } + @SuppressWarnings("deprecation") public void testRecoverApplication(ApplicationState appState, RMState rmState) throws Exception { ApplicationSubmissionContext submissionContext = @@ -923,7 +927,10 @@ public void testRecoverApplication(ApplicationState appState, RMState rmState) submissionContext.getApplicationName(), null, submissionContext.getQueue(), submissionContext, null, null, appState.getSubmitTime(), submissionContext.getApplicationType(), - submissionContext.getApplicationTags()); + submissionContext.getApplicationTags(), + BuilderUtils.newResourceRequest( + RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY, + submissionContext.getResource(), 1)); Assert.assertEquals(RMAppState.NEW, application.getState()); application.recover(rmState); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index 7f27f4e..eab4477 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -63,6 +63,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import 
org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.EventHandler; @@ -83,8 +84,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppFailedAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRejectedEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRunningOnNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerAllocatedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent; @@ -229,6 +230,7 @@ public TestRMAppAttemptTransitions(Boolean isSecurityEnabled) { this.isSecurityEnabled = isSecurityEnabled; } + @SuppressWarnings("deprecation") @Before public void setUp() throws Exception { AuthenticationMethod authMethod = AuthenticationMethod.SIMPLE; @@ -300,6 +302,7 @@ public void setUp() throws Exception { Mockito.doReturn(resourceScheduler).when(spyRMContext).getScheduler(); + final String user = MockApps.newUserName(); final String queue = MockApps.newQueue(); submissionContext = mock(ApplicationSubmissionContext.class); when(submissionContext.getQueue()).thenReturn(queue); @@ -315,7 +318,11 @@ public void setUp() throws Exception { application = mock(RMAppImpl.class); applicationAttempt = new RMAppAttemptImpl(applicationAttemptId, spyRMContext, scheduler, - masterService, submissionContext, new Configuration(), false); + masterService, submissionContext, new Configuration(), false, + BuilderUtils.newResourceRequest( + RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY, + submissionContext.getResource(), 1)); + when(application.getCurrentAppAttempt()).thenReturn(applicationAttempt); when(application.getApplicationId()).thenReturn(applicationId); spyRMContext.getRMApps().put(application.getApplicationId(), application); @@ -1399,13 +1406,16 @@ public void testFailedToFailed() { } + @SuppressWarnings("deprecation") @Test public void testContainersCleanupForLastAttempt() { // create a failed attempt. 
applicationAttempt = new RMAppAttemptImpl(applicationAttempt.getAppAttemptId(), spyRMContext, scheduler, masterService, submissionContext, new Configuration(), - true); + true, BuilderUtils.newResourceRequest( + RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY, + submissionContext.getResource(), 1)); when(submissionContext.getKeepContainersAcrossApplicationAttempts()) .thenReturn(true); when(submissionContext.getMaxAppAttempts()).thenReturn(1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index 460f35e..05592c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -21,13 +21,18 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedAction; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -46,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -58,6 +64,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMWithAMS; import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -74,6 +81,8 @@ import org.junit.Assert; import org.junit.Test; +import com.google.common.collect.Sets; + public class TestSchedulerUtils { private static final Log LOG = LogFactory.getLog(TestSchedulerUtils.class); @@ -173,69 +182,220 @@ public void testNormalizeRequestWithDominantResourceCalculator() { assertEquals(1, ask.getCapability().getVirtualCores()); assertEquals(2048, ask.getCapability().getMemory()); } - + @Test (timeout = 30000) - public void testValidateResourceRequest() { + public void testValidateResourceRequestWithErrorLabelsPermission() + throws IOException { + // mock queue and scheduler + YarnScheduler scheduler = mock(YarnScheduler.class); + Set labels = Sets.newHashSet("x", "y"); + QueueInfo 
queueInfo = mock(QueueInfo.class); + when(queueInfo.getQueueName()).thenReturn("queue"); + when(queueInfo.getNodeLabels()).thenReturn(labels); + when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean())) + .thenReturn(queueInfo); + Resource maxResource = Resources.createResource( YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); - // zero memory + // queue has labels, should succeed try { Resource resource = Resources.createResource( 0, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + resReq.setNodeLabelExpression("x"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setNodeLabelExpression("x && y"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setNodeLabelExpression("y"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setNodeLabelExpression(""); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setNodeLabelExpression(" "); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); } catch (InvalidResourceRequestException e) { - fail("Zero memory should be accepted"); + e.printStackTrace(); + fail("Should be valid when request labels are a subset of queue labels"); } - - // zero vcores + + // queue has labels, should fail try { Resource resource = Resources.createResource( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - 0); + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + resReq.setNodeLabelExpression("z"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + fail("Should fail"); } catch (InvalidResourceRequestException e) { - fail("Zero vcores should be accepted"); } - - // max memory + try { Resource resource = Resources.createResource( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, + 0, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + resReq.setNodeLabelExpression("x && y && z"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + fail("Should fail"); } catch (InvalidResourceRequestException e) { - fail("Max memory should be accepted"); } - - // max vcores + + // queue doesn't have labels, should succeed + labels.clear(); try { Resource resource = Resources.createResource( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setNodeLabelExpression(""); + 
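// an empty or blank label expression is treated as "no label" and is + // expected to pass validation regardless of the queue's labels + 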
SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setNodeLabelExpression(" "); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); } catch (InvalidResourceRequestException e) { - fail("Max vcores should not be accepted"); + e.printStackTrace(); + fail("Should be valid when request labels is empty"); } - - // negative memory + + // queue doesn't have label, failed + try { + Resource resource = Resources.createResource( + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + resReq.setNodeLabelExpression("x"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + fail("Should fail"); + } catch (InvalidResourceRequestException e) { + } + + // queue is "*", always succeeded + labels.add(DynamicNodeLabelsManager.ANY); try { Resource resource = Resources.createResource( - -1, + 0, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + resReq.setNodeLabelExpression("x"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setNodeLabelExpression("x && y && z"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setNodeLabelExpression("z"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + } catch (InvalidResourceRequestException e) { + e.printStackTrace(); + fail("Should be valid when request labels is empty"); + } + } + + @Test (timeout = 30000) + public void testValidateResourceRequest() { + YarnScheduler mockScheduler = mock(YarnScheduler.class); + + Resource maxResource = + Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + + // zero memory + try { + Resource resource = + Resources.createResource(0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = + BuilderUtils.newResourceRequest(mock(Priority.class), + ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); + } catch (InvalidResourceRequestException e) { + fail("Zero memory should be accepted"); + } + + // zero vcores + try { + Resource resource = + Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0); + ResourceRequest resReq = + BuilderUtils.newResourceRequest(mock(Priority.class), + ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); + } catch (InvalidResourceRequestException e) { + fail("Zero vcores should be accepted"); + } + + // max memory + try { + Resource resource = + Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = + BuilderUtils.newResourceRequest(mock(Priority.class), + ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); + } catch (InvalidResourceRequestException e) { + fail("Max memory should be accepted"); + } + + // max vcores + try { + 
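// requesting exactly the configured maximum vcores should be accepted + 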
Resource resource = + Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = + BuilderUtils.newResourceRequest(mock(Priority.class), + ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); + } catch (InvalidResourceRequestException e) { + fail("Max vcores should be accepted"); + } + + // negative memory + try { + Resource resource = + Resources.createResource(-1, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = + BuilderUtils.newResourceRequest(mock(Priority.class), + ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); fail("Negative memory should not be accepted"); } catch (InvalidResourceRequestException e) { // expected @@ -243,12 +403,14 @@ public void testValidateResourceRequest() { // negative vcores try { - Resource resource = Resources.createResource( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - -1); - ResourceRequest resReq = BuilderUtils.newResourceRequest( - mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + Resource resource = + Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1); + ResourceRequest resReq = + BuilderUtils.newResourceRequest(mock(Priority.class), + ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); fail("Negative vcores should not be accepted"); } catch (InvalidResourceRequestException e) { // expected @@ -256,12 +418,15 @@ public void testValidateResourceRequest() { // more than max memory try { - Resource resource = Resources.createResource( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); - ResourceRequest resReq = BuilderUtils.newResourceRequest( - mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + Resource resource = + Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = + BuilderUtils.newResourceRequest(mock(Priority.class), + ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); fail("More than max memory should not be accepted"); } catch (InvalidResourceRequestException e) { // expected @@ -269,13 +434,16 @@ public void testValidateResourceRequest() { // more than max vcores try { - Resource resource = Resources.createResource( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES - + 1); - ResourceRequest resReq = BuilderUtils.newResourceRequest( - mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + Resource resource = + Resources + .createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1); + ResourceRequest resReq = + BuilderUtils.newResourceRequest(mock(Priority.class), + ResourceRequest.ANY, resource, 1); + 
SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); fail("More than max vcores should not be accepted"); } catch (InvalidResourceRequestException e) { // expected diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java index ff8e873..ebf604d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -65,6 +65,9 @@ LeafQueue queue; private final ResourceCalculator resourceCalculator = new DefaultResourceCalculator(); + + RMContext rmContext = null; + @Before public void setUp() throws IOException { @@ -73,7 +76,9 @@ public void setUp() throws IOException { YarnConfiguration conf = new YarnConfiguration(); setupQueueConfiguration(csConf); - + rmContext = TestUtils.getMockRMContext(); + + CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class); when(csContext.getConfiguration()).thenReturn(csConf); when(csContext.getConf()).thenReturn(conf); @@ -89,6 +94,8 @@ public void setUp() throws IOException { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); + RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf); containerTokenSecretManager.rollMasterKey(); @@ -162,6 +169,7 @@ public void testLimitsComputation() throws Exception { when(csContext.getQueueComparator()). thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()).thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); // Say cluster has 100 nodes of 16G each Resource clusterResource = Resources.createResource(100 * 16 * GB, 100 * 16); @@ -475,6 +483,7 @@ public void testHeadroom() throws Exception { when(csContext.getQueueComparator()). 
thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()).thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); // Say cluster has 100 nodes of 16G each Resource clusterResource = Resources.createResource(100 * 16 * GB); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSQueueUtils.java index 7260afd..297c551 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSQueueUtils.java @@ -19,38 +19,19 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.reset; import static org.mockito.Mockito.when; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.junit.Assert; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import org.junit.After; -import org.junit.Before; import org.junit.Test; -import org.mockito.InOrder; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; public class TestCSQueueUtils { @@ -88,6 +69,8 @@ public void runInvalidDivisorTest(boolean useDominant) throws Exception { thenReturn(Resources.createResource(GB, 1)); when(csContext.getMaximumResourceCapability()). thenReturn(Resources.createResource(0, 0)); + RMContext rmContext = mock(RMContext.class); + when(csContext.getRMContext()).thenReturn(rmContext); final String L1Q1 = "L1Q1"; csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {L1Q1}); @@ -129,6 +112,8 @@ public void testAbsoluteMaxAvailCapacityNoUse() throws Exception { thenReturn(Resources.createResource(GB, 1)); when(csContext.getMaximumResourceCapability()). thenReturn(Resources.createResource(16*GB, 32)); + RMContext rmContext = mock(RMContext.class); + when(csContext.getRMContext()).thenReturn(rmContext); final String L1Q1 = "L1Q1"; csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {L1Q1}); @@ -174,6 +159,9 @@ public void testAbsoluteMaxAvailCapacityWithUse() throws Exception { when(csContext.getMaximumResourceCapability()). 
thenReturn(Resources.createResource(16*GB, 32)); + RMContext rmContext = mock(RMContext.class); + when(csContext.getRMContext()).thenReturn(rmContext); + final String L1Q1 = "L1Q1"; final String L1Q2 = "L1Q2"; final String L2Q1 = "L2Q1"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index f7c098c..76f6c5a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -962,10 +962,7 @@ public void testNumClusterNodes() throws Exception { YarnConfiguration conf = new YarnConfiguration(); CapacityScheduler cs = new CapacityScheduler(); cs.setConf(conf); - RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, - null, new RMContainerTokenSecretManager(conf), - new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + RMContext rmContext = TestUtils.getMockRMContext(); cs.setRMContext(rmContext); CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java index fdb9028..af58a43 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java @@ -99,6 +99,7 @@ public void setUp() throws Exception { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). 
thenReturn(resourceComparator); + when(csContext.getRMContext()).thenReturn(rmContext); } private FiCaSchedulerApp getMockApplication(int appId, String user) { @@ -132,11 +133,11 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { final Resource allocatedResource = Resources.createResource(allocation); if (queue instanceof ParentQueue) { ((ParentQueue)queue).allocateResource(clusterResource, - allocatedResource); + allocatedResource, null); } else { FiCaSchedulerApp app1 = getMockApplication(0, ""); ((LeafQueue)queue).allocateResource(clusterResource, app1, - allocatedResource); + allocatedResource, null); } // Next call - nothing diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java index 85ef381..c078428 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -20,12 +20,14 @@ import java.util.ArrayList; import java.util.List; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.SecurityUtilTestHelper; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LogAggregationContext; @@ -41,19 +43,28 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.RMSecretManagerService; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.TestFifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DummyDynamicNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import com.google.common.collect.ImmutableMap; 
+import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; + + public class TestContainerAllocation { @@ -307,4 +318,411 @@ protected RMSecretManagerService createRMSecretManagerService() { rm1.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.ALLOCATED); MockRM.launchAndRegisterAM(app1, rm1, nm1); } + + private Configuration getConfigurationWithDefaultQueueLabels( + Configuration config) { + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + + CapacitySchedulerConfiguration conf = + (CapacitySchedulerConfiguration) getConfigurationWithQueueLabels(config); + conf.setDefaultNodeLabelExpression(A, "x"); + conf.setDefaultNodeLabelExpression(B, "y"); + return conf; + } + + private Configuration getConfigurationWithQueueLabels(Configuration config) { + CapacitySchedulerConfiguration conf = + new CapacitySchedulerConfiguration(config); + + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"}); + + // root can access anything + conf.setAccessibleLabels(CapacitySchedulerConfiguration.ROOT, + toSet("x", "y")); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 15); + conf.setAccessibleLabels(A, toSet("x")); + conf.setCapacityByLabel(A, "x", 100); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 20); + conf.setAccessibleLabels(B, toSet("y")); + conf.setCapacityByLabel(B, "y", 100); + + final String C = CapacitySchedulerConfiguration.ROOT + ".c"; + conf.setCapacity(C, 70); + conf.setMaximumCapacity(C, 70); + conf.setAccessibleLabels(C, DynamicNodeLabelsManager.EMPTY_STRING_SET); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + conf.setQueues(A, new String[] {"a1"}); + conf.setCapacity(A1, 100); + conf.setMaximumCapacity(A1, 100); + conf.setCapacityByLabel(A1, "x", 100); + + final String B1 = B + ".b1"; + conf.setQueues(B, new String[] {"b1"}); + conf.setCapacity(B1, 100); + conf.setMaximumCapacity(B1, 100); + conf.setCapacityByLabel(B1, "y", 100); + + final String C1 = C + ".c1"; + conf.setQueues(C, new String[] {"c1"}); + conf.setCapacity(C1, 100); + conf.setMaximumCapacity(C1, 100); + + return conf; + } + + private void checkTaskContainersHost(ApplicationAttemptId attemptId, + ContainerId containerId, ResourceManager rm, String host) { + YarnScheduler scheduler = rm.getRMContext().getScheduler(); + SchedulerAppReport appReport = scheduler.getSchedulerAppInfo(attemptId); + + Assert.assertTrue(appReport.getLiveContainers().size() > 0); + for (RMContainer c : appReport.getLiveContainers()) { + if (c.getContainerId().equals(containerId)) { + Assert.assertEquals(host, c.getAllocatedNode().getHost()); + } + } + } + + private <E> Set<E> toSet(E... 
elements) { + Set<E> set = Sets.newHashSet(elements); + return set; + } + + private Configuration getComplexConfigurationWithQueueLabels( + Configuration config) { + CapacitySchedulerConfiguration conf = + new CapacitySchedulerConfiguration(config); + + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"}); + + // root can access anything + conf.setAccessibleLabels(CapacitySchedulerConfiguration.ROOT, + toSet("x", "y", "z")); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 10); + conf.setAccessibleLabels(A, toSet("x", "y")); + conf.setCapacityByLabel(A, "x", 100); + conf.setCapacityByLabel(A, "y", 50); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 90); + conf.setMaximumCapacity(B, 100); + conf.setAccessibleLabels(B, toSet("y", "z")); + conf.setCapacityByLabel(B, "y", 50); + conf.setCapacityByLabel(B, "z", 100); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + conf.setQueues(A, new String[] {"a1"}); + conf.setCapacity(A1, 100); + conf.setMaximumCapacity(A1, 100); + conf.setAccessibleLabels(A1, toSet("x", "y")); + conf.setDefaultNodeLabelExpression(A1, "x"); + conf.setCapacityByLabel(A1, "x", 100); + conf.setCapacityByLabel(A1, "y", 100); + + conf.setQueues(B, new String[] {"b1", "b2"}); + final String B1 = B + ".b1"; + conf.setCapacity(B1, 50); + conf.setMaximumCapacity(B1, 50); + conf.setAccessibleLabels(B1, DynamicNodeLabelsManager.EMPTY_STRING_SET); + + final String B2 = B + ".b2"; + conf.setCapacity(B2, 50); + conf.setMaximumCapacity(B2, 50); + conf.setAccessibleLabels(B2, toSet("y", "z")); + conf.setCapacityByLabel(B2, "y", 100); + conf.setCapacityByLabel(B2, "z", 100); + + return conf; + } + + @Test (timeout = 300000) + public void testContainerAllocateWithComplexLabels() throws Exception { + // exercise a more complex queue/label layout, documented below 
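+ // DummyDynamicNodeLabelsManager is assumed to be an in-memory test stub of + // DynamicNodeLabelsManager that skips the persistent label store, so label + // mappings can be injected directly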
+ final DynamicNodeLabelsManager mgr = new DummyDynamicNodeLabelsManager(); + mgr.init(conf); + + /* + * Queue structure: + * root (*) + * / \ + * a x(100%), y(50%) b y(50%), z(100%) + * / / \ + * a1 (x,y) b1(no) b2(y,z) + * 100% y = 100%, z = 100% + * + * Node structure: + * h1 : x + * h2 : x, y + * h3 : y + * h4 : y, z + * h5 : NO + * + * Total resource: + * x: 4G + * y: 6G + * z: 2G + * *: 2G + * + * Resource of + * a1: x=4G, y=3G, NO=1G + * b1: NO=0.1G + * b2: y=3G, z=2G, NO=0.9G + * + * Each node can only allocate two containers + */ + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y", "z")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), + toSet("x"), NodeId.newInstance("h2", 0), toSet("x", "y"), + NodeId.newInstance("h3", 0), toSet("y"), NodeId.newInstance("h4", 0), + toSet("y", "z"), NodeId.newInstance("h5", 0), + DynamicNodeLabelsManager.EMPTY_STRING_SET)); + + // inject node label manager + MockRM rm1 = new MockRM(getComplexConfigurationWithQueueLabels(conf)) { + @Override + public DynamicNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 2048); + MockNM nm2 = rm1.registerNode("h2:1234", 2048); + MockNM nm3 = rm1.registerNode("h3:1234", 2048); + MockNM nm4 = rm1.registerNode("h4:1234", 2048); + MockNM nm5 = rm1.registerNode("h5:1234", 2048); + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check that all containers + // will be allocated on h1 + RMApp app1 = rm1.submitApp(1024, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // request a container (label = x && y). Can only allocate on nm2 + am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x && y"); + containerId = + ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue b1 (no accessible labels), and check that all + // containers will be allocated on h5 + RMApp app2 = rm1.submitApp(1024, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm5); + + // request a container for the AM, will succeed; + // now b1's queue capacity is fully used, so no more containers can be + // allocated (maximum capacity reached) + am2.allocate("*", 1024, 1, new ArrayList<ContainerId>()); + containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm4, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertFalse(rm1.waitForState(nm5, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + + // launch an app to queue b2 + RMApp app3 = rm1.submitApp(1024, "app", "user", null, "b2"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm5); + + // request a container. try to allocate on nm1 (label = x) and nm3 (label = + // y). 
Will successfully allocate on nm3. + am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); + containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + // try to allocate container (request label = y && z) on nm3 (label = y) and + // nm4 (label = y,z). Will successfully allocate on nm4 only. + am3.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y && z"); + containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 3); + Assert.assertFalse(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm4, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h4"); + + rm1.close(); + } + + @Test (timeout = 120000) + public void testContainerAllocateWithLabels() throws Exception { + final DynamicNodeLabelsManager mgr = new DummyDynamicNodeLabelsManager(); + mgr.init(conf); + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"), + NodeId.newInstance("h2", 0), toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) { + @Override + public DynamicNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y + MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check that all containers + // will be allocated on h1 + RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm3); + + // request a container. + am1.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "x"); + containerId = + ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + // launch an app to queue b1 (label = y), and check that all containers + // will be allocated on h2 + RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3); + + // request a container. + am2.allocate("*", 1024, 1, new ArrayList<ContainerId>(), "y"); + containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue c1 (label = ""), and check that all containers + // will be allocated on h3 + RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3); + + // request a container. 
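+ // no label expression is given, so this request is expected to match only + // the unlabeled node h3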
+ am3.allocate("*", 1024, 1, new ArrayList()); + containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + rm1.close(); + } + + @Test (timeout = 120000) + public void testContainerAllocateWithDefaultQueueLabels() throws Exception { + // This test is pretty much similar to testContainerAllocateWithLabel. + // Difference is, this test doesn't specify label expression in ResourceRequest, + // instead, it uses default queue label expression + + final DynamicNodeLabelsManager mgr = new DummyDynamicNodeLabelsManager(); + mgr.init(conf); + + // set node -> label + mgr.addToCluserNodeLabels(ImmutableSet.of("x", "y")); + mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x"), + NodeId.newInstance("h2", 0), toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(getConfigurationWithDefaultQueueLabels(conf)) { + @Override + public DynamicNodeLabelsManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y + MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check all container will + // be allocated in h1 + RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // request a container. + am1.allocate("*", 1024, 1, new ArrayList()); + containerId = + ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + // launch an app to queue b1 (label = y), and check all container will + // be allocated in h2 + RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + // request a container. + am2.allocate("*", 1024, 1, new ArrayList()); + containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue c1 (label = ""), and check all container will + // be allocated in h3 + RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3); + + // request a container. 
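+ // queue c1 has no default label expression, so this request is again + // expected to match only the unlabeled node h3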
+ am3.allocate("*", 1024, 1, new ArrayList()); + containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + rm1.close(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 092ff83..780606d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -40,10 +40,10 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import org.junit.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.UserGroupInformation; @@ -63,6 +63,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; @@ -82,6 +83,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Matchers; @@ -147,6 +149,7 @@ public void setUp() throws Exception { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). 
thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf); containerTokenSecretManager.rollMasterKey(); @@ -747,6 +750,81 @@ public void testHeadroomWithMaxCap() throws Exception { a.assignContainers(clusterResource, node_1, false); assertEquals(1*GB, app_2.getHeadroom().getMemory()); // hit queue max-cap } + + @SuppressWarnings("unchecked") + @Test + public void testHeadroomWithLabel() throws Exception { + DynamicNodeLabelsManager nlm = mock(DynamicNodeLabelsManager.class); + + // Mock the queue + LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A)); + + //unset maxCapacity + a.setMaxCapacity(1.0f); + + // Users + final String user_0 = "user_0"; + + // Submit applications + final ApplicationAttemptId appAttemptId_0 = + TestUtils.getMockApplicationAttemptId(0, 0); + FiCaSchedulerApp app_0 = + new FiCaSchedulerApp(appAttemptId_0, user_0, a, + a.getActiveUsersManager(), rmContext); + a.submitApplicationAttempt(app_0, user_0); + + // Setup some nodes + String host_0 = "127.0.0.1"; + FiCaSchedulerNode node_0 = + TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 64 * GB); + + final int numNodes = 1; + Resource clusterResource = Resources.createResource(numNodes * (64 * GB), 1); + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + + // Setup resource-requests + Priority priority = TestUtils.createMockPriority(1); + app_0.updateResourceRequests(Collections.singletonList(TestUtils + .createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, + recordFactory))); + + /** + * Start testing... + */ + + // Set user-limit + a.setUserLimit(100); + a.setUserLimitFactor(1); + + // 1 container to user_0 + a.assignContainers(clusterResource, node_0, false); + assertEquals(1 * GB, a.getUsedResources().getMemory()); + assertEquals(1 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(5 * GB, app_0.getHeadroom().getMemory()); // User limit = 6G + + // mock getQueueResource to 4999 MB + when( + nlm.getQueueResource(any(String.class), any(Set.class), + any(Resource.class))).thenReturn(Resource.newInstance(4999, 1)); + a.setNodeLabelManager(nlm); + + // do a resource allocation again + app_0.updateResourceRequests(Collections.singletonList(TestUtils + .createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, + recordFactory))); + + when( + nlm.getResourceByLabel(any(String.class), + any(Resource.class))).thenReturn(Resource.newInstance(4999, 1)); + a.assignContainers(clusterResource, node_0, false); + + // current headroom should be + // Headroom = min(6G (user-limit), 4G (queueLabelResource)) - + // 2G (used-resource) = 2G + assertEquals(2 * GB, a.getUsedResources().getMemory()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, app_0.getHeadroom().getMemory()); + } @Test public void testSingleQueueWithMultipleUsers() throws Exception { @@ -2042,6 +2120,7 @@ public void testMaxAMResourcePerQueuePercentAfterQueueRefresh() Resource clusterResource = Resources .createResource(100 * 16 * GB, 100 * 32); CapacitySchedulerContext csContext = mockCSContext(csConf, clusterResource); + when(csContext.getRMContext()).thenReturn(rmContext); csConf.setFloat(CapacitySchedulerConfiguration. 
MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.1f); ParentQueue root = new ParentQueue(csContext, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index 8b24a7e..72983ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -95,6 +95,7 @@ public void setUp() throws Exception { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). thenReturn(resourceComparator); + when(csContext.getRMContext()).thenReturn(rmContext); } private static final String A = "a"; @@ -144,11 +145,11 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { final Resource allocatedResource = Resources.createResource(allocation); if (queue instanceof ParentQueue) { ((ParentQueue)queue).allocateResource(clusterResource, - allocatedResource); + allocatedResource, null); } else { FiCaSchedulerApp app1 = getMockApplication(0, ""); ((LeafQueue)queue).allocateResource(clusterResource, app1, - allocatedResource); + allocatedResource, null); } // Next call - nothing diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java index f573f43..2317fab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java @@ -27,14 +27,11 @@ import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; -import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SimpleGroupsMapping; -import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; -import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.junit.After; import org.junit.Assert; import org.junit.Test; @@ -79,10 +76,7 @@ public void testQueueMapping() throws Exception { 
YarnConfiguration conf = new YarnConfiguration(csConf); CapacityScheduler cs = new CapacityScheduler(); - RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, - null, new RMContainerTokenSecretManager(conf), - new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + RMContext rmContext = TestUtils.getMockRMContext(); cs.setConf(conf); cs.setRMContext(rmContext); cs.init(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java index a3b990c..a8702b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java @@ -18,23 +18,40 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import org.junit.Assert; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; +import org.junit.Assert; +import org.junit.Before; import org.junit.Test; +import com.google.common.collect.ImmutableSet; + public class TestQueueParsing { private static final Log LOG = LogFactory.getLog(TestQueueParsing.class); private static final double DELTA = 0.000001; + private DynamicNodeLabelsManager nodeLabelManager; + + @Before + public void setup() { + nodeLabelManager = mock(DynamicNodeLabelsManager.class); + when(nodeLabelManager.containsNodeLabel(any(String.class))).thenReturn(true); + } + @Test public void testQueueParsing() throws Exception { CapacitySchedulerConfiguration csConf = @@ -43,15 +60,11 @@ public void testQueueParsing() throws Exception { YarnConfiguration conf = new YarnConfiguration(csConf); CapacityScheduler capacityScheduler = new CapacityScheduler(); - RMContextImpl rmContext = new RMContextImpl(null, null, - null, null, null, null, new RMContainerTokenSecretManager(conf), - new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); capacityScheduler.setConf(conf); - capacityScheduler.setRMContext(rmContext); + capacityScheduler.setRMContext(TestUtils.getMockRMContext()); capacityScheduler.init(conf); capacityScheduler.start(); - capacityScheduler.reinitialize(conf, rmContext); + capacityScheduler.reinitialize(conf, TestUtils.getMockRMContext()); CSQueue a = capacityScheduler.getQueue("a"); 
Assert.assertEquals(0.10, a.getAbsoluteCapacity(), DELTA); @@ -202,4 +215,164 @@ public void testMaxCapacity() throws Exception { capacityScheduler.stop(); } + private void setupQueueConfigurationWithoutLabels(CapacitySchedulerConfiguration conf) { + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"}); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 15); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 90); + + LOG.info("Setup top-level queues"); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + final String A2 = A + ".a2"; + conf.setQueues(A, new String[] {"a1", "a2"}); + conf.setCapacity(A1, 30); + conf.setMaximumCapacity(A1, 45); + conf.setCapacity(A2, 70); + conf.setMaximumCapacity(A2, 85); + + final String B1 = B + ".b1"; + final String B2 = B + ".b2"; + final String B3 = B + ".b3"; + conf.setQueues(B, new String[] {"b1", "b2", "b3"}); + conf.setCapacity(B1, 50); + conf.setMaximumCapacity(B1, 85); + conf.setCapacity(B2, 30); + conf.setMaximumCapacity(B2, 35); + conf.setCapacity(B3, 20); + conf.setMaximumCapacity(B3, 35); + } + + private void setupQueueConfigurationWithLabels(CapacitySchedulerConfiguration conf) { + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"}); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 15); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 90); + + LOG.info("Setup top-level queues"); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + final String A2 = A + ".a2"; + conf.setQueues(A, new String[] {"a1", "a2"}); + conf.setAccessibleLabels(A, ImmutableSet.of("red", "blue")); + conf.setCapacityByLabel(A, "red", 50); + conf.setCapacityByLabel(A, "blue", 50); + + conf.setCapacity(A1, 30); + conf.setMaximumCapacity(A1, 45); + conf.setCapacityByLabel(A1, "red", 50); + conf.setCapacityByLabel(A1, "blue", 100); + + conf.setCapacity(A2, 70); + conf.setMaximumCapacity(A2, 85); + conf.setAccessibleLabels(A2, ImmutableSet.of("red")); + conf.setCapacityByLabel(A2, "red", 50); + + final String B1 = B + ".b1"; + final String B2 = B + ".b2"; + final String B3 = B + ".b3"; + conf.setQueues(B, new String[] {"b1", "b2", "b3"}); + conf.setAccessibleLabels(B, ImmutableSet.of("red", "blue")); + conf.setCapacityByLabel(B, "red", 50); + conf.setCapacityByLabel(B, "blue", 50); + + conf.setCapacity(B1, 50); + conf.setMaximumCapacity(B1, 85); + conf.setCapacityByLabel(B1, "red", 50); + conf.setCapacityByLabel(B1, "blue", 50); + + conf.setCapacity(B2, 30); + conf.setMaximumCapacity(B2, 35); + conf.setCapacityByLabel(B2, "red", 25); + conf.setCapacityByLabel(B2, "blue", 25); + + conf.setCapacity(B3, 20); + conf.setMaximumCapacity(B3, 35); + conf.setCapacityByLabel(B3, "red", 25); + conf.setCapacityByLabel(B3, "blue", 25); + } + + @Test + public void testQueueParsingReinitializeWithLabels() throws IOException { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + setupQueueConfigurationWithoutLabels(csConf); + YarnConfiguration conf = new YarnConfiguration(csConf); + + CapacityScheduler capacityScheduler = new CapacityScheduler(); + RMContextImpl rmContext = + new RMContextImpl(null, null, null, null, null, null, + new RMContainerTokenSecretManager(conf), + new NMTokenSecretManagerInRM(conf), + new 
ClientToAMTokenSecretManagerInRM(), null); + rmContext.setNodeLabelManager(nodeLabelManager); + capacityScheduler.setConf(conf); + capacityScheduler.setRMContext(rmContext); + capacityScheduler.init(conf); + capacityScheduler.start(); + csConf = new CapacitySchedulerConfiguration(); + setupQueueConfigurationWithLabels(csConf); + conf = new YarnConfiguration(csConf); + capacityScheduler.reinitialize(conf, rmContext); + checkQueueLabels(capacityScheduler); + capacityScheduler.stop(); + } + + private void checkQueueLabels(CapacityScheduler capacityScheduler) { + // queue-A is red, blue + Assert.assertTrue(capacityScheduler.getQueue("a").getAccessibleLabels() + .containsAll(ImmutableSet.of("red", "blue"))); + + // queue-A1 inherits A's configuration + Assert.assertTrue(capacityScheduler.getQueue("a1").getAccessibleLabels() + .containsAll(ImmutableSet.of("red", "blue"))); + + // queue-A2 is "red" + Assert.assertEquals(1, capacityScheduler + .getQueue("a2").getAccessibleLabels().size()); + Assert.assertTrue(capacityScheduler + .getQueue("a2").getAccessibleLabels().contains("red")); + + // queue-B is "red"/"blue" + Assert.assertTrue(capacityScheduler + .getQueue("b").getAccessibleLabels().containsAll(ImmutableSet.of("red", "blue"))); + + // queue-B2 inherits "red"/"blue" + Assert.assertTrue(capacityScheduler + .getQueue("b2").getAccessibleLabels().containsAll(ImmutableSet.of("red", "blue"))); + } + + @Test + public void testQueueParsingWithLabels() throws IOException { + YarnConfiguration conf = new YarnConfiguration(); + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(conf); + setupQueueConfigurationWithLabels(csConf); + + CapacityScheduler capacityScheduler = new CapacityScheduler(); + RMContextImpl rmContext = + new RMContextImpl(null, null, null, null, null, null, + new RMContainerTokenSecretManager(csConf), + new NMTokenSecretManagerInRM(csConf), + new ClientToAMTokenSecretManagerInRM(), null); + rmContext.setNodeLabelManager(nodeLabelManager); + capacityScheduler.setConf(csConf); + capacityScheduler.setRMContext(rmContext); + capacityScheduler.init(csConf); + capacityScheduler.start(); + checkQueueLabels(capacityScheduler); + capacityScheduler.stop(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservationQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservationQueue.java index c53b7a9..29d4061 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservationQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservationQueue.java @@ -23,6 +23,8 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.IOException; + import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement; @@ -42,8 +44,7 @@ ReservationQueue reservationQueue; @Before - public void setup() { - + public void setup() throws IOException { // setup a 
context / conf csConf = new CapacitySchedulerConfiguration(); YarnConfiguration conf = new YarnConfiguration(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java index 0f8290e..6c09e44 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java @@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.nodelabels.NodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; @@ -819,7 +820,9 @@ public void testAssignToQueue() throws Exception { // allocate to queue so that the potential new capacity is greater then // absoluteMaxCapacity Resource capability = Resources.createResource(32 * GB, 0); - boolean res = a.assignToQueue(clusterResource, capability, app_0, true); + boolean res = + a.assignToQueue(clusterResource, capability, + NodeLabelsManager.EMPTY_STRING_SET, app_0, true); assertFalse(res); // now add in reservations and make sure it continues if config set @@ -836,23 +839,29 @@ public void testAssignToQueue() throws Exception { assertEquals(3 * GB, node_1.getUsedResource().getMemory()); capability = Resources.createResource(5 * GB, 0); - res = a - .assignToQueue(clusterResource, capability, app_0, true); + res = + a.assignToQueue(clusterResource, capability, + NodeLabelsManager.EMPTY_STRING_SET, app_0, true); assertTrue(res); // tell to not check reservations - res = a.assignToQueue(clusterResource, capability, app_0, false); + res = + a.assignToQueue(clusterResource, capability, + NodeLabelsManager.EMPTY_STRING_SET, app_0, false); assertFalse(res); refreshQueuesTurnOffReservationsContLook(a, csConf); // should return false no matter what checkReservations is passed // in since feature is off - res = a.assignToQueue(clusterResource, capability, app_0, false); + res = + a.assignToQueue(clusterResource, capability, + NodeLabelsManager.EMPTY_STRING_SET, app_0, false); assertFalse(res); - res = a - .assignToQueue(clusterResource, capability, app_0, true); + res = + a.assignToQueue(clusterResource, capability, + NodeLabelsManager.EMPTY_STRING_SET, app_0, true); assertFalse(res); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 9cb902d..f37f409 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -18,11 +18,14 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; +import java.util.Set; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -43,16 +46,19 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; +import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; -import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.util.resource.Resources; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; public class TestUtils { private static final Log LOG = LogFactory.getLog(TestUtils.class); @@ -61,7 +67,7 @@ * Get a mock {@link RMContext} for use in test cases. 
* @return a mock {@link RMContext} for use in test cases */ - @SuppressWarnings("rawtypes") + @SuppressWarnings({ "rawtypes", "unchecked" }) public static RMContext getMockRMContext() { // Null dispatcher Dispatcher nullDispatcher = new Dispatcher() { @@ -93,6 +99,27 @@ public EventHandler getEventHandler() { new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), new ClientToAMTokenSecretManagerInRM(), writer); + DynamicNodeLabelsManager nlm = mock(DynamicNodeLabelsManager.class); + when( + nlm.getQueueResource(any(String.class), any(Set.class), + any(Resource.class))).thenAnswer(new Answer<Resource>() { + @Override + public Resource answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + return (Resource) args[2]; + } + }); + + when(nlm.getResourceByLabel(any(String.class), any(Resource.class))) + .thenAnswer(new Answer<Resource>() { + @Override + public Resource answer(InvocationOnMock invocation) throws Throwable { + Object[] args = invocation.getArguments(); + return (Resource) args[1]; + } + }); + + rmContext.setNodeLabelManager(nlm); rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class)); return rmContext; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java index bd7f1bd..7b6aaf3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java @@ -216,7 +216,7 @@ protected void createApplicationWithAMResource(ApplicationAttemptId attId, RMApp rmApp = new RMAppImpl(attId.getApplicationId(), rmContext, conf, null, null, null, ApplicationSubmissionContext.newInstance(null, null, null, null, null, false, false, 0, amResource, null), null, null, - 0, null, null); + 0, null, null, null); rmContext.getRMApps().put(attId.getApplicationId(), rmApp); AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent( attId.getApplicationId(), queue, user); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 67164c6..843555f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -2420,7 +2420,7 @@ public void testNotAllowSubmitApplication() throws Exception { RMApp application = new RMAppImpl(applicationId, resourceManager.getRMContext(), conf, name, user, queue, submissionContext, scheduler, masterService, -
System.currentTimeMillis(), "YARN", null); + System.currentTimeMillis(), "YARN", null, null); resourceManager.getRMContext().getRMApps().putIfAbsent(applicationId, application); application.handle(new RMAppEvent(applicationId, RMAppEventType.START)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java index 0974311..bb38079 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java @@ -49,7 +49,7 @@ // Number of Actual Table Headers for NodesPage.NodesBlock might change in // future. In that case this value should be adjusted to the new value. final int numberOfThInMetricsTable = 16; - final int numberOfActualTableHeaders = 12; + final int numberOfActualTableHeaders = 13; private Injector injector; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index 0df7c0d..8c30dcb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.MockAsm; +import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.DynamicNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; @@ -46,8 +47,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.webapp.WebApps; @@ -162,7 +163,7 @@ public static RMContext mockRMContext(int numApps, int racks, int numNodes, for (RMNode node : deactivatedNodes) { deactivatedNodesMap.put(node.getHostName(), node); } - return new 
RMContextImpl(null, null, null, null, + RMContext rmContext = new RMContextImpl(null, null, null, null, null, null, null, null, null, null) { @Override public ConcurrentMap getRMApps() { @@ -177,6 +178,9 @@ public static RMContext mockRMContext(int numApps, int racks, int numNodes, return nodesMap; } }; + DynamicNodeLabelsManager nlm = mock(DynamicNodeLabelsManager.class); + rmContext.setNodeLabelManager(nlm); + return rmContext; } public static ResourceManager mockRm(int apps, int racks, int nodes, @@ -203,10 +207,13 @@ public static CapacityScheduler mockCapacityScheduler() throws IOException { CapacityScheduler cs = new CapacityScheduler(); cs.setConf(new YarnConfiguration()); - cs.setRMContext(new RMContextImpl(null, null, null, null, null, + DynamicNodeLabelsManager nlm = mock(DynamicNodeLabelsManager.class); + RMContext rmContext = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM(), null); + rmContext.setNodeLabelManager(nlm); + cs.setRMContext(rmContext); cs.init(conf); return cs; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java index e58c30f..3e62c3c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java @@ -357,10 +357,10 @@ private void verifyClusterSchedulerGeneric(String type, float usedCapacity, private void verifySubQueue(JSONObject info, String q, float parentAbsCapacity, float parentAbsMaxCapacity) throws JSONException, Exception { - int numExpectedElements = 12; + int numExpectedElements = 13; boolean isParentQueue = true; if (!info.has("queues")) { - numExpectedElements = 22; + numExpectedElements = 23; isParentQueue = false; } assertEquals("incorrect number of elements", numExpectedElements, info.length()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java new file mode 100644 index 0000000..d29194b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java @@ -0,0 +1,402 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.io.StringReader; +import java.io.StringWriter; + +import javax.ws.rs.core.MediaType; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesToLabelsInfo; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; +import org.codehaus.jettison.json.JSONArray; +import org.codehaus.jettison.json.JSONException; +import org.codehaus.jettison.json.JSONObject; +import org.junit.Test; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.google.inject.servlet.GuiceServletContextListener; +import com.google.inject.servlet.ServletModule; +import com.sun.jersey.api.client.ClientResponse; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.api.json.JSONJAXBContext; +import com.sun.jersey.api.json.JSONMarshaller; +import com.sun.jersey.api.json.JSONUnmarshaller; +import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; +import com.sun.jersey.test.framework.JerseyTest; +import com.sun.jersey.test.framework.WebAppDescriptor; + +public class TestRMWebServicesNodeLabels extends JerseyTest { + + private static final Log LOG = LogFactory + .getLog(TestRMWebServicesNodeLabels.class); + + private static MockRM rm; + private YarnConfiguration conf; + + private String userName; + private String notUserName; + + private Injector injector = Guice.createInjector(new ServletModule() { + @Override + protected void configureServlets() { + bind(JAXBContextResolver.class); + bind(RMWebServices.class); + bind(GenericExceptionHandler.class); + try { + userName = UserGroupInformation.getCurrentUser().getShortUserName(); + } catch (IOException ioe) { + throw new RuntimeException("Unable to get current user name " + + ioe.getMessage(), ioe); + } + notUserName = userName + "abc123"; + conf = new YarnConfiguration(); + conf.set(YarnConfiguration.YARN_ADMIN_ACL, userName); + rm = new MockRM(conf); + bind(ResourceManager.class).toInstance(rm); + bind(RMContext.class).toInstance(rm.getRMContext()); + filter("/*").through( + TestRMWebServicesAppsModification.TestRMCustomAuthFilter.class); + serve("/*").with(GuiceContainer.class); + } + }); + + public class GuiceServletConfig extends GuiceServletContextListener { + + @Override + protected Injector getInjector() { 
+ return injector; + } + } + + public TestRMWebServicesNodeLabels() { + super(new WebAppDescriptor.Builder( + "org.apache.hadoop.yarn.server.resourcemanager.webapp") + .contextListenerClass(GuiceServletConfig.class) + .filterClass(com.google.inject.servlet.GuiceFilter.class) + .contextPath("jersey-guice-filter").servletPath("/").build()); + } + + @Test + public void testNodeLabels() throws JSONException, Exception { + WebResource r = resource(); + + ClientResponse response; + JSONObject json; + JSONArray jarr; + String responseString; + + // Add a label + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("add-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"label\":\"a\"}", MediaType.APPLICATION_JSON) + .post(ClientResponse.class); + + // Verify it is present + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("all-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("a", json.getString("label")); + + // Add another + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("add-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"label\":\"b\"}", MediaType.APPLICATION_JSON) + .post(ClientResponse.class); + + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("all-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + + // Verify + jarr = json.getJSONArray("label"); + assertEquals(2, jarr.length()); + + // Remove one + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("remove-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"label\":\"a\"}", MediaType.APPLICATION_JSON) + .post(ClientResponse.class); + + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("all-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + // Verify + assertEquals("b", json.getString("label")); + + // Add a node->label mapping + NodesToLabelsInfo nsli = new NodesToLabelsInfo(); + NodeToLabelsInfo nli = new NodeToLabelsInfo("node1"); + nli.getLabels().add("b"); + nsli.add(nli); + + response = + r.path("ws") + .path("v1") + .path("cluster") + .path("labels") + .path("set-node-to-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity(toJson(nsli, NodesToLabelsInfo.class), + MediaType.APPLICATION_JSON).post(ClientResponse.class); + + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("nodes-to-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + // Verify + nsli = + (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(1, nsli.getNodeToLabelsInfos().size()); + nli = nsli.getNodeToLabelsInfos().get(0); 
+ assertEquals("node1", nli.getNode()); + assertEquals(1, nli.getLabels().size()); + assertTrue(nli.getLabels().contains("b")); + + // Get with filter which should suppress results + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("nodes-to-labels").queryParam("labels", "a") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + nsli = + (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(0, nsli.getNodeToLabelsInfos().size()); + + // Get with filter which should include results + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("nodes-to-labels").queryParam("labels", "b") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + nsli = + (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(1, nsli.getNodeToLabelsInfos().size()); + + // "Remove" by setting with an empty label set + nli = nsli.getNodeToLabelsInfos().get(0); + nli.getLabels().remove("b"); + + response = + r.path("ws") + .path("v1") + .path("cluster") + .path("labels") + .path("set-node-to-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity(toJson(nsli, NodesToLabelsInfo.class), + MediaType.APPLICATION_JSON).post(ClientResponse.class); + + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("nodes-to-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + nsli = + (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(1, nsli.getNodeToLabelsInfos().size()); + nli = nsli.getNodeToLabelsInfos().get(0); + assertTrue(nli.getLabels().isEmpty()); + + } + + @Test + public void testNodeLabelsAuthFail() throws JSONException, Exception { + + WebResource r = resource(); + + ClientResponse response; + JSONObject json; + String responseString; + + // Add a label + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("add-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"label\":\"a\"}", MediaType.APPLICATION_JSON) + .post(ClientResponse.class); + + // Verify it is present + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("all-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("a", json.getString("label")); + + // Fail adding another + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("add-node-labels").queryParam("user.name", notUserName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"label\":\"b\"}", MediaType.APPLICATION_JSON) + .post(ClientResponse.class); + + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("all-node-labels").queryParam("user.name", userName) + 
.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + + // Verify + assertEquals("a", json.getString("label")); + + // Fail to remove one + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("remove-node-labels").queryParam("user.name", notUserName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"label\":\"a\"}", MediaType.APPLICATION_JSON) + .post(ClientResponse.class); + + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("all-node-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + // Verify + assertEquals("a", json.getString("label")); + + // Add a node->label mapping + NodesToLabelsInfo nsli = new NodesToLabelsInfo(); + NodeToLabelsInfo nli = new NodeToLabelsInfo("node1"); + nli.getLabels().add("a"); + nsli.add(nli); + + response = + r.path("ws") + .path("v1") + .path("cluster") + .path("labels") + .path("set-node-to-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity(toJson(nsli, NodesToLabelsInfo.class), + MediaType.APPLICATION_JSON).post(ClientResponse.class); + + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("nodes-to-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + // Verify + nsli = + (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(1, nsli.getNodeToLabelsInfos().size()); + nli = nsli.getNodeToLabelsInfos().get(0); + assertEquals("node1", nli.getNode()); + assertEquals(1, nli.getLabels().size()); + assertTrue(nli.getLabels().contains("a")); + + // Fail "Remove" by setting with an empty label set + nli = nsli.getNodeToLabelsInfos().get(0); + nli.getLabels().remove("a"); + + response = + r.path("ws") + .path("v1") + .path("cluster") + .path("labels") + .path("set-node-to-labels") + .queryParam("user.name", notUserName) + .accept(MediaType.APPLICATION_JSON) + .entity(toJson(nsli, NodesToLabelsInfo.class), + MediaType.APPLICATION_JSON).post(ClientResponse.class); + + response = + r.path("ws").path("v1").path("cluster").path("labels") + .path("nodes-to-labels").queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + nsli = + (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(1, nsli.getNodeToLabelsInfos().size()); + nli = nsli.getNodeToLabelsInfos().get(0); + assertFalse(nli.getLabels().isEmpty()); + + } + + @SuppressWarnings("rawtypes") + private String toJson(Object nsli, Class klass) throws Exception { + StringWriter sw = new StringWriter(); + JSONJAXBContext ctx = new JSONJAXBContext(klass); + JSONMarshaller jm = ctx.createJSONMarshaller(); + jm.marshallToJSON(nsli, sw); + return sw.toString(); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + private Object fromJson(String json, Class klass) throws Exception { + StringReader sr = new StringReader(json); + JSONJAXBContext ctx = new
JSONJAXBContext(klass); + JSONUnmarshaller jm = ctx.createJSONUnmarshaller(); + return jm.unmarshalFromJSON(sr, klass); + } + +}
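
Reviewer note: the headroom expectation in TestLeafQueue#testHeadroomWithLabel above can be hard to follow, since the mocked per-label queue resource is 4999 MB while the inline comment speaks of 4G. Below is a minimal, illustrative sketch of that arithmetic, assuming headroom is min(user-limit, per-label queue resource) minus current consumption, with the label resource normalized down to the scheduler's 1 GB minimum allocation; the class and helper names are hypothetical and are not part of this patch.

// Illustrative sketch only -- not part of the patch above.
// Assumption: headroom = min(userLimit, labelResource rounded down to the
// 1 GB minimum allocation) - consumed, as asserted by testHeadroomWithLabel.
public class LabeledHeadroomSketch {
  private static final int GB = 1024; // memory in MB, matching the test

  // Hypothetical helper mirroring the test's expectation.
  static int headroomMb(int userLimitMb, int labelResourceMb, int usedMb) {
    int normalizedMb = (labelResourceMb / GB) * GB; // 4999 MB -> 4096 MB
    return Math.min(userLimitMb, normalizedMb) - usedMb;
  }

  public static void main(String[] args) {
    // Test values: 6 GB user limit, 4999 MB label resource, 2 GB consumed:
    // min(6144, 4096) - 2048 = 2048 MB, i.e. the 2 GB the test asserts.
    System.out.println(headroomMb(6 * GB, 4999, 2 * GB));
  }
}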