diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java index 029fa87..24002c5 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -159,6 +160,10 @@ public String getNodeManagerVersion() { return null; } + @Override + public Set getLabels() { + return null; + } } public static RMNode newNodeInfo(String rackName, String hostName, diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java index 7eca66f..8cda734 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java @@ -33,6 +33,7 @@ import java.util.Collections; import java.util.List; +import java.util.Set; @Private @Unstable @@ -147,4 +148,8 @@ public String getNodeManagerVersion() { return node.getNodeManagerVersion(); } + @Override + public Set getLabels() { + return null; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java index 1ee04f0..3009ddf 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java @@ -62,7 +62,8 @@ public static ApplicationSubmissionContext newInstance( Priority priority, ContainerLaunchContext amContainer, boolean isUnmanagedAM, boolean cancelTokensWhenComplete, int maxAppAttempts, Resource resource, String applicationType, - boolean keepContainers) { + boolean keepContainers, String appLabelExpression, + String amContainerLabelExpression) { ApplicationSubmissionContext context = Records.newRecord(ApplicationSubmissionContext.class); context.setApplicationId(applicationId); @@ -88,7 +89,7 @@ public static ApplicationSubmissionContext newInstance( int maxAppAttempts, Resource resource, String applicationType) { return newInstance(applicationId, applicationName, queue, priority, amContainer, isUnmanagedAM, cancelTokensWhenComplete, maxAppAttempts, - resource, applicationType, false, "", ""); } @Public @Stable @@ -338,4 +339,47 @@ public abstract void setKeepContainersAcrossApplicationAttempts( @Public @Stable public abstract void setApplicationTags(Set<String> tags); + + /** + * Get label expression for this app + * + * @return label expression for this app + */ + @Public + @Stable + public abstract String getAppLabelExpression(); + + /** + * Set label expression for the APP + * + * By default, APP label expression is empty. This field can be overwritten by + * resource request label expression and AM container label expression + * + * e.g.
+ * TODO: change to real name + * - APP label expression = "red && blue" + * - Resource Request label expression = "", it will be set "red && blue" + * - Resource Request label expression = "blue && yellow", + * it will be "blue && yellow" + * The same applies to the AM container label expression + */ + @Public + @Stable + public abstract void setAppLabelExpression(String labelExpression); + + /** + * Get label expression for AM container + * + * @return label expression for AM container + */ + @Public + @Stable + public abstract String getAMContainerLabelExpression(); + + /** + * Set label expression for AM container + */ + @Public + @Stable + public abstract void setAMContainerLabelExpression(String labelExpression); } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToLabels.java new file mode 100644 index 0000000..7f13821 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToLabels.java @@ -0,0 +1,36 @@ +package org.apache.hadoop.yarn.api.records; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class NodeToLabels { + @Public + @Evolving + public static NodeToLabels newInstance(String node, List<String> labels) { + NodeToLabels record = Records.newRecord(NodeToLabels.class); + record.setLabels(labels); + record.setNode(node); + return record; + } + + @Public + @Evolving + public abstract void setNode(String node); + + @Public + @Evolving + public abstract String getNode(); + + @Public + @Evolving + public abstract void setLabels(List<String> labels); + + @Public + @Evolving + public abstract List<String> getLabels(); +} diff
--git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java index 7146db2..a18b0c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.api.records; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -48,13 +49,23 @@ @Public @Stable public abstract class QueueInfo { - + @Private @Unstable public static QueueInfo newInstance(String queueName, float capacity, float maximumCapacity, float currentCapacity, List childQueues, List applications, QueueState queueState) { + return newInstance(queueName, capacity, maximumCapacity, currentCapacity, + childQueues, applications, queueState, null, null); + } + + @Private + @Unstable + public static QueueInfo newInstance(String queueName, float capacity, + float maximumCapacity, float currentCapacity, + List childQueues, List applications, + QueueState queueState, Set labels, String defaultLabelExpression) { QueueInfo queueInfo = Records.newRecord(QueueInfo.class); queueInfo.setQueueName(queueName); queueInfo.setCapacity(capacity); @@ -149,4 +160,28 @@ public static QueueInfo newInstance(String queueName, float capacity, @Private @Unstable public abstract void setQueueState(QueueState queueState); + + /** + * Get the labels of the queue. 
+ * @return labels of the queue + */ + @Public + @Stable + public abstract Set getLabels(); + + @Private + @Unstable + public abstract void setLabels(Set labels); + + /** + * Get the default label expression of the queue + * @return default label expression of the queue + */ + @Public + @Stable + public abstract String getDefaultLabelExpression(); + + @Public + @Stable + public abstract void setDefaultLabelExpression(String defaultLabelExpression); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java index 86b55d1..51d8e5f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java @@ -70,12 +70,22 @@ public static ResourceRequest newInstance(Priority priority, String hostName, @Stable public static ResourceRequest newInstance(Priority priority, String hostName, Resource capability, int numContainers, boolean relaxLocality) { + return newInstance(priority, hostName, capability, numContainers, + relaxLocality, null); + } + + @Public + @Stable + public static ResourceRequest newInstance(Priority priority, String hostName, + Resource capability, int numContainers, boolean relaxLocality, + String labelExpression) { ResourceRequest request = Records.newRecord(ResourceRequest.class); request.setPriority(priority); request.setResourceName(hostName); request.setCapability(capability); request.setNumContainers(numContainers); request.setRelaxLocality(relaxLocality); + request.setLabelExpression(labelExpression); return request; } @@ -239,6 +249,30 @@ public static boolean isAnyLocation(String hostName) { @Stable public abstract void setRelaxLocality(boolean relaxLocality); + /** + * Get 
Label expression for this Resource Request + * + * @return label expression + */ + @Public + @Stable + public abstract String getLabelExpression(); + + /** + * Set label expression associated with this resource request. Now only + * support AND(&&), in the future will provide support for OR(||), NOT(!). + * + * Examples: + * - red && blue && green, ask for node contains red/blue/green at + * the same time + * - "" (empty) means ask for node doesn't have label on it + * + * @param labelExpression + */ + @Public + @Stable + public abstract void setLabelExpression(String labelExpression); + @Override public int hashCode() { final int prime = 2153; @@ -283,6 +317,20 @@ public boolean equals(Object obj) { return false; } else if (!priority.equals(other.getPriority())) return false; + if (getLabelExpression() == null) { + if (other.getLabelExpression() != null) { + return false; + } + } else { + // do normalize on label expression before compare + String label1 = getLabelExpression().replaceAll("[\\t ]", ""); + String label2 = + other.getLabelExpression() == null ? 
null : other + .getLabelExpression().replaceAll("[\\t ]", ""); + if (!label1.equals(label2)) { + return false; + } + } return true; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 034ec4f..ab1c52e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1299,6 +1299,18 @@ public static final String YARN_HTTP_POLICY_KEY = YARN_PREFIX + "http.policy"; public static final String YARN_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY .name(); + + /** URI for NodeLabelManager */ + public static final String FS_NODE_LABEL_STORE_URI = RM_PREFIX + + "fs.node-label-store.uri"; + public static final String FS_NODE_LABEL_STORE_RETRY_POLICY_SPEC = RM_PREFIX + + "fs.node-label-store.retry-policy-spec"; + public static final String DEFAULT_FS_NODE_LABEL_STORE_RETRY_POLICY_SPEC = + "2000, 500"; + + /** Class of node label manager */ + public static final String RM_NODE_LABEL_MANAGER_CLS = RM_PREFIX + + "node-label-manager.class"; public YarnConfiguration() { super(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java index 4b777ea..0f6468c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java @@ -30,6 +30,14 @@ import 
org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; @@ -42,6 +50,10 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; @@ -110,4 +122,40 @@ public RefreshServiceAclsResponse refreshServiceAcls( public UpdateNodeResourceResponse updateNodeResource( UpdateNodeResourceRequest request) 
throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public AddLabelsResponse addLabels(AddLabelsRequest request) + throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public RemoveLabelsResponse removeLabels( + RemoveLabelsRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public SetNodeToLabelsResponse setNodeToLabels( + SetNodeToLabelsRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public GetNodeToLabelsResponse getNodeToLabels( + GetNodeToLabelsRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public GetLabelsResponse getLabels( + GetLabelsRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public ClearAllLabelsResponse clearAllLabels( + ClearAllLabelsRequest request) throws YarnException, IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddLabelsRequest.java new file mode 100644 index 0000000..dadf1b8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddLabelsRequest.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class AddLabelsRequest { + public static AddLabelsRequest newInstance(Set labels) { + AddLabelsRequest request = + Records.newRecord(AddLabelsRequest.class); + request.setLabels(labels); + return request; + } + + @Public + @Evolving + public abstract void setLabels(Set labels); + + @Public + @Evolving + public abstract Set getLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddLabelsResponse.java new file mode 100644 index 0000000..4d50b4f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddLabelsResponse.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class AddLabelsResponse { + public static AddLabelsResponse newInstance() { + return Records.newRecord(AddLabelsResponse.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ClearAllLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ClearAllLabelsRequest.java new file mode 100644 index 0000000..4489c5b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ClearAllLabelsRequest.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class ClearAllLabelsRequest { + public static ClearAllLabelsRequest newInstance() { + return Records.newRecord(ClearAllLabelsRequest.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ClearAllLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ClearAllLabelsResponse.java new file mode 100644 index 0000000..9fb4b27 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ClearAllLabelsResponse.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class ClearAllLabelsResponse { + public static ClearAllLabelsResponse newInstance() { + return Records.newRecord(ClearAllLabelsResponse.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetLabelsRequest.java new file mode 100644 index 0000000..5e95c2a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetLabelsRequest.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class GetLabelsRequest { + public static GetLabelsRequest newInstance() { + return Records.newRecord(GetLabelsRequest.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetLabelsResponse.java new file mode 100644 index 0000000..6b23c8c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetLabelsResponse.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class GetLabelsResponse { + public static GetLabelsResponse newInstance(Set labels) { + GetLabelsResponse request = + Records.newRecord(GetLabelsResponse.class); + request.setLabels(labels); + return request; + } + + @Public + @Evolving + public abstract void setLabels(Set labels); + + @Public + @Evolving + public abstract Set getLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsRequest.java new file mode 100644 index 0000000..56d6587 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsRequest.java @@ -0,0 +1,27 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.yarn.util.Records; + +public abstract class GetNodeToLabelsRequest { + public static GetNodeToLabelsRequest newInstance() { + return Records.newRecord(GetNodeToLabelsRequest.class); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsResponse.java new file mode 100644 index 0000000..f3a6d07 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/GetNodeToLabelsResponse.java @@ -0,0 +1,44 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +public abstract class GetNodeToLabelsResponse { + public static GetNodeToLabelsResponse newInstance( + Map<String, Set<String>> map) { + GetNodeToLabelsResponse response = + Records.newRecord(GetNodeToLabelsResponse.class); + response.setNodeToLabels(map); + return response; + } + + @Public + @Evolving + public abstract void setNodeToLabels(Map<String, Set<String>> map); + + @Public + @Evolving + public abstract Map<String, Set<String>> getNodeToLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveLabelsRequest.java new file mode 100644 index 0000000..35e8d1b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveLabelsRequest.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class RemoveLabelsRequest { + public static RemoveLabelsRequest newInstance(Set partitions) { + RemoveLabelsRequest request = + Records.newRecord(RemoveLabelsRequest.class); + request.setLabels(partitions); + return request; + } + + @Public + @Evolving + public abstract void setLabels(Set partitions); + + @Public + @Evolving + public abstract Set getLabels(); +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveLabelsResponse.java new file mode 100644 index 0000000..efd7d9b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveLabelsResponse.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class RemoveLabelsResponse { + public static RemoveLabelsResponse newInstance() { + return Records.newRecord(RemoveLabelsResponse.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SetNodeToLabelsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SetNodeToLabelsRequest.java new file mode 100644 index 0000000..490a2e9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SetNodeToLabelsRequest.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class SetNodeToLabelsRequest { + public static SetNodeToLabelsRequest newInstance( + Map> map) { + SetNodeToLabelsRequest request = + Records.newRecord(SetNodeToLabelsRequest.class); + request.setNodeToLabels(map); + return request; + } + + @Public + @Evolving + public abstract void setNodeToLabels(Map> map); + + @Public + @Evolving + public abstract Map> getNodeToLabels(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SetNodeToLabelsResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SetNodeToLabelsResponse.java new file mode 100644 index 0000000..c3f016d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/SetNodeToLabelsResponse.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.yarn.util.Records; + +@Public +@Evolving +public abstract class SetNodeToLabelsResponse { + public static SetNodeToLabelsResponse newInstance() { + return Records.newRecord(SetNodeToLabelsResponse.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto index 47a6cf7..2d9fabb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto @@ -39,4 +39,10 @@ service ResourceManagerAdministrationProtocolService { rpc refreshServiceAcls(RefreshServiceAclsRequestProto) returns (RefreshServiceAclsResponseProto); rpc getGroupsForUser(GetGroupsForUserRequestProto) returns (GetGroupsForUserResponseProto); rpc updateNodeResource (UpdateNodeResourceRequestProto) returns (UpdateNodeResourceResponseProto); + rpc addLabels(AddLabelsRequestProto) returns (AddLabelsResponseProto); + rpc 
removeLabels(RemoveLabelsRequestProto) returns (RemoveLabelsResponseProto); + rpc setNodeToLabels(SetNodeToLabelsRequestProto) returns (SetNodeToLabelsResponseProto); + rpc getNodeToLabels(GetNodeToLabelsRequestProto) returns (GetNodeToLabelsResponseProto); + rpc getLabels(GetLabelsRequestProto) returns (GetLabelsResponseProto); + rpc clearAllLabels(ClearAllLabelsRequestProto) returns (ClearAllLabelsResponseProto); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto index 4637f03..fa2add6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto @@ -75,6 +75,52 @@ message UpdateNodeResourceRequestProto { message UpdateNodeResourceResponseProto { } +message AddLabelsRequestProto { + repeated string labels = 1; +} + +message AddLabelsResponseProto { +} + +message RemoveLabelsRequestProto { + repeated string labels = 1; +} + +message RemoveLabelsResponseProto { +} + +message NodeToLabelsProto { + optional string node = 1; + repeated string labels = 2; +} + +message SetNodeToLabelsRequestProto { + repeated NodeToLabelsProto nodeToLabels = 1; +} + +message SetNodeToLabelsResponseProto { + +} + +message GetNodeToLabelsRequestProto { +} + +message GetNodeToLabelsResponseProto { + repeated NodeToLabelsProto nodeToLabels = 1; +} + +message GetLabelsRequestProto { +} + +message GetLabelsResponseProto { + repeated string labels = 1; +} + +message ClearAllLabelsRequestProto { +} + +message ClearAllLabelsResponseProto { +} ////////////////////////////////////////////////////////////////// ///////////// RM Failover related records //////////////////////// diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 3f1fa6c..281d575 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -238,6 +238,7 @@ message ResourceRequestProto { optional ResourceProto capability = 3; optional int32 num_containers = 4; optional bool relax_locality = 5 [default = true]; + optional string label_expression = 6; } enum AMCommandProto { @@ -288,6 +289,8 @@ message ApplicationSubmissionContextProto { optional string applicationType = 10 [default = "YARN"]; optional bool keep_containers_across_application_attempts = 11 [default = false]; repeated string applicationTags = 12; + optional string app_label_expression = 13; + optional string am_container_label_expression = 14; } enum ApplicationAccessTypeProto { @@ -317,6 +320,8 @@ message QueueInfoProto { optional QueueStateProto state = 5; repeated QueueInfoProto childQueues = 6; repeated ApplicationReportProto applications = 7; + repeated string labels = 8; + optional string defaultLabelExpression = 9; } enum QueueACLProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index a86b521..24df4fa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -75,7 +75,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.ConverterUtils; -import org.apache.hadoop.yarn.util.Records; /** * Client for Distributed Shell application submission to YARN. @@ -114,6 +113,9 @@ private static final Log LOG = LogFactory.getLog(Client.class); + // && is a special character in shell, we need escape it + public final static String AMP = "?amp"; + // Configuration private Configuration conf; private YarnClient yarnClient; @@ -150,6 +152,7 @@ private int containerVirtualCores = 1; // No. of containers in which the shell script needs to be executed private int numContainers = 1; + private String labelExpression = null; // log4j.properties file // if available, add to local resources and set into classpath @@ -250,7 +253,7 @@ public Client(Configuration conf) throws Exception { " the new application attempt "); opts.addOption("debug", false, "Dump out debug information"); opts.addOption("help", false, "Print usage"); - + opts.addOption("label_expression", true, "Set label expression will be used by this application"); } /** @@ -361,6 +364,7 @@ public boolean init(String[] args) throws ParseException { containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10")); containerVirtualCores = Integer.parseInt(cliParser.getOptionValue("container_vcores", "1")); numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1")); + if (containerMemory < 0 || containerVirtualCores < 0 || numContainers < 1) { throw new IllegalArgumentException("Invalid no. 
of containers or container memory/vcores specified," @@ -369,6 +373,8 @@ public boolean init(String[] args) throws ParseException { + ", containerVirtualCores=" + containerVirtualCores + ", numContainer=" + numContainers); } + + labelExpression = cliParser.getOptionValue("label_expression", null); clientTimeout = Integer.parseInt(cliParser.getOptionValue("timeout", "600000")); @@ -559,6 +565,9 @@ public boolean run() throws IOException, YarnException { vargs.add("--container_memory " + String.valueOf(containerMemory)); vargs.add("--container_vcores " + String.valueOf(containerVirtualCores)); vargs.add("--num_containers " + String.valueOf(numContainers)); + if (null != labelExpression) { + appContext.setAppLabelExpression(labelExpression); + } vargs.add("--priority " + String.valueOf(shellCmdPriority)); for (Map.Entry entry : shellEnv.entrySet()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java index f41c018..db86a3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java @@ -105,6 +105,7 @@ protected AMRMClient(String name) { final List racks; final Priority priority; final boolean relaxLocality; + final String labels; /** * Instantiates a {@link ContainerRequest} with the given constraints and @@ -124,9 +125,9 @@ protected AMRMClient(String name) { */ public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority) { - this(capability, nodes, racks, priority, true); + this(capability, nodes, racks, priority, true, null); } - + /** * Instantiates a {@link ContainerRequest} with the given constraints. 
* @@ -147,6 +148,32 @@ public ContainerRequest(Resource capability, String[] nodes, */ public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority, boolean relaxLocality) { + this(capability, nodes, racks, priority, relaxLocality, null); + } + + /** + * Instantiates a {@link ContainerRequest} with the given constraints. + * + * @param capability + * The {@link Resource} to be requested for each container. + * @param nodes + * Any hosts to request that the containers are placed on. + * @param racks + * Any racks to request that the containers are placed on. The + * racks corresponding to any hosts requested will be automatically + * added to this list. + * @param priority + * The priority at which to request the containers. Higher + * priorities have lower numerical values. + * @param relaxLocality + * If true, containers for this request may be assigned on hosts + * and racks other than the ones explicitly requested. + * @param labels + * Set node labels to allocate resource + */ + public ContainerRequest(Resource capability, String[] nodes, + String[] racks, Priority priority, boolean relaxLocality, + String labels) { // Validate request Preconditions.checkArgument(capability != null, "The Resource to be requested for each container " + @@ -163,6 +190,7 @@ public ContainerRequest(Resource capability, String[] nodes, this.racks = (racks != null ? 
ImmutableList.copyOf(racks) : null); this.priority = priority; this.relaxLocality = relaxLocality; + this.labels = labels; } public Resource getCapability() { @@ -185,6 +213,10 @@ public boolean getRelaxLocality() { return relaxLocality; } + public String getLabelExpression() { + return labels; + } + public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Capability[").append(capability).append("]"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index e36d7ad..d31f14e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -251,7 +251,7 @@ public AllocateResponse allocate(float progressIndicator) // RPC layer is using it to send info across askList.add(ResourceRequest.newInstance(r.getPriority(), r.getResourceName(), r.getCapability(), r.getNumContainers(), - r.getRelaxLocality())); + r.getRelaxLocality(), r.getLabelExpression())); } releaseList = new ArrayList(release); // optimistically clear this collection assuming no RPC failure @@ -436,25 +436,25 @@ public synchronized void addContainerRequest(T req) { } for (String node : dedupedNodes) { addResourceRequest(req.getPriority(), node, req.getCapability(), req, - true); + true, req.getLabelExpression()); } } for (String rack : dedupedRacks) { addResourceRequest(req.getPriority(), rack, req.getCapability(), req, - true); + true, req.getLabelExpression()); } // Ensure node requests are accompanied by requests for // corresponding rack for (String rack : inferredRacks) { addResourceRequest(req.getPriority(), rack, req.getCapability(), req, - req.getRelaxLocality()); + 
req.getRelaxLocality(), req.getLabelExpression()); } // Off-switch addResourceRequest(req.getPriority(), ResourceRequest.ANY, - req.getCapability(), req, req.getRelaxLocality()); + req.getCapability(), req, req.getRelaxLocality(), req.getLabelExpression()); } @Override @@ -608,8 +608,10 @@ private void addResourceRequestToAsk(ResourceRequest remoteRequest) { ask.add(remoteRequest); } - private void addResourceRequest(Priority priority, String resourceName, - Resource capability, T req, boolean relaxLocality) { + private void + addResourceRequest(Priority priority, String resourceName, + Resource capability, T req, boolean relaxLocality, + String labelExpression) { Map> remoteRequests = this.remoteRequestsTable.get(priority); if (remoteRequests == null) { @@ -642,6 +644,8 @@ private void addResourceRequest(Priority priority, String resourceName, if (relaxLocality) { resourceRequestInfo.containerRequests.add(req); } + + resourceRequestInfo.remoteRequest.setLabelExpression(labelExpression); // Note this down for next interaction with ResourceManager addResourceRequestToAsk(resourceRequestInfo.remoteRequest); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java index 50e5825..e2267ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java @@ -21,9 +21,13 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; -import com.google.common.collect.ImmutableMap; +import org.apache.commons.lang.StringUtils; import 
org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; @@ -42,12 +46,22 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsRequest; + +import com.google.common.collect.ImmutableMap; @Private @Unstable @@ -78,7 +92,20 @@ .put("-help", new UsageInfo("[cmd]", "Displays help for the given command or all commands if none " + "is specified.")) - .build(); + .put("-addLabels", + new UsageInfo("[labels splitted by ',']", "Add labels")) + .put("-removeLabels", + new UsageInfo("[labels splitted by ',']", "Remove labels")) + .put("-setNodeToLabels", + new 
UsageInfo("[node1:label1,label2,label3;node2:label2,label3]", + "set node to labels")) + .put("-getNodeToLabels", new UsageInfo("", + "Get node to label mappings")) + .put("-getLabels", new UsageInfo("", "Get labels in the cluster")) + .put("-clearAllLabels", + new UsageInfo("", + "DANGEROUS: Clear all labels and related mappings in the cluster")) + .build(); public RMAdminCLI() { super(); @@ -285,6 +312,87 @@ private int getGroups(String[] usernames) throws IOException { return 0; } + private int addLabels(String args) throws IOException, YarnException { + ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); + Set labels = new HashSet(); + for (String p : args.split(",")) { + labels.add(p); + } + AddLabelsRequest request = AddLabelsRequest.newInstance(labels); + adminProtocol.addLabels(request); + return 0; + } + + private int removeLabels(String args) throws IOException, YarnException { + ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); + Set labels = new HashSet(); + for (String p : args.split(",")) { + labels.add(p); + } + RemoveLabelsRequest request = + RemoveLabelsRequest.newInstance(labels); + adminProtocol.removeLabels(request); + return 0; + } + + private int getNodeToLabels() throws IOException, YarnException { + ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); + GetNodeToLabelsResponse response = adminProtocol + .getNodeToLabels(GetNodeToLabelsRequest.newInstance()); + for (Entry> entry : response.getNodeToLabels() + .entrySet()) { + System.out.println(String.format("Host=%s, Labels=[%s]", entry.getKey(), + StringUtils.join(entry.getValue().iterator(), ","))); + } + return 0; + } + + private int getLabels() throws IOException, YarnException { + ResourceManagerAdministrationProtocol adminProto = createAdminProtocol(); + GetLabelsResponse response = adminProto.getLabels(GetLabelsRequest + .newInstance()); + System.out.println(String.format("Labels=%s", + 
StringUtils.join(response.getLabels().iterator(), ","))); + return 0; + } + + private int clearAllLabels() throws IOException, YarnException { + ResourceManagerAdministrationProtocol adminProto = createAdminProtocol(); + adminProto.clearAllLabels(ClearAllLabelsRequest.newInstance()); + return 0; + } + + private int setNodeToLabels(String args) throws IOException, + YarnException { + ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); + Map> map = new HashMap>(); + + for (String nodeToLabels : args.split(";")) { + String[] split = nodeToLabels.split(":"); + if (split.length != 2) { + throw new IOException( + "Format is incorrect, should be node:label1,label2..."); + } + String node = split[0]; + String labels = split[1]; + + if (node.trim().isEmpty()) { + throw new IOException("node name cannot be empty"); + } + + map.put(node, new HashSet()); + for (String label : labels.split(",")) { + if (!label.trim().isEmpty()) { + map.get(node).add(label.trim().toLowerCase()); + } + } + } + SetNodeToLabelsRequest request = + SetNodeToLabelsRequest.newInstance(map); + adminProtocol.setNodeToLabels(request); + return 0; + } + @Override public int run(String[] args) throws Exception { YarnConfiguration yarnConf = @@ -351,6 +459,18 @@ public int run(String[] args) throws Exception { } else if ("-getGroups".equals(cmd)) { String[] usernames = Arrays.copyOfRange(args, i, args.length); exitCode = getGroups(usernames); + } else if ("-addLabels".equals(cmd)) { + exitCode = addLabels(args[i]); + } else if ("-removeLabels".equals(cmd)) { + exitCode = removeLabels(args[i]); + } else if ("-setNodeToLabels".equals(cmd)) { + exitCode = setNodeToLabels(args[i]); + } else if ("-getNodeToLabels".equals(cmd)) { + exitCode = getNodeToLabels(); + } else if ("-getLabels".equals(cmd)) { + exitCode = getLabels(); + } else if ("-clearAllLabels".equals(cmd)) { + exitCode = clearAllLabels(); } else { exitCode = -1; System.err.println(cmd.substring(1) + ": Unknown command"); diff 
--git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java index 38dbf79..fc37ade 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java @@ -664,6 +664,28 @@ public void testAMRMClient() throws YarnException, IOException { } } } + + @Test (timeout=30000) + public void testAskWithLabels() { + AMRMClientImpl client = + new AMRMClientImpl(); + + // add x, y to ANY + client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, + 1), null, null, Priority.UNDEFINED, true, "x && y")); + Assert.assertEquals(1, client.ask.size()); + Assert.assertEquals("x && y", client.ask.iterator().next() + .getLabelExpression()); + + // add x, y and a, b to ANY, only a, b should be kept + client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, + 1), null, null, Priority.UNDEFINED, true, "x && y")); + client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, + 1), null, null, Priority.UNDEFINED, true, "a && b")); + Assert.assertEquals(1, client.ask.size()); + Assert.assertEquals("a && b", client.ask.iterator().next() + .getLabelExpression()); + } private void testAllocation(final AMRMClientImpl amClient) throws YarnException, IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index c2f3268..d3aaa61 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.api.records.impl.pb; import com.google.common.base.CharMatcher; + import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -402,4 +403,42 @@ private ResourcePBImpl convertFromProtoFormat(ResourceProto p) { private ResourceProto convertToProtoFormat(Resource t) { return ((ResourcePBImpl)t).getProto(); } + + @Override + public String getAppLabelExpression() { + ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasAppLabelExpression()) { + return null; + } + return p.getAppLabelExpression(); + } + + @Override + public void setAppLabelExpression(String labelExpression) { + maybeInitBuilder(); + if (labelExpression == null) { + builder.clearAppLabelExpression(); + return; + } + builder.setAppLabelExpression(labelExpression); + } + + @Override + public String getAMContainerLabelExpression() { + ApplicationSubmissionContextProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasAmContainerLabelExpression()) { + return null; + } + return p.getAmContainerLabelExpression(); + } + + @Override + public void setAMContainerLabelExpression(String labelExpression) { + maybeInitBuilder(); + if (labelExpression == null) { + builder.clearAmContainerLabelExpression(); + return; + } + builder.setAmContainerLabelExpression(labelExpression); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeToLabelsPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeToLabelsPBImpl.java new file mode 100644 index 0000000..ba36379 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeToLabelsPBImpl.java @@ -0,0 +1,96 @@ +package org.apache.hadoop.yarn.api.records.impl.pb; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.yarn.api.records.NodeToLabels; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeToLabelsProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeToLabelsProtoOrBuilder; + +public class NodeToLabelsPBImpl extends NodeToLabels { + List labels; + NodeToLabelsProto proto = NodeToLabelsProto + .getDefaultInstance(); + NodeToLabelsProto.Builder builder = null; + boolean viaProto = false; + + public NodeToLabelsPBImpl() { + this.builder = NodeToLabelsProto.newBuilder(); + } + + public NodeToLabelsPBImpl(NodeToLabelsProto proto) { + this.proto = proto; + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = NodeToLabelsProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.labels != null && !this.labels.isEmpty()) { + builder.addAllLabels(this.labels); + } + } + + private void mergeLocalToProto() { + if 
(viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + public NodeToLabelsProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void initLabels() { + if (this.labels != null) { + return; + } + NodeToLabelsProtoOrBuilder p = viaProto ? proto : builder; + this.labels = new ArrayList(); + this.labels.addAll(p.getLabelsList()); + } + + @Override + public void setLabels(List labels) { + maybeInitBuilder(); + if (labels == null || labels.isEmpty()) { + builder.clearLabels(); + } + this.labels = labels; + } + + @Override + public List getLabels() { + initLabels(); + return this.labels; + } + + @Override + public void setNode(String node) { + maybeInitBuilder(); + if (node == null) { + builder.clearNode(); + return; + } + builder.setNode(node); + } + + @Override + public String getNode() { + NodeToLabelsProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasNode()) { + return null; + } + return (p.getNode()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java index 56a5b58..137220f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java @@ -19,8 +19,10 @@ package org.apache.hadoop.yarn.api.records.impl.pb; import java.util.ArrayList; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -44,6 +46,7 @@ List 
applicationsList; List<QueueInfo> childQueuesList; + Set<String> labels; public QueueInfoPBImpl() { builder = QueueInfoProto.newBuilder(); } @@ -281,6 +284,9 @@ private void mergeLocalToBuilder() { if (this.applicationsList != null) { addApplicationsToProto(); } + if (this.labels != null) { + builder.addAllLabels(this.labels); + } } private void mergeLocalToProto() { @@ -322,5 +328,45 @@ private QueueState convertFromProtoFormat(QueueStateProto q) { private QueueStateProto convertToProtoFormat(QueueState queueState) { return ProtoUtils.convertToProtoFormat(queueState); } + + @Override + public void setLabels(Set<String> labels) { + maybeInitBuilder(); + if (labels == null || labels.isEmpty()) { + builder.clearLabels(); + } + this.labels = labels; + } + + private void initLabels() { + if (this.labels != null) { + return; + } + QueueInfoProtoOrBuilder p = viaProto ? proto : builder; + this.labels = new HashSet<String>(); + this.labels.addAll(p.getLabelsList()); + } + @Override + public Set<String> getLabels() { + initLabels(); + return this.labels; + } + + @Override + public String getDefaultLabelExpression() { + QueueInfoProtoOrBuilder p = viaProto ? proto : builder; + return (p.hasDefaultLabelExpression()) ? 
p.getDefaultLabelExpression() + : null; + } + + @Override + public void setDefaultLabelExpression(String defaultLabelExpression) { + maybeInitBuilder(); + if (defaultLabelExpression == null) { + builder.clearDefaultLabelExpression(); + return; + } + builder.setDefaultLabelExpression(defaultLabelExpression); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java index 22863ac..ca052c2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java @@ -186,4 +186,23 @@ public String toString() { + ", Location: " + getResourceName() + ", Relax Locality: " + getRelaxLocality() + "}"; } + + @Override + public String getLabelExpression() { + ResourceRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasLabelExpression()) { + return null; + } + return (p.getLabelExpression()); + } + + @Override + public void setLabelExpression(String labelExpression) { + maybeInitBuilder(); + if (labelExpression == null) { + builder.clearLabelExpression(); + return; + } + builder.setLabelExpression(labelExpression); + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java index ccffaed..1e348fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java @@ -29,17 +29,31 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ClearAllLabelsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.SetNodeToLabelsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; @@ -52,8 +66,20 @@ import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ClearAllLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ClearAllLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl; @@ -66,6 +92,10 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl; import 
org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SetNodeToLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SetNodeToLabelsResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl; @@ -205,5 +235,88 @@ public UpdateNodeResourceResponse updateNodeResource( return null; } } + + @Override + public AddLabelsResponse addLabels(AddLabelsRequest request) + throws YarnException, IOException { + AddLabelsRequestProto requestProto = + ((AddLabelsRequestPBImpl) request).getProto(); + try { + return new AddLabelsResponsePBImpl(proxy.addLabels(null, + requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public RemoveLabelsResponse removeLabels( + RemoveLabelsRequest request) throws YarnException, IOException { + RemoveLabelsRequestProto requestProto = + ((RemoveLabelsRequestPBImpl) request).getProto(); + try { + return new RemoveLabelsResponsePBImpl(proxy.removeLabels(null, + requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public SetNodeToLabelsResponse setNodeToLabels( + SetNodeToLabelsRequest request) throws YarnException, IOException { + SetNodeToLabelsRequestProto requestProto = + ((SetNodeToLabelsRequestPBImpl) request).getProto(); + try { + return new SetNodeToLabelsResponsePBImpl(proxy.setNodeToLabels( + null, requestProto)); + } 
catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetNodeToLabelsResponse getNodeToLabels(GetNodeToLabelsRequest request) + throws YarnException, IOException { + GetNodeToLabelsRequestProto requestProto = + ((GetNodeToLabelsRequestPBImpl) request).getProto(); + try { + return new GetNodeToLabelsResponsePBImpl(proxy.getNodeToLabels( + null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetLabelsResponse getLabels(GetLabelsRequest request) + throws YarnException, IOException { + GetLabelsRequestProto requestProto = + ((GetLabelsRequestPBImpl) request).getProto(); + try { + return new GetLabelsResponsePBImpl(proxy.getLabels( + null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + @Override + public ClearAllLabelsResponse clearAllLabels(ClearAllLabelsRequest request) + throws YarnException, IOException { + ClearAllLabelsRequestProto requestProto = + ((ClearAllLabelsRequestPBImpl) request).getProto(); + try { + return new ClearAllLabelsResponsePBImpl(proxy.clearAllLabels( + null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java index d1f71fe..c6fd8f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java @@ -22,8 +22,16 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ClearAllLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ClearAllLabelsResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; @@ -36,17 +44,35 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto; import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.SetNodeToLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.SetNodeToLabelsResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; 
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ClearAllLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ClearAllLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl; @@ -59,6 +85,10 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshSuperUserGroupsConfigurationResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshUserToGroupsMappingsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SetNodeToLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SetNodeToLabelsResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl; @@ -204,4 +234,94 @@ public 
UpdateNodeResourceResponseProto updateNodeResource(RpcController controll } } + @Override + public AddLabelsResponseProto addLabels(RpcController controller, + AddLabelsRequestProto proto) throws ServiceException { + AddLabelsRequestPBImpl request = new AddLabelsRequestPBImpl(proto); + try { + AddLabelsResponse response = real.addLabels(request); + return ((AddLabelsResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public RemoveLabelsResponseProto removeLabels( + RpcController controller, RemoveLabelsRequestProto proto) + throws ServiceException { + RemoveLabelsRequestPBImpl request = + new RemoveLabelsRequestPBImpl(proto); + try { + RemoveLabelsResponse response = real.removeLabels(request); + return ((RemoveLabelsResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public SetNodeToLabelsResponseProto setNodeToLabels( + RpcController controller, SetNodeToLabelsRequestProto proto) + throws ServiceException { + SetNodeToLabelsRequestPBImpl request = + new SetNodeToLabelsRequestPBImpl(proto); + try { + SetNodeToLabelsResponse response = real.setNodeToLabels(request); + return ((SetNodeToLabelsResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetNodeToLabelsResponseProto getNodeToLabels(RpcController controller, + GetNodeToLabelsRequestProto proto) throws ServiceException { + GetNodeToLabelsRequestPBImpl request = + new GetNodeToLabelsRequestPBImpl(proto); + try { + GetNodeToLabelsResponse response = real.getNodeToLabels(request); + return ((GetNodeToLabelsResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } 
catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public GetLabelsResponseProto getLabels(RpcController controller, + GetLabelsRequestProto proto) throws ServiceException { + GetLabelsRequestPBImpl request = + new GetLabelsRequestPBImpl(proto); + try { + GetLabelsResponse response = real.getLabels(request); + return ((GetLabelsResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public ClearAllLabelsResponseProto clearAllLabels(RpcController controller, + ClearAllLabelsRequestProto proto) throws ServiceException { + ClearAllLabelsRequestPBImpl request = + new ClearAllLabelsRequestPBImpl(proto); + try { + ClearAllLabelsResponse response = real.clearAllLabels(request); + return ((ClearAllLabelsResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddLabelsRequestPBImpl.java new file mode 100644 index 0000000..26dbe7a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddLabelsRequestPBImpl.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddLabelsRequestProtoOrBuilder; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsRequest; + +public class AddLabelsRequestPBImpl extends AddLabelsRequest { + Set labels; + AddLabelsRequestProto proto = AddLabelsRequestProto + .getDefaultInstance(); + AddLabelsRequestProto.Builder builder = null; + boolean viaProto = false; + + public AddLabelsRequestPBImpl() { + this.builder = AddLabelsRequestProto.newBuilder(); + } + + public AddLabelsRequestPBImpl(AddLabelsRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = AddLabelsRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.labels != null && !this.labels.isEmpty()) { + builder.addAllLabels(this.labels); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + public AddLabelsRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void initLabels() { + if (this.labels != null) { + return; + } + AddLabelsRequestProtoOrBuilder p = viaProto ? 
proto : builder; + this.labels = new HashSet<String>(); + this.labels.addAll(p.getLabelsList()); + } + + @Override + public void setLabels(Set<String> labels) { + maybeInitBuilder(); + if (labels == null || labels.isEmpty()) { + builder.clearLabels(); + } + this.labels = labels; + } + + @Override + public Set<String> getLabels() { + initLabels(); + return this.labels; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddLabelsResponsePBImpl.java new file mode 100644 index 0000000..74aa930 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/AddLabelsResponsePBImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddLabelsResponseProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsResponse; + +import com.google.protobuf.TextFormat; + +public class AddLabelsResponsePBImpl extends AddLabelsResponse { + + AddLabelsResponseProto proto = AddLabelsResponseProto + .getDefaultInstance(); + AddLabelsResponseProto.Builder builder = null; + boolean viaProto = false; + + public AddLabelsResponsePBImpl() { + builder = AddLabelsResponseProto.newBuilder(); + } + + public AddLabelsResponsePBImpl(AddLabelsResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public AddLabelsResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ClearAllLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ClearAllLabelsRequestPBImpl.java new file mode 100644 index 0000000..82956d7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ClearAllLabelsRequestPBImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ClearAllLabelsRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsRequest; + +import com.google.protobuf.TextFormat; + +public class ClearAllLabelsRequestPBImpl extends ClearAllLabelsRequest { + + ClearAllLabelsRequestProto proto = ClearAllLabelsRequestProto + .getDefaultInstance(); + ClearAllLabelsRequestProto.Builder builder = null; + boolean viaProto = false; + + public ClearAllLabelsRequestPBImpl() { + builder = ClearAllLabelsRequestProto.newBuilder(); + } + + public ClearAllLabelsRequestPBImpl(ClearAllLabelsRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public ClearAllLabelsRequestProto getProto() { + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ClearAllLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ClearAllLabelsResponsePBImpl.java new file mode 100644 index 0000000..2277710 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ClearAllLabelsResponsePBImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ClearAllLabelsResponseProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsResponse; + +import com.google.protobuf.TextFormat; + +public class ClearAllLabelsResponsePBImpl extends ClearAllLabelsResponse { + + ClearAllLabelsResponseProto proto = ClearAllLabelsResponseProto + .getDefaultInstance(); + ClearAllLabelsResponseProto.Builder builder = null; + boolean viaProto = false; + + public ClearAllLabelsResponsePBImpl() { + builder = ClearAllLabelsResponseProto.newBuilder(); + } + + public ClearAllLabelsResponsePBImpl(ClearAllLabelsResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public ClearAllLabelsResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetLabelsRequestPBImpl.java new file mode 100644 index 0000000..975875c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetLabelsRequestPBImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetLabelsRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsRequest; + +import com.google.protobuf.TextFormat; + +public class GetLabelsRequestPBImpl extends GetLabelsRequest { + + GetLabelsRequestProto proto = GetLabelsRequestProto + .getDefaultInstance(); + GetLabelsRequestProto.Builder builder = null; + boolean viaProto = false; + + public GetLabelsRequestPBImpl() { + builder = GetLabelsRequestProto.newBuilder(); + } + + public GetLabelsRequestPBImpl(GetLabelsRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetLabelsRequestProto getProto() { + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetLabelsResponsePBImpl.java new file mode 100644 index 0000000..a67e636 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetLabelsResponsePBImpl.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetLabelsResponseProtoOrBuilder; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsResponse; + +public class GetLabelsResponsePBImpl extends GetLabelsResponse { + Set labels; + GetLabelsResponseProto proto = GetLabelsResponseProto + .getDefaultInstance(); + GetLabelsResponseProto.Builder builder = null; + boolean viaProto = false; + + public GetLabelsResponsePBImpl() { + this.builder = GetLabelsResponseProto.newBuilder(); + } + + public GetLabelsResponsePBImpl(GetLabelsResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetLabelsResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.labels != null && !this.labels.isEmpty()) { + builder.addAllLabels(this.labels); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + public GetLabelsResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void initLabels() { + if (this.labels != null) { + return; + } + GetLabelsResponseProtoOrBuilder p = viaProto ? 
proto : builder; + this.labels = new HashSet<String>(); + this.labels.addAll(p.getLabelsList()); + } + + @Override + public void setLabels(Set<String> labels) { + maybeInitBuilder(); + if (labels == null || labels.isEmpty()) { + builder.clearLabels(); + } + this.labels = labels; + } + + @Override + public Set<String> getLabels() { + initLabels(); + return this.labels; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsRequestPBImpl.java new file mode 100644 index 0000000..057f41b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsRequestPBImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest; + +import com.google.protobuf.TextFormat; + +public class GetNodeToLabelsRequestPBImpl extends GetNodeToLabelsRequest { + + GetNodeToLabelsRequestProto proto = GetNodeToLabelsRequestProto + .getDefaultInstance(); + GetNodeToLabelsRequestProto.Builder builder = null; + boolean viaProto = false; + + public GetNodeToLabelsRequestPBImpl() { + builder = GetNodeToLabelsRequestProto.newBuilder(); + } + + public GetNodeToLabelsRequestPBImpl(GetNodeToLabelsRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetNodeToLabelsRequestProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsResponsePBImpl.java new file mode 100644 index 0000000..9cdf037 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/GetNodeToLabelsResponsePBImpl.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetNodeToLabelsResponseProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeToLabelsProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse; + +import com.google.common.collect.Sets; + +public class GetNodeToLabelsResponsePBImpl extends + GetNodeToLabelsResponse { + GetNodeToLabelsResponseProto proto = GetNodeToLabelsResponseProto + .getDefaultInstance(); + GetNodeToLabelsResponseProto.Builder builder = null; + boolean viaProto = false; + + private Map> nodeToLabels; + + public GetNodeToLabelsResponsePBImpl() { + this.builder = GetNodeToLabelsResponseProto.newBuilder(); + } + + public GetNodeToLabelsResponsePBImpl(GetNodeToLabelsResponseProto proto) { + this.proto = proto; + this.viaProto = true; + } + + private void initNodeToLabels() { + if (this.nodeToLabels != 
null) { + return; + } + GetNodeToLabelsResponseProtoOrBuilder p = viaProto ? proto : builder; + List list = p.getNodeToLabelsList(); + this.nodeToLabels = new HashMap>(); + + for (NodeToLabelsProto c : list) { + this.nodeToLabels + .put(c.getNode(), Sets.newHashSet(c.getLabelsList())); + } + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetNodeToLabelsResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void addNodeToLabelsToProto() { + maybeInitBuilder(); + builder.clearNodeToLabels(); + if (nodeToLabels == null) { + return; + } + Iterable iterable = new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + + Iterator>> iter = nodeToLabels.entrySet() + .iterator(); + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + public NodeToLabelsProto next() { + Entry> now = iter.next(); + return NodeToLabelsProto.newBuilder().setNode(now.getKey()) + .addAllLabels(now.getValue()).build(); + } + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + }; + } + }; + builder.addAllNodeToLabels(iterable); + } + + private void mergeLocalToBuilder() { + if (this.nodeToLabels != null) { + addNodeToLabelsToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + public GetNodeToLabelsResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public Map<String, Set<String>> getNodeToLabels() { + initNodeToLabels(); + return this.nodeToLabels; + } + + @Override + public void setNodeToLabels(Map<String, Set<String>> map) { + initNodeToLabels(); + nodeToLabels.clear(); + nodeToLabels.putAll(map); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveLabelsRequestPBImpl.java new file mode 100644 index 0000000..b86d2cb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveLabelsRequestPBImpl.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveLabelsRequestProtoOrBuilder; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsRequest; + +public class RemoveLabelsRequestPBImpl extends RemoveLabelsRequest { + Set labels; + RemoveLabelsRequestProto proto = RemoveLabelsRequestProto + .getDefaultInstance(); + RemoveLabelsRequestProto.Builder builder = null; + boolean viaProto = false; + + public RemoveLabelsRequestPBImpl() { + this.builder = RemoveLabelsRequestProto.newBuilder(); + } + + public RemoveLabelsRequestPBImpl(RemoveLabelsRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = RemoveLabelsRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (this.labels != null && !this.labels.isEmpty()) { + builder.addAllLabels(this.labels); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + public RemoveLabelsRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void initLabels() { + if (this.labels != null) { + return; + } + RemoveLabelsRequestProtoOrBuilder p = viaProto ? 
proto : builder; + this.labels = new HashSet<String>(); + this.labels.addAll(p.getLabelsList()); + } + + @Override + public void setLabels(Set<String> partitions) { + maybeInitBuilder(); + if (partitions == null || partitions.isEmpty()) { + builder.clearLabels(); + } + this.labels = partitions; + } + + @Override + public Set<String> getLabels() { + initLabels(); + return this.labels; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveLabelsResponsePBImpl.java new file mode 100644 index 0000000..935d8f0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveLabelsResponsePBImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveLabelsResponseProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsResponse; + +import com.google.protobuf.TextFormat; + +public class RemoveLabelsResponsePBImpl extends RemoveLabelsResponse { + + RemoveLabelsResponseProto proto = RemoveLabelsResponseProto + .getDefaultInstance(); + RemoveLabelsResponseProto.Builder builder = null; + boolean viaProto = false; + + public RemoveLabelsResponsePBImpl() { + builder = RemoveLabelsResponseProto.newBuilder(); + } + + public RemoveLabelsResponsePBImpl(RemoveLabelsResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public RemoveLabelsResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SetNodeToLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SetNodeToLabelsRequestPBImpl.java new file mode 100644 index 0000000..50a8ccb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SetNodeToLabelsRequestPBImpl.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodeToLabelsProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.SetNodeToLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.SetNodeToLabelsRequestProtoOrBuilder; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsRequest; + +import com.google.common.collect.Sets; + +public class SetNodeToLabelsRequestPBImpl extends + SetNodeToLabelsRequest { + SetNodeToLabelsRequestProto proto = SetNodeToLabelsRequestProto + .getDefaultInstance(); + SetNodeToLabelsRequestProto.Builder builder = null; + boolean viaProto = false; + + private Map> nodeToLabels; + + public SetNodeToLabelsRequestPBImpl() { + this.builder = SetNodeToLabelsRequestProto.newBuilder(); + } + + public SetNodeToLabelsRequestPBImpl(SetNodeToLabelsRequestProto proto) { + this.proto = proto; + this.viaProto = true; + } + + private void initNodeToLabels() { + if (this.nodeToLabels != null) { + return; + } + 
SetNodeToLabelsRequestProtoOrBuilder p = viaProto ? proto : builder; + List list = p.getNodeToLabelsList(); + this.nodeToLabels = new HashMap>(); + + for (NodeToLabelsProto c : list) { + this.nodeToLabels + .put(c.getNode(), Sets.newHashSet(c.getLabelsList())); + } + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = SetNodeToLabelsRequestProto.newBuilder(proto); + } + viaProto = false; + } + + private void addNodeToLabelsToProto() { + maybeInitBuilder(); + builder.clearNodeToLabels(); + if (nodeToLabels == null) { + return; + } + Iterable iterable = new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + + Iterator>> iter = nodeToLabels.entrySet() + .iterator(); + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + public NodeToLabelsProto next() { + Entry> now = iter.next(); + return NodeToLabelsProto.newBuilder().setNode(now.getKey()) + .addAllLabels(now.getValue()).build(); + } + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + }; + } + }; + builder.addAllNodeToLabels(iterable); + } + + private void mergeLocalToBuilder() { + if (this.nodeToLabels != null) { + addNodeToLabelsToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + public SetNodeToLabelsRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public Map<String, Set<String>> getNodeToLabels() { + initNodeToLabels(); + return this.nodeToLabels; + } + + @Override + public void setNodeToLabels(Map<String, Set<String>> map) { + initNodeToLabels(); + nodeToLabels.clear(); + nodeToLabels.putAll(map); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SetNodeToLabelsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SetNodeToLabelsResponsePBImpl.java new file mode 100644 index 0000000..053004d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/SetNodeToLabelsResponsePBImpl.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.SetNodeToLabelsResponseProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsResponse; + +import com.google.protobuf.TextFormat; + +public class SetNodeToLabelsResponsePBImpl extends SetNodeToLabelsResponse { + + SetNodeToLabelsResponseProto proto = SetNodeToLabelsResponseProto + .getDefaultInstance(); + SetNodeToLabelsResponseProto.Builder builder = null; + boolean viaProto = false; + + public SetNodeToLabelsResponsePBImpl() { + builder = SetNodeToLabelsResponseProto.newBuilder(); + } + + public SetNodeToLabelsResponsePBImpl(SetNodeToLabelsResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public SetNodeToLabelsResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java index d445996..0e05e73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateNodeResourceRequestPBImpl.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; -import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index ff0a249..43232d4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -57,6 +57,14 @@ import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.ClearAllLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.GetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; @@ -69,8 +77,14 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.GetNodeToLabelsResponsePBImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; @@ -199,7 +213,8 @@ void resetLeaderElection() { } private UserGroupInformation checkAccess(String method) throws IOException { - return RMServerUtils.verifyAccess(adminAcl, method, LOG); + return RMServerUtils.verifyAccess(adminAcl, method, + AdminService.class.getName(), LOG); } private UserGroupInformation checkAcls(String method) throws YarnException { @@ -612,4 +627,111 @@ public AccessControlList getAccessControlList() { public Server getServer() { return this.server; } + + @Override + public AddLabelsResponse addLabels(AddLabelsRequest request) + throws YarnException, IOException { + String 
argName = "addLabels"; + UserGroupInformation user = checkAcls(argName); + + if (!isRMActive()) { + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "ResourceManager is not active. Can not add labels."); + throwStandbyException(); + } + + AddLabelsResponse response = + recordFactory.newRecordInstance(AddLabelsResponse.class); + try { + rmContext.getNodeLabelManager().addLabels(request.getLabels()); + RMAuditLogger + .logSuccess(user.getShortUserName(), argName, "AdminService"); + return response; + } catch (IOException ioe) { + LOG.info("Exception add labels", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", "Exception add label"); + throw RPCUtil.getRemoteException(ioe); + } + } + + @Override + public RemoveLabelsResponse removeLabels( + RemoveLabelsRequest request) throws YarnException, IOException { + String argName = "removeLabels"; + UserGroupInformation user = checkAcls(argName); + + if (!isRMActive()) { + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "ResourceManager is not active. 
Can not remove labels."); + throwStandbyException(); + } + + RemoveLabelsResponse response = + recordFactory.newRecordInstance(RemoveLabelsResponse.class); + try { + rmContext.getNodeLabelManager().removeLabels(request.getLabels()); + RMAuditLogger + .logSuccess(user.getShortUserName(), argName, "AdminService"); + return response; + } catch (IOException ioe) { + LOG.info("Exception remove labels", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", "Exception remove label"); + throw RPCUtil.getRemoteException(ioe); + } + } + + @Override + public SetNodeToLabelsResponse setNodeToLabels( + SetNodeToLabelsRequest request) throws YarnException, IOException { + String argName = "setNodeToLabels"; + UserGroupInformation user = checkAcls(argName); + + if (!isRMActive()) { + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "ResourceManager is not active. Can not set node to labels."); + throwStandbyException(); + } + + SetNodeToLabelsResponse response = + recordFactory.newRecordInstance(SetNodeToLabelsResponse.class); + try { + rmContext.getNodeLabelManager().setLabelsOnMultipleNodes( + request.getNodeToLabels()); + RMAuditLogger + .logSuccess(user.getShortUserName(), argName, "AdminService"); + return response; + } catch (IOException ioe) { + LOG.info("Exception set node to labels. 
", ioe); + RMAuditLogger.logFailure(user.getShortUserName(), argName, + adminAcl.toString(), "AdminService", + "Exception set node to labels."); + throw RPCUtil.getRemoteException(ioe); + } + } + + @Override + public GetNodeToLabelsResponse getNodeToLabels(GetNodeToLabelsRequest request) + throws YarnException, IOException { + return GetNodeToLabelsResponsePBImpl.newInstance(rmContext + .getNodeLabelManager().getNodesToLabels()); + } + + @Override + public GetLabelsResponse getLabels(GetLabelsRequest request) + throws YarnException, IOException { + return GetLabelsResponsePBImpl.newInstance(rmContext.getNodeLabelManager() + .getLabels()); + } + + @Override + public ClearAllLabelsResponse clearAllLabels(ClearAllLabelsRequest request) + throws YarnException, IOException { + rmContext.getNodeLabelManager().clearAllLabels(); + return ClearAllLabelsResponse.newInstance(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java index d77180c..9de1dd9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java @@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.AMCommand; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import 
org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NMToken; @@ -481,11 +482,22 @@ public AllocateResponse allocate(AllocateRequest request) List blacklistRemovals = (blacklistRequest != null) ? blacklistRequest.getBlacklistRemovals() : Collections.EMPTY_LIST; - + RMApp app = + this.rmContext.getRMApps().get(appAttemptId.getApplicationId()); + + // set label expression for Resource Requests + ApplicationSubmissionContext asc = app.getApplicationSubmissionContext(); + for (ResourceRequest req : ask) { + if (null == req.getLabelExpression()) { + req.setLabelExpression(asc.getAppLabelExpression()); + } + } + // sanity check try { RMServerUtils.validateResourceRequests(ask, - rScheduler.getMaximumResourceCapability()); + rScheduler.getMaximumResourceCapability(), app.getQueue(), + rScheduler); } catch (InvalidResourceRequestException e) { LOG.warn("Invalid resource ask by application " + appAttemptId, e); throw e; @@ -498,8 +510,6 @@ public AllocateResponse allocate(AllocateRequest request) throw e; } - RMApp app = - this.rmContext.getRMApps().get(appAttemptId.getApplicationId()); // In the case of work-preserving AM restart, it's possible for the // AM to release containers from the earlier attempt. 
if (!app.getApplicationSubmissionContext() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 51024cf..faf6358 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -381,9 +381,17 @@ private void validateResourceRequest( ResourceRequest amReq = BuilderUtils.newResourceRequest( RMAppAttemptImpl.AM_CONTAINER_PRIORITY, ResourceRequest.ANY, submissionContext.getResource(), 1); + + // set label expression for AM container + if (null == submissionContext.getAMContainerLabelExpression()) { + amReq.setLabelExpression(submissionContext + .getAppLabelExpression()); + } + try { SchedulerUtils.validateResourceRequest(amReq, - scheduler.getMaximumResourceCapability()); + scheduler.getMaximumResourceCapability(), + submissionContext.getQueue(), scheduler); } catch (InvalidResourceRequestException e) { LOG.warn("RM app submission failed in validating AM resource request" + " for application " + submissionContext.getApplicationId(), e); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index 01d5064..94ecaf6 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.conf.ConfigurationProvider; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; @@ -102,5 +103,9 @@ void setRMApplicationHistoryWriter( boolean isWorkPreservingRecoveryEnabled(); + NodeLabelManager getNodeLabelManager(); + + public void setNodeLabelManager(NodeLabelManager mgr); + int getEpoch(); } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index f72ef30..5b50173 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import 
org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -83,6 +84,7 @@ private RMApplicationHistoryWriter rmApplicationHistoryWriter; private ConfigurationProvider configurationProvider; private int epoch; + private NodeLabelManager nodeLabelManager; /** * Default constructor. To be used in conjunction with setter methods for @@ -366,7 +368,17 @@ public int getEpoch() { return this.epoch; } - void setEpoch(int epoch) { + void setEpoch(int epoch) { this.epoch = epoch; } + + @Override + public NodeLabelManager getNodeLabelManager() { + return nodeLabelManager; + } + + @Override + public void setNodeLabelManager(NodeLabelManager mgr) { + nodeLabelManager = mgr; + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java index 0629c70..7fc9f2d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMFatalEventType.java @@ -27,5 +27,8 @@ STATE_STORE_OP_FAILED, // Source <- Embedded Elector - EMBEDDED_ELECTOR_FAILED + EMBEDDED_ELECTOR_FAILED, + + // Source <- NodeLabelManager + NODE_LABEL_STORE_FAILED } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index d93c45d..bc759cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.resource.Resources; @@ -84,9 +85,11 @@ * requested memory/vcore is non-negative and not greater than max */ public static void validateResourceRequests(List ask, - Resource maximumResource) throws InvalidResourceRequestException { + Resource maximumResource, String queueName, YarnScheduler scheduler) + throws InvalidResourceRequestException { for (ResourceRequest resReq : ask) { - SchedulerUtils.validateResourceRequest(resReq, maximumResource); + SchedulerUtils.validateResourceRequest(resReq, maximumResource, + queueName, scheduler); } } @@ -137,12 +140,13 @@ public static void validateBlacklistRequest( * passed {@link AccessControlList} * @param acl the {@link AccessControlList} to check against * @param method the method name to be logged + * @param module, like AdminService or NodeLabelManager * @param LOG the logger to use * @return {@link 
UserGroupInformation} of the current user * @throws IOException */ public static UserGroupInformation verifyAccess( - AccessControlList acl, String method, final Log LOG) + AccessControlList acl, String method, String module, final Log LOG) throws IOException { UserGroupInformation user; try { @@ -159,7 +163,7 @@ public static UserGroupInformation verifyAccess( " to call '" + method + "'"); RMAuditLogger.logFailure(user.getShortUserName(), method, - acl.toString(), "AdminService", + acl.toString(), module, RMAuditLogger.AuditConstants.UNAUTHORIZED_USER); throw new AccessControlException("User " + user.getShortUserName() + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index f315702..e518745 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -64,6 +64,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; +import org.apache.hadoop.yarn.server.resourcemanager.label.FileSystemNodeLabelManager; +import org.apache.hadoop.yarn.server.resourcemanager.label.MemoryNodeLabelManager; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManagerFactory; import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; @@ -293,6 +297,10 @@ protected AMLivelinessMonitor createAMLivelinessMonitor() { return new AMLivelinessMonitor(this.rmDispatcher); } + protected NodeLabelManager createNodeLabelManager() { + return NodeLabelManagerFactory.getNodeLabelManager(conf); + } + protected DelegationTokenRenewer createDelegationTokenRenewer() { return new DelegationTokenRenewer(); } @@ -368,6 +376,11 @@ protected void serviceInit(Configuration configuration) throws Exception { AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor(); addService(amFinishingMonitor); rmContext.setAMFinishingMonitor(amFinishingMonitor); + + NodeLabelManager nlm = createNodeLabelManager(); + nlm.setRMDispatcher(rmDispatcher); + addService(nlm); + rmContext.setNodeLabelManager(nlm); boolean isRecoveryEnabled = conf.getBoolean( YarnConfiguration.RECOVERY_ENABLED, @@ -913,7 +926,7 @@ protected void startWepApp() { * instance of {@link RMActiveServices} and initializes it. 
* @throws Exception */ - void createAndInitActiveServices() throws Exception { + protected void createAndInitActiveServices() throws Exception { activeServices = new RMActiveServices(); activeServices.init(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/FileSystemNodeLabelManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/FileSystemNodeLabelManager.java new file mode 100644 index 0000000..58367c6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/FileSystemNodeLabelManager.java @@ -0,0 +1,283 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.io.EOFException; +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveLabelsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.SetNodeToLabelsRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.AddLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.SetNodeToLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.SetNodeToLabelsRequestPBImpl; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelConfiguration.LoadStrategy; + +import com.google.common.collect.Sets; + +public class FileSystemNodeLabelManager extends NodeLabelManager { + protected static final String ROOT_DIR_NAME = "FSNodeLabelManagerRoot"; + protected static final String MIRROR_FILENAME = "nodelabel.mirror"; + protected static final String EDITLOG_FILENAME = "nodelabel.editlog"; + + Path fsWorkingPath; + Path rootDirPath; + FileSystem fs; + FSDataOutputStream editlogOs; + Path editLogPath; + boolean localFS = false; + + @Override + protected void serviceInit(Configuration conf) throws Exception { + 
fsWorkingPath = + new Path(conf.get(YarnConfiguration.FS_NODE_LABEL_STORE_URI, ".")); + rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); + + setFileSystem(conf); + + // mkdir of root dir path + fs.mkdirs(rootDirPath); + + super.serviceInit(conf); + } + + @Override + protected void serviceStop() throws Exception { + try { + fs.close(); + editlogOs.close(); + } catch (Exception e) { + LOG.warn("Exception happened whiling shutting down,", e); + } + + super.serviceStop(); + } + + private void setFileSystem(Configuration conf) throws IOException { + Configuration confCopy = new Configuration(conf); + confCopy.setBoolean("dfs.client.retry.policy.enabled", true); + String retryPolicy = + confCopy.get(YarnConfiguration.FS_NODE_LABEL_STORE_RETRY_POLICY_SPEC, + YarnConfiguration.DEFAULT_FS_NODE_LABEL_STORE_RETRY_POLICY_SPEC); + confCopy.set("dfs.client.retry.policy.spec", retryPolicy); + fs = fsWorkingPath.getFileSystem(confCopy); + + // if it's local file system, use RawLocalFileSystem instead of + // LocalFileSystem, the latter one doesn't support append. 
+ if (fs.getScheme().equals("file")) { + fs = ((LocalFileSystem)fs).getRaw(); + localFS = true; + } + } + + private void ensureAppendEditlogFile() throws IOException { + if (localFS) { + editlogOs = fs.append(editLogPath); + } + } + + private void ensureFlushEditlogFile() throws IOException { + if (localFS) { + // Hadoop's local FS cannot make sure hsync/flush will sync file on disk, + // so just close it + editlogOs.close(); + } else { + editlogOs.hsync(); + } + } + + @Override + protected void persistNodeToLabelsChanges( + Map> nodeToLabels) throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.NODE_TO_LABELS.ordinal()); + ((SetNodeToLabelsRequestPBImpl) SetNodeToLabelsRequest + .newInstance(nodeToLabels)).getProto().writeDelimitedTo(editlogOs); + ensureFlushEditlogFile(); + } + + @Override + protected void persistAddingLabels(Set labels) + throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.ADD_LABELS.ordinal()); + ((AddLabelsRequestPBImpl) AddLabelsRequest.newInstance(labels)).getProto() + .writeDelimitedTo(editlogOs); + ensureFlushEditlogFile(); + } + + @Override + protected void persistRemovingLabels(Collection labels) + throws IOException { + ensureAppendEditlogFile(); + editlogOs.writeInt(SerializedLogType.REMOVE_LABELS.ordinal()); + ((RemoveLabelsRequestPBImpl) RemoveLabelsRequest.newInstance(Sets + .newHashSet(labels.iterator()))).getProto().writeDelimitedTo(editlogOs); + ensureFlushEditlogFile(); + } + + @Override + public void recover(Map> defaultNodeToLabels, + Set defaultLabels, LoadStrategy loadStrategy) + throws IOException { + boolean anythingRecovered = false; + /* + * Steps of recover + * 1) Read from last mirror (from mirror or mirror.old) + * 2) Read from last edit log, and apply such edit log + * 3) Write new mirror to mirror.writing + * 4) Rename mirror to mirror.old + * 5) Move mirror.writing to mirror + * 6) Remove mirror.old + * 7) Remove edit log and create a new 
empty edit log + */ + + // Open mirror from serialized file + Path mirrorPath = new Path(rootDirPath, MIRROR_FILENAME); + Path oldMirrorPath = new Path(rootDirPath, MIRROR_FILENAME + ".old"); + + FSDataInputStream is = null; + if (fs.exists(mirrorPath)) { + is = fs.open(mirrorPath); + } else if (fs.exists(oldMirrorPath)) { + is = fs.open(oldMirrorPath); + } + + if (null != is) { + anythingRecovered = true; + Set labels = + new AddLabelsRequestPBImpl( + AddLabelsRequestProto.parseDelimitedFrom(is)).getLabels(); + Map> nodeToLabels = + new SetNodeToLabelsRequestPBImpl( + SetNodeToLabelsRequestProto.parseDelimitedFrom(is)) + .getNodeToLabels(); + addLabels(labels); + setLabelsOnMultipleNodes(nodeToLabels); + is.close(); + } + + // Open and process editlog + editLogPath = new Path(rootDirPath, EDITLOG_FILENAME); + if (fs.exists(editLogPath)) { + anythingRecovered = true; + is = fs.open(editLogPath); + + while (true) { + try { + // read edit log one by one + SerializedLogType type = SerializedLogType.values()[is.readInt()]; + + switch (type) { + case ADD_LABELS: { + Collection partitions = + AddLabelsRequestProto.parseDelimitedFrom(is) + .getLabelsList(); + addLabels(Sets.newHashSet(partitions.iterator())); + break; + } + case REMOVE_LABELS: { + Collection partitions = + RemoveLabelsRequestProto.parseDelimitedFrom(is) + .getLabelsList(); + removeLabels(partitions); + break; + } + case NODE_TO_LABELS: { + Map> map = + new SetNodeToLabelsRequestPBImpl( + SetNodeToLabelsRequestProto.parseDelimitedFrom(is)) + .getNodeToLabels(); + setLabelsOnMultipleNodes(map); + break; + } + } + } catch (EOFException e) { + // EOF hit, break + break; + } + } + } + + // if we recovered nothing, and default node to labels is not null, we will + // add default node->labels to repository + switch (loadStrategy) { + case INITIAL: + if (!anythingRecovered) { + addLabels(defaultLabels); + setLabelsOnMultipleNodes(defaultNodeToLabels); + } + break; + case REPLACE: + removeLabels(getLabels()); + 
addLabels(defaultLabels); + setLabelsOnMultipleNodes(defaultNodeToLabels); + break; + case MERGE: + addLabels(defaultLabels); + setLabelsOnMultipleNodes(defaultNodeToLabels); + break; + case CLEAR: + clearAllLabels(); + break; + } + + // Serialize current mirror to mirror.writing + Path writingMirrorPath = new Path(rootDirPath, MIRROR_FILENAME + ".writing"); + FSDataOutputStream os = fs.create(writingMirrorPath, true); + ((AddLabelsRequestPBImpl) AddLabelsRequestPBImpl + .newInstance(super.existingLabels)).getProto().writeDelimitedTo(os); + ((SetNodeToLabelsRequestPBImpl) SetNodeToLabelsRequest + .newInstance(super.nodeToLabels)).getProto().writeDelimitedTo(os); + os.close(); + + // Move mirror to mirror.old + if (fs.exists(mirrorPath)) { + fs.delete(oldMirrorPath, false); + fs.rename(mirrorPath, oldMirrorPath); + } + + // move mirror.writing to mirror + fs.rename(writingMirrorPath, mirrorPath); + fs.delete(writingMirrorPath, false); + + // remove mirror.old + fs.delete(oldMirrorPath, false); + + // create a new editlog file + editlogOs = fs.create(editLogPath, true); + editlogOs.close(); + + LOG.info("Finished write mirror at:" + mirrorPath.toString()); + LOG.info("Finished create editlog file at:" + editLogPath.toString()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/MemoryNodeLabelManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/MemoryNodeLabelManager.java new file mode 100644 index 0000000..8602f5d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/MemoryNodeLabelManager.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelConfiguration.LoadStrategy; + +public class MemoryNodeLabelManager extends NodeLabelManager { + @Override + public void recover(Map> defaultNodeToLabels, + Set defaultLabels, LoadStrategy loadStrategy) + throws IOException { + if (loadStrategy != LoadStrategy.CLEAR) { + addLabels(defaultLabels); + setLabelsOnMultipleNodes(defaultNodeToLabels); + } + } + + @Override + protected void persistNodeToLabelsChanges( + Map> nodeToPartitions) throws IOException { + // do nothing + } + + @Override + protected void persistAddingLabels(Set partition) throws IOException { + // do nothing + } + + @Override + protected void persistRemovingLabels(Collection partitions) + throws IOException { + // do nothing + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelConfiguration.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelConfiguration.java new file mode 100644 index 0000000..85da5b0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelConfiguration.java @@ -0,0 +1,82 @@ +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; + +public class NodeLabelConfiguration extends Configuration { + public static String CONF_FILENAME = "node-label.xml"; + public static String PREFIX = "yarn.node-label."; + + public static String LABELS_KEY = PREFIX + "labels"; + public static String NODES_KEY = PREFIX + "nodes"; + + public static String NODE_LABELS_SUFFIX = ".labels"; + public static String LOAD_STRATEGY_KEY = PREFIX + "load-strategy"; + + public static enum LoadStrategy { + INITIAL, REPLACE, MERGE, CLEAR + } + + public NodeLabelConfiguration() { + this(new Configuration()); + } + + public NodeLabelConfiguration(Configuration conf) { + super(conf); + addResource(CONF_FILENAME); + } + + public Set getLabels() { + Set labelsSet = new HashSet(); + String[] labels = getStrings(LABELS_KEY); + if (null != labels) { + for (String l : labels) { + if (l.trim().isEmpty()) { + continue; + } + labelsSet.add(l); + } + } + return labelsSet; + } + + public Map> getNodeToLabels() { + Map> nodeToLabels = new HashMap>(); + + String[] nodes = getStrings(NODES_KEY); + if (null != nodes) { + for (String n : nodes) { + if (n.trim().isEmpty()) { + continue; + } + String[] labels = getStrings(NODES_KEY + "." 
+ n + NODE_LABELS_SUFFIX); + nodeToLabels.put(n, new HashSet()); + + if (labels != null) { + for (String l : labels) { + if (l.trim().isEmpty()) { + continue; + } + nodeToLabels.get(n).add(l); + } + } + } + } + + return nodeToLabels; + } + + public LoadStrategy getLoadStrategy() { + String strategy = get(LOAD_STRATEGY_KEY); + + if (null == strategy) { + return LoadStrategy.INITIAL; + } + + return LoadStrategy.valueOf(strategy.toUpperCase()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelManager.java new file mode 100644 index 0000000..a6a257f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelManager.java @@ -0,0 +1,1136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEvent; +import org.apache.hadoop.yarn.server.resourcemanager.RMFatalEventType; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelConfiguration.LoadStrategy; +import org.apache.hadoop.yarn.server.resourcemanager.label.event.AddLabelsEvent; +import org.apache.hadoop.yarn.server.resourcemanager.label.event.NodeLabelManagerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.label.event.NodeLabelManagerEventType; +import org.apache.hadoop.yarn.server.resourcemanager.label.event.RemoveLabelsEvent; +import 
org.apache.hadoop.yarn.server.resourcemanager.label.event.StoreNodeToLabelsEvent; +import org.apache.hadoop.yarn.state.InvalidStateTransitonException; +import org.apache.hadoop.yarn.state.SingleArcTransition; +import org.apache.hadoop.yarn.state.StateMachine; +import org.apache.hadoop.yarn.state.StateMachineFactory; +import org.apache.hadoop.yarn.util.resource.Resources; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; + +public abstract class NodeLabelManager extends AbstractService { + protected static final Log LOG = LogFactory.getLog(NodeLabelManager.class); + private static final int MAX_LABEL_LENGTH = 255; + public static final Set EMPTY_STRING_SET = Collections + .unmodifiableSet(new HashSet(0)); + public static final String ANY = "*"; + public static final Set ACCESS_ANY_LABEL_SET = ImmutableSet.of(ANY); + + /** + * If a user doesn't specify label of a queue or node, it belongs + * DEFAULT_LABEL + */ + public static final String NO_LABEL = ""; + + private enum NodeLabelManagerState { + DEFAULT + }; + + protected enum SerializedLogType { + ADD_LABELS, NODE_TO_LABELS, REMOVE_LABELS + } + + private static final StateMachineFactory stateMachineFactory = + new StateMachineFactory( + NodeLabelManagerState.DEFAULT) + .addTransition(NodeLabelManagerState.DEFAULT, + NodeLabelManagerState.DEFAULT, + NodeLabelManagerEventType.STORE_NODE_TO_LABELS, + new StoreNodeToLabelsTransition()) + .addTransition(NodeLabelManagerState.DEFAULT, + NodeLabelManagerState.DEFAULT, + NodeLabelManagerEventType.ADD_LABELS, new AddLabelsTransition()) + .addTransition(NodeLabelManagerState.DEFAULT, + NodeLabelManagerState.DEFAULT, + NodeLabelManagerEventType.REMOVE_LABELS, + new RemoveLabelsTransition()); + + private final StateMachine stateMachine; + + private static class StoreNodeToLabelsTransition implements + SingleArcTransition { + @Override + public void transition(NodeLabelManager store, 
NodeLabelManagerEvent event) { + if (!(event instanceof StoreNodeToLabelsEvent)) { + // should never happen + LOG.error("Illegal event type: " + event.getClass()); + return; + } + StoreNodeToLabelsEvent e = (StoreNodeToLabelsEvent) event; + try { + store.persistNodeToLabelsChanges(e.getNodeToLabels()); + } catch (IOException ioe) { + LOG.error("Error removing store node to label:" + ioe.getMessage()); + store.notifyStoreOperationFailed(ioe); + } + }; + } + + private static class AddLabelsTransition implements + SingleArcTransition { + @Override + public void transition(NodeLabelManager store, NodeLabelManagerEvent event) { + if (!(event instanceof AddLabelsEvent)) { + // should never happen + LOG.error("Illegal event type: " + event.getClass()); + return; + } + AddLabelsEvent e = (AddLabelsEvent) event; + try { + store.persistAddingLabels(e.getLabels()); + } catch (IOException ioe) { + LOG.error("Error storing new label:" + ioe.getMessage()); + store.notifyStoreOperationFailed(ioe); + } + }; + } + + private static class RemoveLabelsTransition implements + SingleArcTransition { + @Override + public void transition(NodeLabelManager store, NodeLabelManagerEvent event) { + if (!(event instanceof RemoveLabelsEvent)) { + // should never happen + LOG.error("Illegal event type: " + event.getClass()); + return; + } + RemoveLabelsEvent e = (RemoveLabelsEvent) event; + try { + store.persistRemovingLabels(e.getLabels()); + } catch (IOException ioe) { + LOG.error("Error removing label on filesystem:" + ioe.getMessage()); + store.notifyStoreOperationFailed(ioe); + } + }; + } + + protected Dispatcher dispatcher; + + // existing labels in the cluster + protected Set existingLabels = new ConcurrentSkipListSet(); + + // node to labels and label to nodes + protected Map> nodeToLabels = + new ConcurrentHashMap>(); + private Map> labelToNodes = + new ConcurrentHashMap>(); + + // running node and label to running nodes + private ConcurrentMap> labelToActiveNodes = + new 
ConcurrentHashMap>(); + private Set runningNodes = new ConcurrentSkipListSet(); + + // recording label to queues and queue to Resource + private ConcurrentMap> queueToLabels = + new ConcurrentHashMap>(); + private ConcurrentMap queueToResource = + new ConcurrentHashMap(); + + // node name -> map + // This is used to calculate how much resource in each node, use a nested map + // because it is possible multiple NMs launch in a node + private Map> nodeToResource = + new ConcurrentHashMap>(); + private Map labelToResource = + new ConcurrentHashMap(); + + private Dispatcher rmDispatcher; + private final ReadLock readLock; + private final WriteLock writeLock; + private AccessControlList adminAcl; + + public NodeLabelManager() { + super(NodeLabelManager.class.getName()); + stateMachine = stateMachineFactory.make(this); + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + readLock = lock.readLock(); + writeLock = lock.writeLock(); + } + + public void setRMDispatcher(Dispatcher dispatcher) { + this.rmDispatcher = dispatcher; + } + + // for UT purpose + protected void initDispatcher(Configuration conf) { + // create async handler + dispatcher = new AsyncDispatcher(); + AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher; + asyncDispatcher.init(conf); + asyncDispatcher.setDrainEventsOnStop(); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + adminAcl = new AccessControlList(conf.get( + YarnConfiguration.YARN_ADMIN_ACL, + YarnConfiguration.DEFAULT_YARN_ADMIN_ACL)); + + NodeLabelConfiguration nodeLabelConf = new NodeLabelConfiguration(conf); + Map> defaultNodeToLabels = nodeLabelConf + .getNodeToLabels(); + Set defaultLabels = nodeLabelConf.getLabels(); + LoadStrategy loadStrategy = nodeLabelConf.getLoadStrategy(); + + // recover from previous state + recover(defaultNodeToLabels, defaultLabels, loadStrategy); + } + + private UserGroupInformation checkAccess(String method) throws IOException { + return 
RMServerUtils.verifyAccess(adminAcl, method, + NodeLabelManager.class.getName(), LOG); + } + + Map> getDefaultNodeToLabels(NodeLabelConfiguration conf) + throws IOException { + return conf.getNodeToLabels(); + } + + protected void addDefaultNodeToLabels( + Map> defaultNodeToLabels) throws IOException { + Set labels = new HashSet(); + for (Set t : defaultNodeToLabels.values()) { + labels.addAll(t); + } + addLabels(labels); + + setLabelsOnMultipleNodes(defaultNodeToLabels); + } + + // for UT purpose + protected void startDispatcher() { + // start dispatcher + AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher; + asyncDispatcher.start(); + } + + @Override + protected void serviceStart() throws Exception { + // init dispatcher only when service start, because recover will happen in + // service init, we don't want to trigger any event handling at that time. + initDispatcher(getConfig()); + + dispatcher.register(NodeLabelManagerEventType.class, + new ForwardingEventHandler()); + + startDispatcher(); + } + + /** + * Store node -> label to filesystem + */ + protected abstract void persistNodeToLabelsChanges( + Map> nodeToLabels) throws IOException; + + /** + * Store new label to filesystem + */ + protected abstract void persistAddingLabels(Set label) + throws IOException; + + /* + * Remove label from filesystem + */ + protected abstract void persistRemovingLabels(Collection labels) + throws IOException; + + /** + * Recover node label from file system, + * + * @param defaultNodeToLabels + * default node to labels map if nothing recovered + * @param defaultLabels + * default labels + * @param loadStrategy + * loadStrategy + * @throws IOException + */ + public abstract void recover(Map> defaultNodeToLabels, + Set defaultLabels, LoadStrategy loadStrategy) throws IOException; + + private boolean isAlphabetic(char c) { + if (('a' <= c && 'z' >= c) || ('A' <= c && 'Z' >= c)) { + return true; + } + return false; + } + + protected void checkLabelName(String label) 
throws IOException { + if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) { + throw new IOException("label added is empty or exceeds " + + MAX_LABEL_LENGTH + " charactors"); + } + + boolean valid = true; + for (int i = 0; i < label.length(); i++) { + if (!(isAlphabetic(label.charAt(i)) || Character.isDigit(label.charAt(i)) + || label.charAt(i) == '-' || label.charAt(i) == '_')) { + valid = false; + break; + } + } + + if (!valid) { + throw new IOException("label contains charactor besides digits," + + " alphabetics, \"-\" and \"_\", label=" + label); + } + + if (label.startsWith("-") || label.startsWith("_")) { + throw new IOException("Label shouldn't start with \"-\" or \"_\", label=" + + label); + } + } + + protected String normalizeLabel(String label) { + if (label != null) { + return label.toLowerCase(); + } + return NO_LABEL; + } + + protected Set normalizeLabels(Set labels) { + Set newLabels = new HashSet(); + for (String label : labels) { + newLabels.add(normalizeLabel(label)); + } + return newLabels; + } + + /** + * Add a label to repository + * + * @param label + * label label + */ + public void addLabel(String label) throws IOException { + checkLabelName(label); + addLabels(ImmutableSet.of(label)); + } + + /** + * Add multiple labels to repository + * + * @param existingLabels + * label labels + */ + @SuppressWarnings("unchecked") + public void addLabels(Set labels) throws IOException { + checkAccess("addLabels"); + + if (null == labels || labels.isEmpty()) { + return; + } + + try { + writeLock.lock(); + Set normalizedLabels = new HashSet(); + for (String label : labels) { + checkLabelName(label); + String normalizedLabel = normalizeLabel(label); + this.existingLabels.add(normalizedLabel); + normalizedLabels.add(normalizedLabel); + } + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new AddLabelsEvent(normalizedLabels)); + } + + LOG.info("Add labels: [" + StringUtils.join(labels.iterator(), ",") + "]"); + } 
finally { + writeLock.unlock(); + } + } + + /** + * Remove a label from repository + * + * @param labelToRemove + * @throws IOException + */ + public void removeLabel(String labelToRemove) throws IOException { + removeLabels(Arrays.asList(labelToRemove)); + } + + private void addNMInNodeAlreadyHasNM(Set labels, Resource newNMRes) { + try { + writeLock.lock(); + for (String label : labels) { + Resource originalRes = labelToResource.get(label); + labelToResource.put(label, Resources.add(newNMRes, originalRes)); + } + for (String queueName : queueToLabels.keySet()) { + if (isNodeUsableByQueue(labels, queueName)) { + Resource res = queueToResource.get(queueName); + Resources.addTo(res, newNMRes); + } + } + } finally { + writeLock.unlock(); + } + } + + private void + removeNMToNodeAlreadyHasNM(Set labels, Resource newNMRes) { + try { + writeLock.lock(); + for (String label : labels) { + Resource originalRes = labelToResource.get(label); + labelToResource.put(label, Resources.subtract(originalRes, newNMRes)); + } + for (String queueName : queueToLabels.keySet()) { + if (isNodeUsableByQueue(labels, queueName)) { + Resource res = queueToResource.get(queueName); + Resources.subtractFrom(res, newNMRes); + } + } + } finally { + writeLock.unlock(); + } + } + + private enum UpdateLabelResourceType { + ACTIVE, + DEACTIVE, + UPDATE_LABEL + } + + private void updateLabelResource(Map> addLabelToNodes, + Map> removeLabelToNodes, + Map> originalNodeToLabels, + UpdateLabelResourceType updateType) { + try { + writeLock.lock(); + + // process add label to nodes + if (addLabelToNodes != null) { + for (Entry> entry : addLabelToNodes.entrySet()) { + String label = entry.getKey(); + Set nodes = entry.getValue(); + + // update label to active nodes + labelToActiveNodes.putIfAbsent(label, new HashSet()); + labelToActiveNodes.get(label).addAll(addLabelToNodes.get(label)); + + // update label to resource + Resource res = Resource.newInstance(0, 0); + for (String node : nodes) { + 
Resources.addTo(res, getResourceOfNode(node)); + } + Resource originalRes = labelToResource.get(label); + labelToResource.put(label, + originalRes == null ? res : Resources.add(res, originalRes)); + } + } + + // process remove label to nodes + if (removeLabelToNodes != null) { + for (Entry> entry : removeLabelToNodes.entrySet()) { + String label = entry.getKey(); + Set nodes = entry.getValue(); + + // update label to active nodes + labelToActiveNodes.get(label).removeAll(nodes); + + // update label to resource + Resource res = Resource.newInstance(0, 0); + for (String node : nodes) { + Resources.addTo(res, getResourceOfNode(node)); + } + Resource originalRes = labelToResource.get(label); + labelToResource.put(label, Resources.subtract(originalRes, res)); + } + } + + // update queue to resource + for (Entry> originEntry : originalNodeToLabels + .entrySet()) { + String node = originEntry.getKey(); + Set originLabels = originEntry.getValue(); + Set nowLabels = nodeToLabels.get(node); + + for (String q : queueToResource.keySet()) { + Resource queueResource = queueToResource.get(q); + boolean pastUsable = isNodeUsableByQueue(originLabels, q); + boolean nowUsable = isNodeUsableByQueue(nowLabels, q); + + if (updateType == UpdateLabelResourceType.UPDATE_LABEL) { + if (pastUsable && !nowUsable) { + Resources.subtractFrom(queueResource, getResourceOfNode(node)); + } else if (!pastUsable && nowUsable) { + Resources.addTo(queueResource, getResourceOfNode(node)); + } + } else if (updateType == UpdateLabelResourceType.ACTIVE) { + if (nowUsable) { + Resources.addTo(queueResource, getResourceOfNode(node)); + } + } else if (updateType == UpdateLabelResourceType.DEACTIVE) { + if (nowUsable) { + Resources.subtractFrom(queueResource, getResourceOfNode(node)); + } + } + } + } + } finally { + writeLock.unlock(); + } + } + + private boolean isNodeUsableByQueue(Set nodeLabels, String queueName) { + // node without any labels can be accessed by any queue + if (nodeLabels == null || 
nodeLabels.isEmpty() + || (nodeLabels.size() == 1 && nodeLabels.contains(NO_LABEL))) { + return true; + } + + for (String label : nodeLabels) { + if (queueToLabels.containsKey(queueName) + && queueToLabels.get(queueName).contains(label)) { + return true; + } + } + + return false; + } + + private void removeAll(Map> map, String key, + Set set) { + if (set == null) { + return; + } + if (!map.containsKey(key)) { + return; + } + map.get(key).remove(set); + } + + private void remove(Map> map, String key, String value) { + if (value == null) { + return; + } + if (!map.containsKey(key)) { + return; + } + map.get(key).remove(value); + if (map.get(key).isEmpty()) { + map.remove(key); + } + } + + private void add(Map> map, String key, String value) { + if (value == null) { + return; + } + if (!map.containsKey(key)) { + map.put(key, new HashSet()); + } + map.get(key).add(value); + } + + /** + * Remove multiple labels labels from repository + * + * @param labelsToRemove + * @throws IOException + */ + @SuppressWarnings("unchecked") + public void removeLabels(Collection labelsToRemove) + throws IOException { + checkAccess("removeLabels"); + + if (null == labelsToRemove || labelsToRemove.isEmpty()) { + return; + } + + try { + writeLock.lock(); + + Map> labelToActiveNodeAdded = + new HashMap>(); + Map> labelToActiveNodeRemoved = + new HashMap>(); + Map> originalNodeToLabels = + new HashMap>(); + + for (String label : labelsToRemove) { + label = normalizeLabel(label); + if (label == null || label.isEmpty() || !existingLabels.contains(label)) { + throw new IOException("Label to be removed is null or empty"); + } + + // remove it from label + this.existingLabels.remove(label); + + // remove it from labelToActiveNodes + Set activeNodes = labelToActiveNodes.remove(label); + removeAll(labelToActiveNodeRemoved, label, activeNodes); + + // update node -> labels + Set nodes = labelToNodes.remove(label); + + // update node to labels + if (nodes != null) { + for (String node : nodes) { + if 
(!originalNodeToLabels.containsKey(node) + && nodeToLabels.containsKey(node)) { + Set originalLabels = + Sets.newHashSet(nodeToLabels.get(node)); + originalNodeToLabels.put(node, originalLabels); + } + remove(nodeToLabels, node, label); + // if we don't have any labels in a node now, we will mark this node + // as no label + if (runningNodes.contains(node) && nodeToLabels.get(node).isEmpty()) { + add(labelToActiveNodeAdded, NO_LABEL, node); + } + } + } + } + + // update resource + updateLabelResource(labelToActiveNodeAdded, labelToActiveNodeRemoved, + originalNodeToLabels, UpdateLabelResourceType.UPDATE_LABEL); + + // create event to remove labels + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new RemoveLabelsEvent(labelsToRemove)); + } + + LOG.info("Remove labels: [" + + StringUtils.join(labelsToRemove.iterator(), ",") + "]"); + } finally { + writeLock.unlock(); + } + } + + private void verifyNodeLabel(String node, String label) throws IOException { + if (node == null || node.isEmpty()) { + throw new IOException( + "Trying to change label on a node, but node is null or empty"); + } + if (label != null && !label.isEmpty() && !existingLabels.contains(label)) { + throw new IOException("Label doesn't exist in repository, " + + "have you added it before? 
label=" + label); + } + } + + private Set emptyWhenNull(Set s) { + if (s == null) { + return new HashSet(); + } + return s; + } + + /** + * Set node -> label, if label is null or empty, it means remove label on node + * + * @param nodeToLabel + * node -> label map + */ + @SuppressWarnings("unchecked") + public void + setLabelsOnMultipleNodes(Map> newNodeToLabels) + throws IOException { + checkAccess("setLabelsOnMultipleNodes"); + + if (null == newNodeToLabels || newNodeToLabels.isEmpty()) { + return; + } + + try { + writeLock.lock(); + + Map> labelToActiveNodeAdded = + new HashMap>(); + Map> labelToActiveNodeRemoved = + new HashMap>(); + Map> originalNodeToLabels = + new HashMap>(); + + for (Entry> e : newNodeToLabels.entrySet()) { + String node = e.getKey(); + Set labels = e.getValue(); + + // normalize and verify + labels = normalizeLabels(labels); + for (String label : labels) { + verifyNodeLabel(node, label); + } + + // handling labels removed + Set originalLabels = emptyWhenNull(nodeToLabels.get(node)); + Set difference = Sets.difference(originalLabels, labels); + for (String removedLabel : difference) { + remove(labelToNodes, removedLabel, node); + if (runningNodes.contains(node)) { + add(labelToActiveNodeRemoved, removedLabel, node); + } + } + + // Mark this node as "no-label" if we set a empty set of label + if (labels.isEmpty() && !originalLabels.isEmpty() + && runningNodes.contains(node)) { + add(labelToActiveNodeAdded, NO_LABEL, node); + } + + // handling labels added + for (String addedLabel : Sets.difference(labels, originalLabels)) { + add(labelToNodes, addedLabel, node); + if (runningNodes.contains(node)) { + add(labelToActiveNodeAdded, addedLabel, node); + } + } + + // Mark this node not "no-label" if we set a non-empty set of label + if (!labels.isEmpty() && originalLabels.isEmpty() + && runningNodes.contains(node)) { + add(labelToActiveNodeRemoved, NO_LABEL, node); + } + } + + // save original node to labels + for (String node : 
newNodeToLabels.keySet()) { + if (!originalNodeToLabels.containsKey(node) + && nodeToLabels.containsKey(node)) { + Set originalLabels = Sets.newHashSet(nodeToLabels.get(node)); + originalNodeToLabels.put(node, originalLabels); + } + } + + // update node to labels and label to nodes + nodeToLabels.putAll(newNodeToLabels); + + updateLabelResource(labelToActiveNodeAdded, labelToActiveNodeRemoved, + originalNodeToLabels, UpdateLabelResourceType.UPDATE_LABEL); + + if (null != dispatcher) { + dispatcher.getEventHandler().handle( + new StoreNodeToLabelsEvent(newNodeToLabels)); + } + + // shows node->labels we added + LOG.info("setLabelsOnMultipleNodes:"); + for (String host : newNodeToLabels.keySet()) { + Set labels = newNodeToLabels.get(host); + LOG.info(" host=" + host + ", labels=[" + + StringUtils.join(labels.iterator(), ",") + "]"); + } + } finally { + writeLock.unlock(); + } + } + + public void setLabelsOnSingleNode(String node, Set labels) + throws IOException { + setLabelsOnMultipleNodes(ImmutableMap.of(node, labels)); + } + + private Resource getResourceOfNode(String node) { + Resource res = Resource.newInstance(0, 0); + if (nodeToResource.containsKey(node)) { + for (Resource r : nodeToResource.get(node).values()) { + Resources.addTo(res, r); + } + } + return res; + } + + /** + * Set label on node, if label is null or empty, it means remove label on node + * + * @param node + * @param labels + */ + public void removeLabelsOnNodes(String node, Set labels) + throws IOException { + setLabelsOnMultipleNodes(ImmutableMap.of(node, labels)); + } + + public Resource getResourceWithNoLabel() throws IOException { + return getResourceWithLabel(NO_LABEL); + } + + public Resource getResourceWithLabel(String label) { + label = normalizeLabel(label); + try { + readLock.lock(); + Resource res = labelToResource.get(label); + return res == null ? 
Resources.none() : res; + } finally { + readLock.unlock(); + } + } + + /* + * Following methods are used for setting if a node is up and running, which + * will be used by this#getActiveNodesByLabel and getLabelResource + */ + public void activeNode(NodeId node, Resource resource) { + try { + writeLock.lock(); + String nodeName = node.getHost(); + + // put this node to nodeToResource + if (!nodeToResource.containsKey(nodeName)) { + nodeToResource.put(nodeName, new ConcurrentHashMap()); + } + + if (null != nodeToResource.get(nodeName).put(node, resource)) { + String msg = + "This shouldn't happen, trying to active node," + + " but there's already a node here, " + + "please check what happened. NodeId=" + node.toString(); + LOG.error(msg); + notifyStoreOperationFailed(new IOException(msg)); + } + + // add add it to running node + runningNodes.add(nodeName); + + // update resources + Set labels = nodeToLabels.get(nodeName); + labels = + (labels == null || labels.isEmpty()) ? ImmutableSet.of(NO_LABEL) + : labels; + + if (nodeToResource.get(nodeName).size() <= 1) { + Map> labelToActiveNodeAdded = + new HashMap>(); + for (String label : labels) { + labelToActiveNodeAdded.put(label, ImmutableSet.of(nodeName)); + } + Map> originalNodeTolabels = + new HashMap>(); + if (nodeToLabels.containsKey(nodeName)) { + originalNodeTolabels.put(nodeName, nodeToLabels.get(nodeName)); + } else { + originalNodeTolabels.put(nodeName, NodeLabelManager.EMPTY_STRING_SET); + } + updateLabelResource(labelToActiveNodeAdded, null, originalNodeTolabels, + UpdateLabelResourceType.ACTIVE); + } else { + // Support more than two NMs in a same node + addNMInNodeAlreadyHasNM(labels, resource); + } + } finally { + writeLock.unlock(); + } + } + + public void deactiveNode(NodeId node) { + try { + writeLock.lock(); + String nodeName = node.getHost(); + Resource res = null; + + // add add it to running node + runningNodes.add(nodeName); + + // update resources + Set labels = nodeToLabels.get(nodeName); + 
labels = + labels == null || labels.isEmpty() ? ImmutableSet.of(NO_LABEL) + : labels; + + // this is last NM in this node + if (nodeToResource.get(nodeName).size() == 1) { + Map> labelToActiveNodeRemoved = + new HashMap>(); + for (String label : labels) { + labelToActiveNodeRemoved.put(label, ImmutableSet.of(nodeName)); + labelToActiveNodes.get(label).remove(nodeName); + } + Map> originalNodeTolabels = + new HashMap>(); + if (nodeToLabels.containsKey(nodeName)) { + originalNodeTolabels.put(nodeName, nodeToLabels.get(nodeName)); + } else { + originalNodeTolabels.put(nodeName, NodeLabelManager.EMPTY_STRING_SET); + } + updateLabelResource(null, labelToActiveNodeRemoved, + originalNodeTolabels, UpdateLabelResourceType.DEACTIVE); + } + + // update node to resource + if (null == (res = nodeToResource.get(nodeName).remove(node))) { + String msg = + "This shouldn't happen, trying to active node," + + " but there's already a node here, " + + "please check what happened. NodeId=" + node.toString(); + LOG.error(msg); + notifyStoreOperationFailed(new IOException(msg)); + } + + // if there's more NM remains + if (nodeToResource.get(nodeName).size() > 0) { + // Support more than two NMs in a same node + removeNMToNodeAlreadyHasNM(labels, res); + } + } finally { + writeLock.unlock(); + } + } + + public void updateNodeResource(NodeId node, Resource newResource) { + deactiveNode(node); + activeNode(node, newResource); + } + + /** + * Remove labels on given nodes + * + * @param nodes + * to remove labels + */ + public void removeLabelsOnNodes(Collection nodes) throws IOException { + Map> map = + new HashMap>(nodes.size()); + for (String node : nodes) { + map.put(node, EMPTY_STRING_SET); + } + setLabelsOnMultipleNodes(map); + } + + /** + * Remove label on given node + * + * @param node + * to remove label + */ + public void removeLabelOnNode(String node) throws IOException { + removeLabelsOnNodes(Arrays.asList(node)); + } + + /** + * Clear all labels and related mapping from 
NodeLabelManager + * @throws IOException + */ + public void clearAllLabels() throws IOException { + try { + writeLock.lock(); + Set dupLabels = Sets.newHashSet(getLabels()); + removeLabels(dupLabels); + } finally { + writeLock.unlock(); + } + } + + /** + * Get nodes by given label + * + * @param label + * @return nodes has assigned give label label + */ + public Collection getActiveNodesByLabel(String label) + throws IOException { + label = normalizeLabel(label); + try { + readLock.lock(); + return Collections.unmodifiableCollection(labelToActiveNodes.get(label)); + } finally { + readLock.unlock(); + } + } + + /** + * Get number of nodes by given label + * + * @param label + * @return Get number of nodes by given label + */ + public int getNumOfNodesByLabel(String label) throws IOException { + label = normalizeLabel(label); + try { + readLock.lock(); + Collection nodes = labelToActiveNodes.get(label); + return nodes == null ? 0 : nodes.size(); + } finally { + readLock.unlock(); + } + } + + /** + * Get mapping of nodes to labels + * + * @return nodes to labels map + */ + public Map> getNodesToLabels() throws IOException { + try { + readLock.lock(); + return Collections.unmodifiableMap(nodeToLabels); + } finally { + readLock.unlock(); + } + } + + public Set getLabelsOnNode(String node) { + Set label = nodeToLabels.get(node); + return label == null ? 
EMPTY_STRING_SET : Collections + .unmodifiableSet(label); + } + + /** + * Get existing valid labels in repository + * + * @return existing valid labels in repository + */ + public Set getLabels() throws IOException { + try { + readLock.lock(); + return Collections.unmodifiableSet(existingLabels); + } finally { + readLock.unlock(); + } + } + + public boolean containsLabel(String label) { + try { + readLock.lock(); + return label != null + && (label.isEmpty() || existingLabels.contains(label.toLowerCase())); + } finally { + readLock.unlock(); + } + } + + public void reinitializeQueueLabels(Map> queueToLabels) { + try { + writeLock.lock(); + // clear before set + this.queueToLabels.clear(); + queueToResource.clear(); + + for (Entry> entry : queueToLabels.entrySet()) { + String queue = entry.getKey(); + Set labels = entry.getValue(); + labels = labels.isEmpty() ? ImmutableSet.of(NO_LABEL) : labels; + if (labels.contains(ANY)) { + continue; + } + + this.queueToLabels.put(queue, labels); + + // empty label node can be accessed by any queue + Set dupLabels = new HashSet(labels); + dupLabels.add(""); + Set accessedNodes = new HashSet(); + Resource totalResource = Resource.newInstance(0, 0); + for (String label : dupLabels) { + if (labelToActiveNodes.containsKey(label)) { + for (String node : labelToActiveNodes.get(label)) { + if (!accessedNodes.contains(node)) { + accessedNodes.add(node); + Resources.addTo(totalResource, getResourceOfNode(node)); + } + } + } + } + queueToResource.put(queue, totalResource); + } + } finally { + writeLock.unlock(); + } + } + + public Resource getQueueResource(String queueName, Set queueLabels, + Resource clusterResource) { + if (queueLabels.contains(ANY)) { + return clusterResource; + } + Resource res = queueToResource.get(queueName); + return res == null ? 
clusterResource : res; + } + + // Dispatcher related code + protected void handleStoreEvent(NodeLabelManagerEvent event) { + try { + this.stateMachine.doTransition(event.getType(), event); + } catch (InvalidStateTransitonException e) { + LOG.error("Can't handle this event at current state", e); + } + } + + private final class ForwardingEventHandler implements + EventHandler { + + @Override + public void handle(NodeLabelManagerEvent event) { + if (isInState(STATE.STARTED)) { + handleStoreEvent(event); + } + } + } + + /** + * This method is called to notify the ResourceManager that the store + * operation has failed. + * + * @param failureCause + * the exception due to which the operation failed + */ + @SuppressWarnings("unchecked") + protected void notifyStoreOperationFailed(Exception failureCause) { + rmDispatcher.getEventHandler() + .handle( + new RMFatalEvent(RMFatalEventType.NODE_LABEL_STORE_FAILED, + failureCause)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelManagerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelManagerFactory.java new file mode 100644 index 0000000..72c1498 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelManagerFactory.java @@ -0,0 +1,16 @@ +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; + +public class NodeLabelManagerFactory { + + public static NodeLabelManager getNodeLabelManager(Configuration conf) { + NodeLabelManager mgr = ReflectionUtils.newInstance( + 
conf.getClass(YarnConfiguration.RM_NODE_LABEL_MANAGER_CLS, + MemoryNodeLabelManager.class, NodeLabelManager.class), + conf); + return mgr; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelUtils.java new file mode 100644 index 0000000..5e8674e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelUtils.java @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import com.google.gson.JsonArray; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.JsonParser; + +public class NodeLabelUtils { + private static final String PARSE_FAILED_MSG = + "Failed to parse node-> labels json"; + private static final String LABELS_KEY = + "labels"; + + /** + * Get node to labels from JSON like: + * + * { + * "host1": { + * "labels": [ + * "x", + * "y", + * "z" + * ] + * }, + * "host2": { + * "labels": [ + * "a", + * "b", + * "c" + * ] + * }, + * "host3": { + * "labels": [] + * } + * } + * + * @param json + * @return + */ + public static Map> getNodeToLabelsFromJson(String json) + throws IOException { + Map> nodeToLabels = new HashMap>(); + + if (json == null || json.trim().isEmpty()) { + return nodeToLabels; + } + + JsonParser parser = new JsonParser(); + JsonElement node; + try { + node = parser.parse(json); + } catch (JsonParseException e) { + throw new IOException(e); + } + + if (node.isJsonObject()) { + JsonObject obj = node.getAsJsonObject(); + for (Map.Entry entry : obj.entrySet()) { + String nodeName = entry.getKey().trim(); + if (nodeName.isEmpty()) { + throw new IOException(PARSE_FAILED_MSG); + } + nodeToLabels.put(nodeName, new HashSet()); + + if (entry.getValue().isJsonObject()) { + JsonObject labelObj = entry.getValue().getAsJsonObject(); + if (labelObj.entrySet().size() > 0) { + JsonElement labelsElement = labelObj.get(LABELS_KEY); + if (labelsElement == null || !labelsElement.isJsonArray()) { + throw new IOException(PARSE_FAILED_MSG); + } + JsonArray labelsArray = labelsElement.getAsJsonArray(); + for (JsonElement item : labelsArray) { + nodeToLabels.get(nodeName).add(item.getAsString()); + } + } + } else { + throw new 
IOException(PARSE_FAILED_MSG); + } + } + } else { + throw new IOException(PARSE_FAILED_MSG); + } + + return nodeToLabels; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/AddLabelsEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/AddLabelsEvent.java new file mode 100644 index 0000000..6c1d07a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/AddLabelsEvent.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.label.event; + +import java.util.Set; + +public class AddLabelsEvent extends NodeLabelManagerEvent { + private Set partitions; + + public AddLabelsEvent(Set partitions) { + super(NodeLabelManagerEventType.ADD_LABELS); + this.partitions = partitions; + } + + public Set getLabels() { + return partitions; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/NodeLabelManagerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/NodeLabelManagerEvent.java new file mode 100644 index 0000000..ad45d4b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/NodeLabelManagerEvent.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.label.event; + +import org.apache.hadoop.yarn.event.AbstractEvent; + +public class NodeLabelManagerEvent extends + AbstractEvent { + public NodeLabelManagerEvent(NodeLabelManagerEventType type) { + super(type); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/NodeLabelManagerEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/NodeLabelManagerEventType.java new file mode 100644 index 0000000..0372cc9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/NodeLabelManagerEventType.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package org.apache.hadoop.yarn.server.resourcemanager.label.event;

/**
 * Types of events handled by the node-label manager.
 */
public enum NodeLabelManagerEventType {
  // remove a collection of labels from the cluster (RemoveLabelsEvent)
  REMOVE_LABELS,
  // add a set of labels to the cluster (AddLabelsEvent)
  ADD_LABELS,
  // persist a node -> labels mapping (StoreNodeToLabelsEvent)
  STORE_NODE_TO_LABELS
}
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.label.event; + +import java.util.Collection; + +public class RemoveLabelsEvent extends NodeLabelManagerEvent { + private Collection labels; + + public RemoveLabelsEvent(Collection labels) { + super(NodeLabelManagerEventType.REMOVE_LABELS); + this.labels = labels; + } + + public Collection getLabels() { + return labels; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/StoreNodeToLabelsEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/StoreNodeToLabelsEvent.java new file mode 100644 index 0000000..0bb7dac --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/label/event/StoreNodeToLabelsEvent.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.label.event; + +import java.util.Map; +import java.util.Set; + +public class StoreNodeToLabelsEvent extends NodeLabelManagerEvent { + private Map> nodeToLabels; + + public StoreNodeToLabelsEvent(Map> nodeToLabels) { + super(NodeLabelManagerEventType.STORE_NODE_TO_LABELS); + this.nodeToLabels = nodeToLabels; + } + + public Map> getNodeToLabels() { + return nodeToLabels; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java index 1ebc19f..536caf0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java @@ -19,14 +19,16 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; public interface SchedulingEditPolicy { public void init(Configuration config, EventHandler dispatcher, - PreemptableResourceScheduler scheduler); + PreemptableResourceScheduler scheduler, + NodeLabelManager labelManager); /** * This method is invoked at regular intervals. 
Internally the policy is diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java index 1682f7d..9d11365 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java @@ -57,7 +57,8 @@ public synchronized SchedulingEditPolicy getSchedulingEditPolicy() { @SuppressWarnings("unchecked") public void serviceInit(Configuration conf) throws Exception { scheduleEditPolicy.init(conf, rmContext.getDispatcher().getEventHandler(), - (PreemptableResourceScheduler) rmContext.getScheduler()); + (PreemptableResourceScheduler) rmContext.getScheduler(), + rmContext.getNodeLabelManager()); this.monitorInterval = scheduleEditPolicy.getMonitoringInterval(); super.serviceInit(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java index cea3d7c..ffa845a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java @@ -33,10 +33,12 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent; @@ -125,6 +127,30 @@ private float percentageClusterPreemptionAllowed; private double naturalTerminationFactor; private boolean observeOnly; + private NodeLabelManager labelManager; + + /* + * Variables for considering labels while preempting resource When considering + * preemption resource, + * + * When build queue tree in cloneQueue(), qA's resToBePreempted + * + * resToBePreempted = min(guaranteed - current, pending) + * And we will add it to totalResourceToBePreempted and + * totalResourceToBePreempted when resToBePreempted > 0: + * totalResourceToBePreempted += resToBePreempted + * labelToResourceToBePreempted[label belongs to qA] += resToBePreempted + * + * When trying to preempt a containerX from nodeY First will check + * totalResToBePreempted > 0 If it's < 0, no more resource need to be + * preempted. 
Else: + * if (labelToResourceToBePreempted[any label belongs to nodeY] > 0): + * labelToResourceToBePreempted[label belongs to nodeY] -= containerX.res + * totalResourceToBePreempted -= containerX.res + * mark containerX will be preempted + */ + Resource totalResourceToBePreempted; + Map labelToResourceToBePreempted; public ProportionalCapacityPreemptionPolicy() { clock = new SystemClock(); @@ -132,20 +158,22 @@ public ProportionalCapacityPreemptionPolicy() { public ProportionalCapacityPreemptionPolicy(Configuration config, EventHandler dispatcher, - CapacityScheduler scheduler) { - this(config, dispatcher, scheduler, new SystemClock()); + CapacityScheduler scheduler, NodeLabelManager labelManager) { + this(config, dispatcher, scheduler, new SystemClock(), labelManager); } public ProportionalCapacityPreemptionPolicy(Configuration config, EventHandler dispatcher, - CapacityScheduler scheduler, Clock clock) { - init(config, dispatcher, scheduler); + CapacityScheduler scheduler, Clock clock, NodeLabelManager labelManager) { + init(config, dispatcher, scheduler, labelManager); this.clock = clock; } + @Override public void init(Configuration config, EventHandler disp, - PreemptableResourceScheduler sched) { + PreemptableResourceScheduler sched, + NodeLabelManager labelManager) { LOG.info("Preemption monitor:" + this.getClass().getCanonicalName()); assert null == scheduler : "Unexpected duplicate call to init"; if (!(sched instanceof CapacityScheduler)) { @@ -164,19 +192,28 @@ public void init(Configuration config, config.getFloat(TOTAL_PREEMPTION_PER_ROUND, (float) 0.1); observeOnly = config.getBoolean(OBSERVE_ONLY, false); rc = scheduler.getResourceCalculator(); + this.labelManager = labelManager; + labelToResourceToBePreempted = new HashMap(); } @VisibleForTesting public ResourceCalculator getResourceCalculator() { return rc; } + + @VisibleForTesting + public void setNodeLabelManager(NodeLabelManager mgr) { + this.labelManager = mgr; + } @Override public void 
editSchedule(){ + totalResourceToBePreempted = Resource.newInstance(0, 0); CSQueue root = scheduler.getRootQueue(); Resource clusterResources = Resources.clone(scheduler.getClusterResource()); containerBasedPreemptOrKill(root, clusterResources); + labelToResourceToBePreempted.clear(); } /** @@ -202,6 +239,12 @@ private void containerBasedPreemptOrKill(CSQueue root, percentageClusterPreemptionAllowed); List queues = recursivelyComputeIdealAssignment(tRoot, totalPreemptionAllowed); + for (TempQueue queue : queues) { + // set totalResourceToBePreempted and label-to-resource-to-be-preempted + if (queue.leafQueue != null) { + addResourceToBePreempted(queue, clusterResources); + } + } // based on ideal allocation select containers to be preempted from each // queue and each application @@ -514,6 +557,10 @@ private void preemptAMContainers(Resource clusterResource, maxAMCapacityForThisQueue)) { break; } + if (!possiblePendingRequestOnNode(clusterResource, c.getContainer() + .getNodeId(), c.getContainer().getResource())) { + continue; + } Set contToPrempt = preemptMap.get(c .getApplicationAttemptId()); if (null == contToPrempt) { @@ -578,12 +625,50 @@ private void preemptAMContainers(Resource clusterResource, Resources.addTo(skippedAMSize, c.getContainer().getResource()); continue; } - ret.add(c); - Resources.subtractFrom(rsrcPreempt, c.getContainer().getResource()); + + if (possiblePendingRequestOnNode(clusterResource, c.getContainer() + .getNodeId(), c.getContainer().getResource())) { + ret.add(c); + Resources.subtractFrom(rsrcPreempt, c.getContainer().getResource()); + } } return ret; } + + protected boolean possiblePendingRequestOnNode(Resource clusterResource, + NodeId nodeId, Resource containerRes) { + if (labelManager == null) { + return true; + } + + if (!Resources.greaterThan(rc, clusterResource, totalResourceToBePreempted, + Resources.none())) { + return false; + } + + Set labels = labelManager.getLabelsOnNode(nodeId.getHost()); + + if (labels != null && 
!labels.isEmpty()) { + boolean isPossible = false; + // there're some labels on this node, so we will check if any of + // labelToResourceToBePreempted[label belongs to the node] > 0 + for (String label : labels) { + Resource res = labelToResourceToBePreempted.get(label); + res = res == null ? Resources.none() : res; + if (Resources.greaterThan(rc, clusterResource, res, Resources.none())) { + Resources.subtractFrom(res, containerRes); + isPossible = true; + } + } + + if (!isPossible) { + return false; + } + } + Resources.subtractFrom(totalResourceToBePreempted, containerRes); + return true; + } /** * Compare by reversed priority order first, and then reversed containerId @@ -618,6 +703,26 @@ public String getPolicyName() { return "ProportionalCapacityPreemptionPolicy"; } + private void addResourceToBePreempted(TempQueue leafQueue, + Resource clusterResources) { + Resource toBePreempted = + Resources.min(rc, clusterResources, + Resources.subtract(leafQueue.idealAssigned, leafQueue.current), + leafQueue.pending); + if (Resources.greaterThan(rc, clusterResources, toBePreempted, + Resources.none())) { + Resources.addTo(totalResourceToBePreempted, toBePreempted); + if (leafQueue.labels != null) { + for (String label : leafQueue.labels) { + if (!labelToResourceToBePreempted.containsKey(label)) { + labelToResourceToBePreempted.put(label, Resource.newInstance(0, 0)); + } + Resources.addTo(labelToResourceToBePreempted.get(label), + toBePreempted); + } + } + } + } /** * This method walks a tree of CSQueue and clones the portion of the state @@ -643,14 +748,28 @@ private TempQueue cloneQueues(CSQueue root, Resource clusterResources) { if (root instanceof LeafQueue) { LeafQueue l = (LeafQueue) root; Resource pending = l.getTotalResourcePending(); + + // it is possible queue's guaranteed resource cannot be satisfied because + // of labels, set min(guaranteed, resourceConsiderLabels) as guaranteed + // resource + if (labelManager != null) { + Resource queueResRespectLabels = + 
labelManager.getQueueResource(l.getQueueName(), l.getLabels(), + clusterResources); + guaranteed = + Resources.min(rc, clusterResources, queueResRespectLabels, + guaranteed); + maxCapacity = + Resources.min(rc, clusterResources, queueResRespectLabels, + maxCapacity); + } ret = new TempQueue(queueName, current, pending, guaranteed, - maxCapacity); - + maxCapacity, l.getLabels()); ret.setLeafQueue(l); } else { Resource pending = Resource.newInstance(0, 0); ret = new TempQueue(root.getQueueName(), current, pending, guaranteed, - maxCapacity); + maxCapacity, root.getLabels()); for (CSQueue c : root.getChildQueues()) { ret.addChild(cloneQueues(c, clusterResources)); } @@ -696,9 +815,10 @@ public int compare(TempQueue o1, TempQueue o2) { final ArrayList children; LeafQueue leafQueue; + Set labels; TempQueue(String queueName, Resource current, Resource pending, - Resource guaranteed, Resource maxCapacity) { + Resource guaranteed, Resource maxCapacity, Set labels) { this.queueName = queueName; this.current = current; this.pending = pending; @@ -709,6 +829,7 @@ public int compare(TempQueue o1, TempQueue o2) { this.toBePreempted = Resource.newInstance(0, 0); this.normalizedGuarantee = Float.NaN; this.children = new ArrayList(); + this.labels = labels; } public void setLeafQueue(LeafQueue l){ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 19fc800..dbaa5ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -42,7 +42,6 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -165,6 +164,7 @@ private Object transitionTodo; private RMAppAttemptMetrics attemptMetrics = null; + private String appLabelExpression = null; private static final StateMachineFactory pullContainerUpdates(); + + /** + * Get set of labels in this node + * + * @return labels in this node + */ + public Set getLabels(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 3ce6416..9287d9c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -800,4 +800,12 @@ public int getQueueSize() { public Set getLaunchedContainers() { return this.launchedContainers; } + + @Override + public Set getLabels() { + if (context.getNodeLabelManager() == null) { + return null; + } + return context.getNodeLabelManager().getLabelsOnNode(hostName); + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java index 0bc8ca1..8996d70 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Evolving; @@ -71,4 +72,22 @@ */ public void recoverContainer(Resource clusterResource, SchedulerApplicationAttempt schedulerAttempt, RMContainer rmContainer); + + /** + * Get labels can be accessed of this queue + * labels={*}, means this queue can access any label + * labels={ }, means this queue cannot access any label + * labels={a, b, c} means this queue can access a or b or c + * @return labels + */ + public Set getLabels(); + + /** + * Get default label expression of this queue. If label expression of + * ApplicationSubmissionContext and label expression of Resource Request not + * set, this will be used. 
+ * + * @return default label expression + */ + public String getDefaultLabelExpression(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index ac37c2f..e690066 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -17,23 +17,31 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; +import java.io.IOException; import java.util.List; +import java.util.Set; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import 
org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; +import com.google.common.collect.Sets; + /** * Utilities shared by schedulers. */ @@ -190,7 +198,8 @@ public static void normalizeRequest( * request */ public static void validateResourceRequest(ResourceRequest resReq, - Resource maximumResource) throws InvalidResourceRequestException { + Resource maximumResource, String queueName, YarnScheduler scheduler) + throws InvalidResourceRequestException { if (resReq.getCapability().getMemory() < 0 || resReq.getCapability().getMemory() > maximumResource.getMemory()) { throw new InvalidResourceRequestException("Invalid resource request" @@ -209,5 +218,117 @@ public static void validateResourceRequest(ResourceRequest resReq, + resReq.getCapability().getVirtualCores() + ", maxVirtualCores=" + maximumResource.getVirtualCores()); } + + // Get queue from scheduler + QueueInfo queueInfo; + try { + queueInfo = scheduler.getQueueInfo(queueName, false, false); + } catch (IOException e) { + throw new InvalidResourceRequestException( + "Failed to getQueueInfo while validateResourceRequest", e); + } + + // check labels in the resource request. + String labelExp = resReq.getLabelExpression(); + + // if queue has default label expression, and RR doesn't have, use the + // default label expression of queue + if (labelExp == null) { + labelExp = queueInfo.getDefaultLabelExpression(); + resReq.setLabelExpression(labelExp); + } + + if (labelExp != null && !labelExp.trim().isEmpty()) { + if (!checkQueueLabelExpression(queueInfo.getLabels(), + labelExp)) { + throw new InvalidResourceRequestException("Invalid resource request" + + ", queue=" + queueInfo.getQueueName() + + " doesn't have permission to access all labels " + + "in resource request. labelExpression of resource request=" + + (labelExp == null ? "" + : labelExp) + ". Queue labels=" + (queueInfo.getLabels() == null ? 
"" + : StringUtils.join(queueInfo.getLabels().iterator(), ','))); + } + } + } + + public static boolean checkQueueAccessToNode(Set queueLabels, + Set nodeLabels) { + // if queue's label is *, it can access any node + if (queueLabels.contains(NodeLabelManager.ANY)) { + return true; + } + // any queue can access to a node without label + if (nodeLabels == null || nodeLabels.isEmpty()) { + return true; + } + // a queue can access to a node only if it contains any label of the node + if (Sets.intersection(queueLabels, nodeLabels).size() > 0) { + return true; + } + // sorry, you cannot access + return false; + } + + public static void checkAndThrowIfLabelNotIncluded(NodeLabelManager mgr, + Set labels) throws IOException { + if (mgr == null) { + if (labels != null && !labels.isEmpty()) { + throw new IOException("NodeLabelManager is null, please check"); + } + return; + } + + if (labels != null) { + for (String label : labels) { + if (!mgr.containsLabel(label)) { + throw new IOException("NodeLabelManager doesn't include label = " + + label + ", please check."); + } + } + } + } + + public static boolean checkNodeLabelExpression(Set nodeLabels, + String labelExpression) { + // if label expression is empty, we can allocate container on any node + if (labelExpression == null) { + return true; + } + + // empty label expression can only allocate on node with empty labels + if (labelExpression.trim().isEmpty()) { + if (!nodeLabels.isEmpty()) { + return false; + } + } + + for (String str : labelExpression.split("&&")) { + if (!str.trim().isEmpty() + && (nodeLabels == null || !nodeLabels.contains(str.trim() + .toLowerCase()))) { + return false; + } + } + return true; + } + + public static boolean checkQueueLabelExpression(Set queueLabels, + String labelExpression) { + if (queueLabels.contains(NodeLabelManager.ANY)) { + return true; + } + // if label expression is empty, we can allocate container on any node + if (labelExpression == null) { + return true; + } + for (String str : 
labelExpression.split("&&")) { + if (!str.trim().isEmpty() + && (queueLabels == null || !queueLabels.contains(str.trim() + .toLowerCase()))) { + return false; + } + } + return true; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java index 04c2fd5..711b26b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java @@ -72,9 +72,18 @@ /** * Get the configured capacity of the queue. 
- * @return queue capacity + * @return configured queue capacity */ public float getCapacity(); + + /** + * Get actual capacity of the queue, this may be different from + * configured capacity when mis-config take place, like add labels to the + * cluster + * + * @return actual queue capacity + */ + public float getAbsActualCapacity(); /** * Get capacity of the parent of the queue as a function of the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java index 737062b..04b3442 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java @@ -19,7 +19,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.utils.Lock; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index a8ef942..fe63d7b 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -57,6 +58,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.*; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; @@ -189,6 +191,7 @@ public Configuration getConf() { private boolean scheduleAsynchronously; private AsyncScheduleThread asyncSchedulerThread; + private NodeLabelManager labelManager; /** * EXPERT @@ -273,6 +276,8 @@ private synchronized void initScheduler(Configuration configuration) throws this.applications = new ConcurrentHashMap>(); + this.labelManager = rmContext.getNodeLabelManager(); + initializeQueues(this.conf); scheduleAsynchronously = this.conf.getScheduleAynschronously(); @@ -440,11 +445,13 @@ private void initializeQueueMappings() throws IOException { @Lock(CapacityScheduler.class) private void initializeQueues(CapacitySchedulerConfiguration conf) throws IOException { - + Map> queueToLabels = new HashMap>(); root = parseQueue(this, conf, null, CapacitySchedulerConfiguration.ROOT, - queues, queues, noop); - + queues, 
queues, noop, queueToLabels); + if (labelManager != null) { + labelManager.reinitializeQueueLabels(queueToLabels); + } LOG.info("Initialized root queue " + root); initializeQueueMappings(); } @@ -454,9 +461,10 @@ private void reinitializeQueues(CapacitySchedulerConfiguration conf) throws IOException { // Parse new queues Map newQueues = new HashMap(); + Map> queueToLabels = new HashMap>(); CSQueue newRoot = parseQueue(this, conf, null, CapacitySchedulerConfiguration.ROOT, - newQueues, queues, noop); + newQueues, queues, noop, queueToLabels); // Ensure all existing queues are still present validateExistingQueues(queues, newQueues); @@ -467,6 +475,10 @@ private void reinitializeQueues(CapacitySchedulerConfiguration conf) // Re-configure queues root.reinitialize(newRoot, clusterResource); initializeQueueMappings(); + + if (labelManager != null) { + labelManager.reinitializeQueueLabels(queueToLabels); + } } /** @@ -506,11 +518,12 @@ private void addNewQueues( @Lock(CapacityScheduler.class) static CSQueue parseQueue( - CapacitySchedulerContext csContext, + CapacitySchedulerContext csContext, CapacitySchedulerConfiguration conf, CSQueue parent, String queueName, Map queues, Map oldQueues, - QueueHook hook) throws IOException { + QueueHook hook, + Map> queueToLabels) throws IOException { CSQueue queue; String[] childQueueNames = conf.getQueues((parent == null) ? 
@@ -521,13 +534,13 @@ static CSQueue parseQueue( "Queue configuration missing child queue names for " + queueName); } queue = - new LeafQueue(csContext, queueName, parent,oldQueues.get(queueName)); + new LeafQueue(csContext, queueName, parent, oldQueues.get(queueName)); // Used only for unit tests queue = hook.hook(queue); } else { ParentQueue parentQueue = - new ParentQueue(csContext, queueName, parent,oldQueues.get(queueName)); + new ParentQueue(csContext, queueName, parent, oldQueues.get(queueName)); // Used only for unit tests queue = hook.hook(parentQueue); @@ -536,7 +549,7 @@ static CSQueue parseQueue( for (String childQueueName : childQueueNames) { CSQueue childQueue = parseQueue(csContext, conf, queue, childQueueName, - queues, oldQueues, hook); + queues, oldQueues, hook, queueToLabels); childQueues.add(childQueue); } parentQueue.setChildQueues(childQueues); @@ -548,6 +561,9 @@ static CSQueue parseQueue( + ". Leaf queue names must be distinct"); } queues.put(queueName, queue); + if (queueToLabels != null) { + queueToLabels.put(queueName, queue.getLabels()); + } LOG.info("Initialized queue: " + queue); return queue; @@ -1044,11 +1060,18 @@ public void handle(SchedulerEvent event) { } private synchronized void addNode(RMNode nodeManager) { + // update this node to node label manager + if (labelManager != null) { + labelManager.activeNode(nodeManager.getNodeID(), + nodeManager.getTotalCapability()); + } + this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager, usePortForNodeName)); Resources.addTo(clusterResource, nodeManager.getTotalCapability()); root.updateClusterResource(clusterResource); int numNodes = numNodeManagers.incrementAndGet(); + LOG.info("Added node " + nodeManager.getNodeAddress() + " clusterResource: " + clusterResource); @@ -1058,6 +1081,11 @@ private synchronized void addNode(RMNode nodeManager) { } private synchronized void removeNode(RMNode nodeInfo) { + // update this node to node label manager + if (labelManager != 
null) { + labelManager.deactiveNode(nodeInfo.getNodeID()); + } + FiCaSchedulerNode node = nodes.get(nodeInfo.getNodeID()); if (node == null) { return; @@ -1091,6 +1119,7 @@ private synchronized void removeNode(RMNode nodeInfo) { } this.nodes.remove(nodeInfo.getNodeID()); + LOG.info("Removed node " + nodeInfo.getNodeAddress() + " clusterResource: " + clusterResource); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index af6bdc3..6492832 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -18,7 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.StringTokenizer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -31,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import 
org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; @@ -81,6 +90,12 @@ @Private public static final String STATE = "state"; + + @Private + public static final String LABELS = "labels"; + + @Private + public static final String DEFAULT_LABEL_EXPRESSION = "default-label-expression"; @Private public static final int DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS = 10000; @@ -308,6 +323,42 @@ public QueueState getState(String queue) { QueueState.valueOf(state.toUpperCase()) : QueueState.RUNNING; } + public void setLabels(String queue, Set labels) { + if (labels == null) { + return; + } + String str = StringUtils.join(",", labels); + set(getQueuePrefix(queue) + LABELS, str); + } + + public Set getLabels(String queue) { + String labelStr = get(getQueuePrefix(queue) + LABELS); + if (labelStr == null) { + return queue.equals(ROOT) ? NodeLabelManager.EMPTY_STRING_SET : null; + } else { + Set set = new HashSet(); + for (String str : labelStr.split(",")) { + if (!str.trim().isEmpty()) { + set.add(str.trim().toLowerCase()); + } + } + // if labels contains "*", only leave ANY behind + if (set.contains(NodeLabelManager.ANY)) { + set.clear(); + set.add(NodeLabelManager.ANY); + } + return Collections.unmodifiableSet(set); + } + } + + public String getDefaultLabelExpression(String queue) { + return get(getQueuePrefix(queue) + DEFAULT_LABEL_EXPRESSION); + } + + public void setDefaultLabelExpression(String queue, String exp) { + set(getQueuePrefix(queue) + DEFAULT_LABEL_EXPRESSION, exp); + } + private static String getAclKey(QueueACL acl) { return "acl_" + acl.toString().toLowerCase(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 5c93c5f..1a241c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -30,6 +30,7 @@ import java.util.Set; import java.util.TreeSet; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -52,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; @@ -60,6 +62,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.utils.BuilderUtils; @@ -129,12 +132,24 @@ private 
final ResourceCalculator resourceCalculator; + private Set labels; + + private String defaultLabelExpression; + + private NodeLabelManager labelManager; + + // cache last cluster resource to compute actual capacity + private Resource lastClusterResource = Resources.none(); + public LeafQueue(CapacitySchedulerContext cs, - String queueName, CSQueue parent, CSQueue old) { + String queueName, CSQueue parent, CSQueue old) throws IOException { + this.labelManager = cs.getRMContext().getNodeLabelManager(); this.scheduler = cs; this.queueName = queueName; this.parent = parent; - + this.labels = cs.getConfiguration().getLabels(getQueuePath()); + this.defaultLabelExpression = cs.getConfiguration() + .getDefaultLabelExpression(getQueuePath()); this.resourceCalculator = cs.getResourceCalculator(); // must be after parent and queueName are initialized @@ -196,14 +211,12 @@ public LeafQueue(CapacitySchedulerContext cs, Map acls = cs.getConfiguration().getAcls(getQueuePath()); - setupQueueConfigs( - cs.getClusterResource(), - capacity, absoluteCapacity, - maximumCapacity, absoluteMaxCapacity, - userLimit, userLimitFactor, + setupQueueConfigs(cs.getClusterResource(), capacity, absoluteCapacity, + maximumCapacity, absoluteMaxCapacity, userLimit, userLimitFactor, maxApplications, maxAMResourcePerQueuePercent, maxApplicationsPerUser, maxActiveApplications, maxActiveApplicationsPerUser, state, acls, cs - .getConfiguration().getNodeLocalityDelay()); + .getConfiguration().getNodeLocalityDelay(), labels, + defaultLabelExpression); if(LOG.isDebugEnabled()) { LOG.debug("LeafQueue:" + " name=" + queueName @@ -217,15 +230,14 @@ public LeafQueue(CapacitySchedulerContext cs, this.activeApplications = new TreeSet(applicationComparator); } - private synchronized void setupQueueConfigs( - Resource clusterResource, - float capacity, float absoluteCapacity, - float maximumCapacity, float absoluteMaxCapacity, - int userLimit, float userLimitFactor, + private synchronized void 
setupQueueConfigs(Resource clusterResource, + float capacity, float absoluteCapacity, float maximumCapacity, + float absoluteMaxCapacity, int userLimit, float userLimitFactor, int maxApplications, float maxAMResourcePerQueuePercent, int maxApplicationsPerUser, int maxActiveApplications, int maxActiveApplicationsPerUser, QueueState state, - Map acls, int nodeLocalityDelay) + Map acls, int nodeLocalityDelay, + Set labels, String defaultLabelExpression) throws IOException { // Sanity check CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity); @@ -251,10 +263,38 @@ private synchronized void setupQueueConfigs( this.state = state; this.acls = acls; + + // set labels + this.labels = labels; + if (this.labels == null) { + this.labels = parent.getLabels(); + } + SchedulerUtils.checkAndThrowIfLabelNotIncluded(labelManager, this.labels); + + // set default label expression + this.defaultLabelExpression = defaultLabelExpression; + if (this.defaultLabelExpression == null) { + this.defaultLabelExpression = parent.getDefaultLabelExpression(); + } + if (!SchedulerUtils.checkQueueLabelExpression(this.labels, + this.defaultLabelExpression)) { + throw new IOException("Invalid default label expression of " + + " queue=" + + queueInfo.getQueueName() + + " doesn't have permission to access all labels " + + "in default label expression. labelExpression of resource request=" + + (this.defaultLabelExpression == null ? "" + : this.defaultLabelExpression) + + ". Queue labels=" + + (queueInfo.getLabels() == null ? 
"" : StringUtils.join(queueInfo + .getLabels().iterator(), ','))); + } this.queueInfo.setCapacity(this.capacity); this.queueInfo.setMaximumCapacity(this.maximumCapacity); this.queueInfo.setQueueState(this.state); + this.queueInfo.setLabels(this.labels); + this.queueInfo.setDefaultLabelExpression(defaultLabelExpression); this.nodeLocalityDelay = nodeLocalityDelay; @@ -267,6 +307,14 @@ private synchronized void setupQueueConfigs( CSQueueUtils.updateQueueStatistics( resourceCalculator, this, getParent(), clusterResource, minimumAllocation); + + StringBuilder labelStrBuilder = new StringBuilder(); + if (this.labels != null) { + for (String s : this.labels) { + labelStrBuilder.append(s); + labelStrBuilder.append(","); + } + } LOG.info("Initializing " + queueName + "\n" + "capacity = " + capacity + @@ -321,7 +369,8 @@ private synchronized void setupQueueConfigs( " [= configuredState ]" + "\n" + "acls = " + aclsString + " [= configuredAcls ]" + "\n" + - "nodeLocalityDelay = " + nodeLocalityDelay + "\n"); + "nodeLocalityDelay = " + nodeLocalityDelay + "\n" + + "labels=" + labelStrBuilder.toString() + "\n"); } @Override @@ -565,6 +614,11 @@ public String toString() { "numApps=" + getNumApplications() + ", " + "numContainers=" + getNumContainers(); } + + @VisibleForTesting + public synchronized void setNodeLabelManager(NodeLabelManager mgr) { + this.labelManager = mgr; + } @VisibleForTesting public synchronized User getUser(String userName) { @@ -613,7 +667,9 @@ public synchronized void reinitialize( newlyParsedLeafQueue.getMaximumActiveApplications(), newlyParsedLeafQueue.getMaximumActiveApplicationsPerUser(), newlyParsedLeafQueue.state, newlyParsedLeafQueue.acls, - newlyParsedLeafQueue.getNodeLocalityDelay()); + newlyParsedLeafQueue.getNodeLocalityDelay(), + newlyParsedLeafQueue.labels, + newlyParsedLeafQueue.defaultLabelExpression); // queue metrics are updated, more resource may be available // activate the pending applications if possible @@ -804,12 +860,19 @@ private 
synchronized FiCaSchedulerApp getApplication( @Override public synchronized CSAssignment assignContainers(Resource clusterResource, FiCaSchedulerNode node) { - if(LOG.isDebugEnabled()) { LOG.debug("assignContainers: node=" + node.getNodeName() + " #applications=" + activeApplications.size()); } + // if our queue cannot access this node, just return + if (labelManager != null) { + if (!SchedulerUtils.checkQueueAccessToNode(labels, + labelManager.getLabelsOnNode(node.getNodeName()))) { + return NULL_ASSIGNMENT; + } + } + // Check for reserved resources RMContainer reservedContainer = node.getReservedContainer(); if (reservedContainer != null) { @@ -968,12 +1031,12 @@ private synchronized boolean assignToQueue(Resource clusterResource, @Lock({LeafQueue.class, FiCaSchedulerApp.class}) private Resource computeUserLimitAndSetHeadroom( - FiCaSchedulerApp application, Resource clusterResource, Resource required) { - + FiCaSchedulerApp application, Resource clusterResource, Resource required) { String user = application.getUser(); - /** - * Headroom is min((userLimit, queue-max-cap) - consumed) + /** + * Headroom = min(userLimit, queue-max-cap, max-capacity-consider-label) - + * consumed */ Resource userLimit = // User limit @@ -992,11 +1055,21 @@ private Resource computeUserLimitAndSetHeadroom( absoluteMaxAvailCapacity, minimumAllocation); - Resource userConsumed = getUser(user).getConsumedResources(); - Resource headroom = + // Max possible capacity this queue can access, will consider label only. + Resource maxCapacityConsiderLabel = + labelManager == null ? 
clusterResource : labelManager.getQueueResource( + queueName, labels, clusterResource); + maxCapacityConsiderLabel = + Resources.roundDown(resourceCalculator, maxCapacityConsiderLabel, + minimumAllocation); + Resource userConsumed = getUser(user).getConsumedResources(); + + Resource headroom = Resources.subtract( - Resources.min(resourceCalculator, clusterResource, - userLimit, queueMaxCap), + Resources.min(resourceCalculator, clusterResource, + Resources.min(resourceCalculator, clusterResource, userLimit, + queueMaxCap), + maxCapacityConsiderLabel), userConsumed); if (LOG.isDebugEnabled()) { @@ -1312,6 +1385,22 @@ private Resource assignContainer(Resource clusterResource, FiCaSchedulerNode nod + " priority=" + priority.getPriority() + " request=" + request + " type=" + type); } + + // check if the resource request can access the label + if (labelManager != null) { + if (!SchedulerUtils.checkNodeLabelExpression( + labelManager.getLabelsOnNode(node.getNodeName()), + request.getLabelExpression())) { + // this is a reserved container, but we cannot allocate it now according + // to label not match. This can be caused by node label changed + // We should un-reserve this container. 
+ if (rmContainer != null) { + unreserve(application, priority, node, rmContainer); + } + return Resources.none(); + } + } + Resource capability = request.getCapability(); Resource available = node.getAvailableResource(); Resource totalResource = node.getTotalResource(); @@ -1501,6 +1590,8 @@ synchronized void releaseResource(Resource clusterResource, @Override public synchronized void updateClusterResource(Resource clusterResource) { + lastClusterResource = clusterResource; + // Update queue properties maxActiveApplications = CSQueueUtils.computeMaxActiveApplications( @@ -1661,4 +1752,32 @@ public void detachContainer(Resource clusterResource, getParent().detachContainer(clusterResource, application, rmContainer); } } + + @Override + public Set getLabels() { + return labels; + } + + @Override + public float getAbsActualCapacity() { + if (Resources.lessThanOrEqual(resourceCalculator, lastClusterResource, + lastClusterResource, Resources.none())) { + return absoluteCapacity; + } + + Resource resourceRespectLabels = + labelManager == null ? lastClusterResource : labelManager + .getQueueResource(queueName, labels, lastClusterResource); + float absActualCapacity = + Resources.divide(resourceCalculator, lastClusterResource, + resourceRespectLabels, lastClusterResource); + + return absActualCapacity > absoluteCapacity ? 
absoluteCapacity + : absActualCapacity; + } + + @Override + public String getDefaultLabelExpression() { + return defaultLabelExpression; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 8c654b7..0bf29c5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; @@ -53,6 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import 
org.apache.hadoop.yarn.util.resource.ResourceCalculator; @@ -100,11 +102,14 @@ RecordFactoryProvider.getRecordFactory(null); private final ResourceCalculator resourceCalculator; + private Set labels; + private final NodeLabelManager labelManager; + private String defaultLabelExpression; public ParentQueue(CapacitySchedulerContext cs, - String queueName, CSQueue parent, CSQueue old) { + String queueName, CSQueue parent, CSQueue old) throws IOException { minimumAllocation = cs.getMinimumResourceCapability(); - + this.labelManager = cs.getRMContext().getNodeLabelManager(); this.parent = parent; this.queueName = queueName; this.rootQueue = (parent == null); @@ -117,6 +122,9 @@ public ParentQueue(CapacitySchedulerContext cs, cs.getConf()); float rawCapacity = cs.getConfiguration().getCapacity(getQueuePath()); + this.labels = cs.getConfiguration().getLabels(getQueuePath()); + this.defaultLabelExpression = cs.getConfiguration() + .getDefaultLabelExpression(getQueuePath()); if (rootQueue && (rawCapacity != CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE)) { @@ -144,9 +152,9 @@ public ParentQueue(CapacitySchedulerContext cs, this.queueInfo.setQueueName(queueName); this.queueInfo.setChildQueues(new ArrayList()); - setupQueueConfigs(cs.getClusterResource(), - capacity, absoluteCapacity, - maximumCapacity, absoluteMaxCapacity, state, acls); + setupQueueConfigs(cs.getClusterResource(), capacity, absoluteCapacity, + maximumCapacity, absoluteMaxCapacity, state, acls, labels, + defaultLabelExpression); this.queueComparator = cs.getQueueComparator(); this.childQueues = new TreeSet(queueComparator); @@ -156,12 +164,11 @@ public ParentQueue(CapacitySchedulerContext cs, ", fullname=" + getQueuePath()); } - private synchronized void setupQueueConfigs( - Resource clusterResource, - float capacity, float absoluteCapacity, - float maximumCapacity, float absoluteMaxCapacity, - QueueState state, Map acls - ) { + private synchronized void setupQueueConfigs(Resource clusterResource, + 
float capacity, float absoluteCapacity, float maximumCapacity, + float absoluteMaxCapacity, QueueState state, + Map acls, Set labels, + String defaultLabelExpression) throws IOException { // Sanity check CSQueueUtils.checkMaxCapacity(getQueueName(), capacity, maximumCapacity); CSQueueUtils.checkAbsoluteCapacities(getQueueName(), absoluteCapacity, absoluteMaxCapacity); @@ -176,9 +183,24 @@ private synchronized void setupQueueConfigs( this.acls = acls; + // set labels + this.labels = labels; + if (this.labels == null && parent != null) { + this.labels = parent.getLabels(); + SchedulerUtils.checkAndThrowIfLabelNotIncluded(labelManager, this.labels); + } + + // set label expression + this.defaultLabelExpression = defaultLabelExpression; + if (this.defaultLabelExpression == null && parent != null) { + this.defaultLabelExpression = parent.getDefaultLabelExpression(); + } + + this.queueInfo.setLabels(labels); this.queueInfo.setCapacity(this.capacity); this.queueInfo.setMaximumCapacity(this.maximumCapacity); this.queueInfo.setQueueState(this.state); + this.queueInfo.setDefaultLabelExpression(this.defaultLabelExpression); StringBuilder aclsString = new StringBuilder(); for (Map.Entry e : acls.entrySet()) { @@ -188,6 +210,14 @@ private synchronized void setupQueueConfigs( // Update metrics CSQueueUtils.updateQueueStatistics( resourceCalculator, this, parent, clusterResource, minimumAllocation); + + StringBuilder labelStrBuilder = new StringBuilder(); + if (labels != null) { + for (String s : labels) { + labelStrBuilder.append(s); + labelStrBuilder.append(","); + } + } LOG.info(queueName + ", capacity=" + capacity + @@ -195,7 +225,8 @@ private synchronized void setupQueueConfigs( ", maxCapacity=" + maximumCapacity + ", asboluteMaxCapacity=" + absoluteMaxCapacity + ", state=" + state + - ", acls=" + aclsString); + ", acls=" + aclsString + + ", labels=" + labelStrBuilder.toString() + "\n"); } private static float PRECISION = 0.0005f; // 0.05% precision @@ -383,7 +414,9 @@ 
public synchronized void reinitialize( newlyParsedParentQueue.maximumCapacity, newlyParsedParentQueue.absoluteMaxCapacity, newlyParsedParentQueue.state, - newlyParsedParentQueue.acls); + newlyParsedParentQueue.acls, + newlyParsedParentQueue.labels, + newlyParsedParentQueue.defaultLabelExpression); // Re-configure existing child queues and add new ones // The CS has already checked to ensure all existing child queues are present! @@ -824,4 +857,20 @@ public void detachContainer(Resource clusterResource, } } } + + public Set getLabels() { + return labels; + } + + @Override + public float getAbsActualCapacity() { + // for now, simply return actual capacity = guaranteed capacity for parent + // queue + return absoluteCapacity; + } + + @Override + public String getDefaultLabelExpression() { + return defaultLabelExpression; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index b9fcc4b..d517cdd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -255,4 +256,16 @@ public String toString() { return String.format("[%s, demand=%s, running=%s, share=%s, w=%s]", getName(), getDemand(), 
getResourceUsage(), fairShare, getWeights()); } + + @Override + public Set getLabels() { + // TODO, add implementation for FS + return null; + } + + @Override + public String getDefaultLabelExpression() { + // TODO, add implementation for FS + return null; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index d72e796..0bd368a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -25,6 +25,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentSkipListMap; import org.apache.commons.logging.Log; @@ -187,6 +188,18 @@ public void recoverContainer(Resource clusterResource, updateAppHeadRoom(schedulerAttempt); updateAvailableResourcesMetrics(); } + + @Override + public Set getLabels() { + // TODO add implementation for FIFO scheduler + return null; + } + + @Override + public String getDefaultLabelExpression() { + // TODO add implementation for FIFO scheduler + return null; + } }; public FifoScheduler() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java index a53ad98..eabd5c2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java @@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.A; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL; @@ -49,8 +50,10 @@ static final float Q_STATS_POS = Q_MAX_WIDTH + 0.05f; static final String Q_END = "left:101%"; static final String Q_GIVEN = "left:0%;background:none;border:1px dashed rgba(0,0,0,0.25)"; + static final String Q_ACTUAL = "left:0%;background:none;border:1px dashed rgba(255,0,0,0.8)"; static final String Q_OVER = "background:rgba(255, 140, 0, 0.8)"; static final String Q_UNDER = "background:rgba(50, 205, 50, 0.8)"; + static final String Q_MAX_LESS_ACTUAL = "background: rgba(255, 255, 0, 0.3)"; @RequestScoped static class CSQInfo { @@ -120,7 +123,9 @@ protected void render(Block html) { _("Configured Max Capacity:", percent(lqinfo.getMaxCapacity() / 100)). _("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%"). _("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor())). - _r("Active users: ", activeUserList.toString()); + _("Active users: ", activeUserList.toString()). 
+ _("Actual Absolute Capacity:", percent(lqinfo.getAbsActualCapacity() / 100)). + _r("Labels Can Access:", StringUtils.join(",", lqinfo.getLabels())); html._(InfoBlock.class); @@ -147,18 +152,31 @@ public void render(Block html) { float absCap = info.getAbsoluteCapacity() / 100; float absMaxCap = info.getAbsoluteMaxCapacity() / 100; float absUsedCap = info.getAbsoluteUsedCapacity() / 100; - LI> li = ul. - li(). - a(_Q).$style(width(absMaxCap * Q_MAX_WIDTH)). - $title(join("Absolute Capacity:", percent(absCap))). - span().$style(join(Q_GIVEN, ";font-size:1px;", width(absCap/absMaxCap))). - _('.')._(). - span().$style(join(width(absUsedCap/absMaxCap), - ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : Q_UNDER)). - _('.')._(). - span(".q", info.getQueuePath().substring(5))._(). - span().$class("qstats").$style(left(Q_STATS_POS)). - _(join(percent(used), " used"))._(); + float absActualCap = info.getAbsActualCapacity() / 100; + + A>> a = ul. + li().a(_Q).$style(width(absMaxCap * Q_MAX_WIDTH)). + $title(join("Absolute Capacity:", percent(absCap))); + + if (absActualCap < absCap) { + a = a.span().$style(join(width(1), + ";font-size:1px;left:0%;", Q_MAX_LESS_ACTUAL)). + _('.')._(); + } + + LI> li = a. + span().$style(join(absActualCap < absCap ? Q_ACTUAL : Q_GIVEN, + ";font-size:1px;", + (absActualCap < absCap ? + width(absActualCap / absMaxCap) : + width(absCap / absMaxCap)))). + _('.')._(). + span().$style(join(width(absUsedCap/absMaxCap), + ";font-size:1px;left:0%;", absUsedCap > absCap ? Q_OVER : Q_UNDER)). + _('.')._(). + span(".q", info.getQueuePath().substring(5))._(). + span().$class("qstats").$style(left(Q_STATS_POS)). + _(join(percent(used), " used"))._(); csqinfo.qinfo = info; if (info.getQueues() == null) { @@ -209,12 +227,16 @@ public void render(Block html) { span().$style("font-weight: bold")._("Legend:")._(). span().$class("qlegend ui-corner-all").$style(Q_GIVEN). _("Capacity")._(). + span().$class("qlegend ui-corner-all").$style(Q_ACTUAL). 
+ _("Actual Capacity (< Capacity)")._(). span().$class("qlegend ui-corner-all").$style(Q_UNDER). _("Used")._(). span().$class("qlegend ui-corner-all").$style(Q_OVER). _("Used (over capacity)")._(). span().$class("qlegend ui-corner-all ui-state-default"). _("Max Capacity")._(). + span().$class("qlegend ui-corner-all").$style(Q_MAX_LESS_ACTUAL). + _("Max Capacity (< Capacity)")._(). _(). li(). a(_Q).$style(width(Q_MAX_WIDTH)). diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index f10e255..b4447ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -66,6 +66,7 @@ protected void render(Block html) { TBODY> tbody = html.table("#nodes"). thead(). tr(). + th(".label", "Labels"). th(".rack", "Rack"). th(".state", "Node State"). th(".nodeaddress", "Node Address"). @@ -113,6 +114,7 @@ protected void render(Block html) { int usedMemory = (int)info.getUsedMemory(); int availableMemory = (int)info.getAvailableMemory(); TR>> row = tbody.tr(). + td(StringUtils.join(",", info.getLabels())). td(info.getRack()). td(info.getState()). 
td(info.getNodeId()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index 24a90bd..ad9a1ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -132,6 +132,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelNamesInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.webapp.BadRequestException; @@ -714,6 +719,146 @@ public Response updateAppState(AppState targetState, return Response.status(Status.OK).entity(ret).build(); } + + @GET + @Path("/labels/all-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public LabelNamesInfo getLabels(@Context HttpServletRequest hsr) throws AuthorizationException, IOException { + init(); + + LabelNamesInfo ret = new 
LabelNamesInfo(rm.getRMContext().getNodeLabelManager().getLabels()); + + return ret; + } + + @GET + @Path("/labels/all-nodes-to-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public NodesToLabelsInfo getNodesToLabels(@Context HttpServletRequest hsr, + @QueryParam("labels") Set labelsQuery) throws AuthorizationException, IOException { + init(); + + NodesToLabelsInfo nodesToLabelsInfo = new NodesToLabelsInfo(); + + Map> nodesToLabels = rm.getRMContext().getNodeLabelManager().getNodesToLabels(); + + boolean filterLabels = false; + if (labelsQuery != null && !labelsQuery.isEmpty()) { + filterLabels = true; + } + + for (Map.Entry> nlEntry : nodesToLabels.entrySet()) { + Set nodeLabels = nlEntry.getValue(); + if (filterLabels) { + Set labelIntersect = new HashSet(nodeLabels); + labelIntersect.retainAll(labelsQuery); + if (!labelIntersect.isEmpty()) { + nodesToLabelsInfo.add(new NodeToLabelsInfo(nlEntry.getKey(), labelIntersect)); + } + } else { + nodesToLabelsInfo.add(new NodeToLabelsInfo(nlEntry.getKey(), nlEntry.getValue())); + } + } + + return nodesToLabelsInfo; + } + + @POST + @Path("/labels/add-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public Response addLabels(final LabelNamesInfo newLabels, + @Context HttpServletRequest hsr) + throws Exception { + init(); + + UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true); + if (callerUGI == null) { + String msg = "Unable to obtain user name, user not authenticated"; + throw new AuthorizationException(msg); + } + callerUGI + .doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + + rm.getRMContext().getNodeLabelManager().addLabels(new HashSet(newLabels.getLabels())); + + return null; + } + }); + + return Response.status(Status.OK).build(); + + } + + @POST + @Path("/labels/remove-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public Response removeLabels(final 
LabelNamesInfo oldLabels, + @Context HttpServletRequest hsr) + throws Exception { + init(); + + UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true); + if (callerUGI == null) { + String msg = "Unable to obtain user name, user not authenticated"; + throw new AuthorizationException(msg); + } + callerUGI + .doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + + rm.getRMContext().getNodeLabelManager().removeLabels(new HashSet(oldLabels.getLabels())); + + return null; + } + }); + + + return Response.status(Status.OK).build(); + + } + + @POST + @Path("/labels/set-node-to-labels") + @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) + public Response addLabels(NodesToLabelsInfo newNodesToLabelsInfo, + @Context HttpServletRequest hsr) + throws Exception { + init(); + + final Map> newNodeToLabels = new HashMap>(); + + for (NodeToLabelsInfo nodeToLabelsInfo : newNodesToLabelsInfo.getNodeToLabelsInfos()) { + //It's a list, the same node could be specified > once + Set labels = newNodeToLabels.get(nodeToLabelsInfo.getNode()); + if (labels == null) { + labels = new HashSet(); + newNodeToLabels.put(nodeToLabelsInfo.getNode(), labels); + } + labels.addAll(nodeToLabelsInfo.getLabels()); + } + + UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true); + if (callerUGI == null) { + String msg = "Unable to obtain user name, user not authenticated"; + throw new AuthorizationException(msg); + } + callerUGI + .doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + + rm.getRMContext().getNodeLabelManager().setLabelsOnMultipleNodes(newNodeToLabels); + + return null; + } + }); + + return Response.status(Status.OK).build(); + + } protected Response killApp(RMApp app, UserGroupInformation callerUGI, HttpServletRequest hsr) throws IOException, InterruptedException { @@ -964,7 +1109,9 @@ protected ApplicationSubmissionContext createAppSubmissionContext( 
newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(), createAppSubmissionContextResource(newApp), newApp.getApplicationType(), - newApp.getKeepContainersAcrossApplicationAttempts()); + newApp.getKeepContainersAcrossApplicationAttempts(), + newApp.getAppLabelExpression(), + newApp.getAMContainerLabelExpression()); appContext.setApplicationTags(newApp.getApplicationTags()); return appContext; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java index f7233e6..d8d93e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationSubmissionContextInfo.java @@ -71,6 +71,12 @@ @XmlElementWrapper(name = "application-tags") @XmlElement(name = "tag") Set tags; + + @XmlElement(name = "app-label-expression") + String appLabelExpression; + + @XmlElement(name = "am-container-label-expression") + String amContainerLabelExpression; public ApplicationSubmissionContextInfo() { applicationId = ""; @@ -83,6 +89,8 @@ public ApplicationSubmissionContextInfo() { keepContainers = false; applicationType = ""; tags = new HashSet(); + appLabelExpression = ""; + amContainerLabelExpression = ""; } public String getApplicationId() { @@ -132,6 +140,14 @@ public boolean getKeepContainersAcrossApplicationAttempts() { public Set getApplicationTags() { return tags; } + + public String getAppLabelExpression() { + return 
appLabelExpression; + } + + public String getAMContainerLabelExpression() { + return amContainerLabelExpression; + } public void setApplicationId(String applicationId) { this.applicationId = applicationId; @@ -182,5 +198,12 @@ public void setApplicationType(String applicationType) { public void setApplicationTags(Set tags) { this.tags = tags; } + + public void setAppLabelExpression(String appLabelExpression) { + this.appLabelExpression = appLabelExpression; + } + public void setAMContainerLabelExpression(String labelExpression) { + this.amContainerLabelExpression = labelExpression; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java index ac16ce0..4f38d2d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Set; + import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @@ -48,6 +52,8 @@ protected QueueState state; protected CapacitySchedulerQueueInfoList queues; protected ResourceInfo resourcesUsed; + protected float absActualCapacity; + protected ArrayList labels = new ArrayList(); CapacitySchedulerQueueInfo() { }; @@ -69,6 +75,14 @@ 
queueName = q.getQueueName(); state = q.getState(); resourcesUsed = new ResourceInfo(q.getUsedResources()); + absActualCapacity = cap(q.getAbsActualCapacity(), 0f, 1f) * 100; + + // add labels + Set labelSet = q.getLabels(); + if (labelSet != null) { + labels.addAll(labelSet); + Collections.sort(labels); + } } public float getCapacity() { @@ -94,6 +108,10 @@ public float getAbsoluteMaxCapacity() { public float getAbsoluteUsedCapacity() { return absoluteUsedCapacity; } + + public float getAbsActualCapacity() { + return absActualCapacity; + } public int getNumApplications() { return numApplications; @@ -118,6 +136,10 @@ public CapacitySchedulerQueueInfoList getQueues() { public ResourceInfo getResourcesUsed() { return resourcesUsed; } + + public ArrayList getLabels() { + return labels; + } /** * Limit a value to a specified range. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelInfo.java new file mode 100644 index 0000000..bd1f926 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelInfo.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.*; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "labelInfo") +@XmlAccessorType(XmlAccessType.FIELD) +public class LabelInfo { + + protected String labelName; + protected ArrayList activeNodes = new ArrayList(); + protected ArrayList inactiveNodes = new ArrayList(); + + public LabelInfo() { + } // JAXB needs this + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java new file mode 100644 index 0000000..e71954d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelNamesInfo.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.*; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "labelNamesInfo") +@XmlAccessorType(XmlAccessType.FIELD) +public class LabelNamesInfo { + + protected ArrayList labels = new ArrayList(); + + public LabelNamesInfo() { + } // JAXB needs this + + public LabelNamesInfo(Set labelSet) { + labels.addAll(labelSet); + //labels.add("foo"); + } + + public ArrayList getLabels() { + return labels; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java new file mode 100644 index 0000000..481fe40 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsInfo.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.*; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "labelsInfo") +@XmlAccessorType(XmlAccessType.FIELD) +public class LabelsInfo { + + protected ArrayList labels = new ArrayList(); + + public LabelsInfo() { + } // JAXB needs this + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java index 73a2db1..bdfc6dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java @@ -18,6 +18,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Set; + import 
javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; @@ -45,6 +49,7 @@ protected long availMemoryMB; protected long usedVirtualCores; protected long availableVirtualCores; + protected ArrayList labels = new ArrayList(); public NodeInfo() { } // JAXB needs this @@ -70,6 +75,13 @@ public NodeInfo(RMNode ni, ResourceScheduler sched) { this.lastHealthUpdate = ni.getLastHealthReportTime(); this.healthReport = String.valueOf(ni.getHealthReport()); this.version = ni.getNodeManagerVersion(); + + // add labels + Set labelSet = ni.getLabels(); + if (labelSet != null) { + labels.addAll(labelSet); + Collections.sort(labels); + } } public String getRack() { @@ -123,5 +135,9 @@ public long getUsedVirtualCores() { public long getAvailableVirtualCores() { return this.availableVirtualCores; } + + public ArrayList getLabels() { + return this.labels; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsInfo.java new file mode 100644 index 0000000..527d12c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeToLabelsInfo.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.*; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "nodeToLabelsInfo") +@XmlAccessorType(XmlAccessType.FIELD) +public class NodeToLabelsInfo { + + protected String node; + protected ArrayList labels = new ArrayList(); + + public NodeToLabelsInfo() { + } // JAXB needs this + + public NodeToLabelsInfo(String node) { + this.node = node; + } + + public NodeToLabelsInfo(String node, Set labels) { + this.node = node; + this.labels.addAll(labels); + } + + public String getNode() { + return node; + } + + public ArrayList getLabels() { + return labels; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesToLabelsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesToLabelsInfo.java new file mode 100644 index 0000000..3c2eb08 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesToLabelsInfo.java @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; + +import java.util.*; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "nodesToLabelsInfo") +@XmlAccessorType(XmlAccessType.FIELD) +public class NodesToLabelsInfo { + + protected ArrayList nodeToLabelsInfos = new ArrayList(); + + public NodesToLabelsInfo() { + } // JAXB needs this + + public ArrayList getNodeToLabelsInfos() { + return nodeToLabelsInfos; + } + + public void add(NodeToLabelsInfo nodeToLabelInfo) { + nodeToLabelsInfos.add(nodeToLabelInfo); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java index 91e1905..424be63 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java @@ -135,34 +135,52 @@ public AllocateResponse schedule() throws Exception { public void addContainerToBeReleased(ContainerId containerId) { releases.add(containerId); } + public AllocateResponse allocate( String host, int memory, int numContainers, List releases) throws Exception { - List reqs = createReq(new String[]{host}, memory, 1, numContainers); + return allocate(host, memory, numContainers, releases, null); + } + + public AllocateResponse allocate( + String host, int memory, int numContainers, + List releases, String labelExpression) throws Exception { + List reqs = + createReq(new String[] { host }, memory, 1, numContainers, + labelExpression); return allocate(reqs, releases); } - + public List createReq(String[] hosts, int memory, int priority, int containers) throws Exception { + return createReq(hosts, memory, priority, containers, null); + } + + public List createReq(String[] hosts, int memory, int priority, + int containers, String labelExpression) throws Exception { List reqs = new ArrayList(); for (String host : hosts) { ResourceRequest hostReq = createResourceReq(host, memory, priority, - containers); + containers, labelExpression); reqs.add(hostReq); ResourceRequest rackReq = createResourceReq("/default-rack", memory, - priority, containers); + priority, containers, labelExpression); reqs.add(rackReq); } ResourceRequest offRackReq = createResourceReq(ResourceRequest.ANY, memory, - priority, containers); + priority, containers, labelExpression); reqs.add(offRackReq); return reqs; - } - + public ResourceRequest createResourceReq(String resource, int memory, int priority, int containers) throws Exception { + return createResourceReq(resource, memory, priority, containers, null); + } + + public ResourceRequest createResourceReq(String resource, int memory, int priority, + int containers, String 
labelExpression) throws Exception { ResourceRequest req = Records.newRecord(ResourceRequest.class); req.setResourceName(resource); req.setNumContainers(containers); @@ -172,6 +190,9 @@ public ResourceRequest createResourceReq(String resource, int memory, int priori Resource capability = Records.newRecord(Resource.class); capability.setMemory(memory); req.setCapability(capability); + if (labelExpression != null) { + req.setLabelExpression(labelExpression); + } return req; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 79f9098..7f0c8bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Set; import org.apache.hadoop.net.Node; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -202,7 +203,11 @@ public String getHealthReport() { public long getLastHealthReportTime() { return lastHealthReportTime; } - + + @Override + public Set getLabels() { + return null; + } }; private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 
3817637..20f576d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -182,27 +182,43 @@ public MockAM waitForNewAMToLaunchAndRegister(ApplicationId appId, int attemptSi return launchAndRegisterAM(app, this, nm); } - public void waitForState(MockNM nm, ContainerId containerId, + public boolean waitForState(MockNM nm, ContainerId containerId, RMContainerState containerState) throws Exception { + // default is wait for 30,000 ms + return waitForState(nm, containerId, containerState, 30 * 1000); + } + + public boolean waitForState(MockNM nm, ContainerId containerId, + RMContainerState containerState, int timeoutMillisecs) throws Exception { RMContainer container = getResourceScheduler().getRMContainer(containerId); int timeoutSecs = 0; - while(container == null && timeoutSecs++ < 100) { + while(container == null && timeoutSecs++ < timeoutMillisecs / 100) { nm.nodeHeartbeat(true); container = getResourceScheduler().getRMContainer(containerId); System.out.println("Waiting for container " + containerId + " to be allocated."); Thread.sleep(100); + + if (timeoutMillisecs <= timeoutSecs * 100) { + return false; + } } Assert.assertNotNull("Container shouldn't be null", container); - timeoutSecs = 0; - while (!containerState.equals(container.getState()) && timeoutSecs++ < 40) { + while (!containerState.equals(container.getState()) + && timeoutSecs++ < timeoutMillisecs / 100) { System.out.println("Container : " + containerId + " State is : " + container.getState() + " Waiting for state : " + containerState); nm.nodeHeartbeat(true); - Thread.sleep(300); + Thread.sleep(100); + + if (timeoutMillisecs <= timeoutSecs * 100) { + return false; + } } + System.out.println("Container State is : " + 
container.getState()); Assert.assertEquals("Container state is not correct (timedout)", containerState, container.getState()); + return true; } // get new application id diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index d6af0d7..0cd2c14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -129,7 +129,7 @@ public TestWorkPreservingRMRestart(Class schedulerClass) { // Test Strategy: send 3 container recovery reports(AMContainer, running // container, completed container) on NM re-registration, check the states of // SchedulerAttempt, SchedulerNode etc. are updated accordingly. 
- @Test(timeout = 20000) + @Test(timeout = 20000) public void testSchedulerRecovery() throws Exception { conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true); conf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelTestBase.java new file mode 100644 index 0000000..32cbd6f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/NodeLabelTestBase.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +import org.junit.Assert; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Sets; + +public class NodeLabelTestBase { + public static void assertMapEquals(Map> m1, + ImmutableMap> m2) { + Assert.assertEquals(m1.size(), m2.size()); + for (String k : m1.keySet()) { + Assert.assertTrue(m2.containsKey(k)); + assertCollectionEquals(m1.get(k), m2.get(k)); + } + } + + public static void assertMapContains(Map> m1, + ImmutableMap> m2) { + for (String k : m2.keySet()) { + Assert.assertTrue(m1.containsKey(k)); + assertCollectionEquals(m1.get(k), m2.get(k)); + } + } + + public static void assertCollectionEquals(Collection c1, + Collection c2) { + Assert.assertEquals(c1.size(), c2.size()); + Iterator i1 = c1.iterator(); + Iterator i2 = c2.iterator(); + while (i1.hasNext()) { + Assert.assertEquals(i1.next(), i2.next()); + } + } + + public static Set toSet(E... elements) { + Set set = Sets.newHashSet(elements); + return set; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/SyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/SyncDispatcher.java new file mode 100644 index 0000000..5303a18 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/SyncDispatcher.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.EventHandler; + +public class SyncDispatcher implements Dispatcher { + @SuppressWarnings("rawtypes") + EventHandler handler = null; + + @SuppressWarnings("rawtypes") + @Override + public EventHandler getEventHandler() { + return handler; + } + + @SuppressWarnings("rawtypes") + @Override + public void register(Class eventType, EventHandler handler) { + this.handler = handler; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestFileSystemNodeLabelManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestFileSystemNodeLabelManager.java new file mode 100644 index 0000000..a47ed3f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestFileSystemNodeLabelManager.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableMap; + +public class TestFileSystemNodeLabelManager extends NodeLabelTestBase { + MockFileSystemNodeLabelManager mgr = null; + Configuration conf = null; + + private static class MockFileSystemNodeLabelManager extends + FileSystemNodeLabelManager { + @Override + protected void initDispatcher(Configuration conf) { + super.dispatcher = new SyncDispatcher(); + } + + @Override + protected void startDispatcher() { + // do nothing + } + } + + @Before + public void before() throws IOException { + mgr = new MockFileSystemNodeLabelManager(); + conf = new Configuration(); + File tempDir = File.createTempFile("nlb", ".tmp"); + tempDir.delete(); + tempDir.mkdirs(); + tempDir.deleteOnExit(); + conf.set(YarnConfiguration.FS_NODE_LABEL_STORE_URI, + tempDir.getAbsolutePath()); + mgr.init(conf); + mgr.start(); + } + + @After + public void after() throws IOException { + 
mgr.fs.delete(mgr.rootDirPath, true); + mgr.stop(); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Test(timeout = 10000) + public void testRecoverWithMirror() throws Exception { + mgr.addLabels(toSet("p1", "p2", "p3")); + mgr.addLabel("p4"); + mgr.addLabels(toSet("p5", "p6")); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n1", toSet("p1"), "n2", + toSet("p2"))); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n3", toSet("p3"), "n4", + toSet("p4"), "n5", toSet("p5"), "n6", toSet("p6"), "n7", toSet("p6"))); + + /* + * node -> partition p1: n1 p2: n2 p3: n3 p4: n4 p5: n5 p6: n6, n7 + */ + + mgr.removeLabel("p1"); + mgr.removeLabels(Arrays.asList("p3", "p5")); + + /* + * After removed p2: n2 p4: n4 p6: n6, n7 + */ + // shutdown mgr and start a new mgr + mgr.stop(); + + mgr = new MockFileSystemNodeLabelManager(); + mgr.init(conf); + + // check variables + Assert.assertEquals(3, mgr.getLabels().size()); + Assert.assertTrue(mgr.getLabels().containsAll( + Arrays.asList("p2", "p4", "p6"))); + + assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of("n2", + toSet("p2"), "n4", toSet("p4"), "n6", toSet("p6"), "n7", toSet("p6"))); + assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of("n1", + NodeLabelManager.EMPTY_STRING_SET, "n3", + NodeLabelManager.EMPTY_STRING_SET, "n5", + NodeLabelManager.EMPTY_STRING_SET)); + + // stutdown mgr and start a new mgr + mgr.stop(); + mgr = new MockFileSystemNodeLabelManager(); + mgr.init(conf); + + // check variables + Assert.assertEquals(3, mgr.getLabels().size()); + Assert.assertTrue(mgr.getLabels().containsAll( + Arrays.asList("p2", "p4", "p6"))); + + assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of("n2", + toSet("p2"), "n4", toSet("p4"), "n6", toSet("p6"), "n7", toSet("p6"))); + assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of("n1", + NodeLabelManager.EMPTY_STRING_SET, "n3", + NodeLabelManager.EMPTY_STRING_SET, "n5", + NodeLabelManager.EMPTY_STRING_SET)); + } + + @SuppressWarnings({ 
"unchecked", "rawtypes" }) + @Test(timeout = 10000) + public void testEditlogRecover() throws Exception { + mgr.addLabels(toSet("p1", "p2", "p3")); + mgr.addLabel("p4"); + mgr.addLabels(toSet("p5", "p6")); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n1", toSet("p1"), "n2", + toSet("p2"))); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n3", toSet("p3"), "n4", + toSet("p4"), "n5", toSet("p5"), "n6", toSet("p6"), "n7", toSet("p6"))); + + /* + * node -> partition p1: n1 p2: n2 p3: n3 p4: n4 p5: n5 p6: n6, n7 + */ + + mgr.removeLabel("p1"); + mgr.removeLabels(Arrays.asList("p3", "p5")); + + /* + * After removed p2: n2 p4: n4 p6: n6, n7 + */ + // shutdown mgr and start a new mgr + mgr.stop(); + + mgr = new MockFileSystemNodeLabelManager(); + mgr.init(conf); + + // check variables + Assert.assertEquals(3, mgr.getLabels().size()); + Assert.assertTrue(mgr.getLabels().containsAll( + Arrays.asList("p2", "p4", "p6"))); + + assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of( + "n2", toSet("p2"), "n4", toSet("p4"), "n6", toSet("p6"), "n7", + toSet("p6"))); + assertMapContains(mgr.getNodesToLabels(), ImmutableMap.of("n1", + NodeLabelManager.EMPTY_STRING_SET, "n3", + NodeLabelManager.EMPTY_STRING_SET, "n5", + NodeLabelManager.EMPTY_STRING_SET)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestNodeLabelManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestNodeLabelManager.java new file mode 100644 index 0000000..fffb624 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestNodeLabelManager.java @@ -0,0 +1,608 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelConfiguration.LoadStrategy; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Sets; + +public class TestNodeLabelManager extends NodeLabelTestBase { + private final Resource EMPTY_RESOURCE = Resource.newInstance(0, 0); + private final Resource SMALL_NODE = Resource.newInstance(100, 0); + private final Resource LARGE_NODE = Resource.newInstance(1000, 0); + + DumbNodeLabelManager mgr = null; + + private static class DumbNodeLabelManager extends NodeLabelManager { + Map> lastNodeToLabels = null; + Collection lastAddedlabels = null; + Collection lastRemovedlabels = null; + + 
@Override + protected void persistRemovingLabels(Collection labels) + throws IOException { + lastRemovedlabels = labels; + } + + @Override + public void recover(Map> defaultNodeToLabels, + Set defaultLabels, LoadStrategy loadStrategy) + throws IOException { + // do nothing here + } + + @Override + protected void initDispatcher(Configuration conf) { + super.dispatcher = new SyncDispatcher(); + } + + @Override + protected void startDispatcher() { + // do nothing + } + + @Override + protected void persistNodeToLabelsChanges( + Map> nodeToLabels) throws IOException { + this.lastNodeToLabels = nodeToLabels; + } + + @Override + protected void persistAddingLabels(Set labels) throws IOException { + this.lastAddedlabels = labels; + } + } + + @Before + public void before() { + mgr = new DumbNodeLabelManager(); + mgr.init(new Configuration()); + mgr.start(); + } + + @After + public void after() { + mgr.stop(); + } + + @Test(timeout = 5000) + public void testAddRemovelabel() throws Exception { + // Add some label + mgr.addLabel("hello"); + assertCollectionEquals(mgr.lastAddedlabels, Arrays.asList("hello")); + + mgr.addLabel("world"); + mgr.addLabels(toSet("hello1", "world1")); + assertCollectionEquals(mgr.lastAddedlabels, + Sets.newHashSet("hello1", "world1")); + + Assert.assertTrue(mgr.getLabels().containsAll( + Sets.newHashSet("hello", "world", "hello1", "world1"))); + + // try to remove null, empty and non-existed label, should fail + for (String p : Arrays.asList(null, NodeLabelManager.NO_LABEL, "xx")) { + boolean caught = false; + try { + mgr.removeLabel(p); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("remove label should fail " + + "when label is null/empty/non-existed", caught); + } + + // Remove some label + mgr.removeLabel("hello"); + assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("hello")); + Assert.assertTrue(mgr.getLabels().containsAll( + Arrays.asList("world", "hello1", "world1"))); + + mgr.removeLabels(Arrays.asList("hello1", 
"world1", "world")); + Assert.assertTrue(mgr.lastRemovedlabels.containsAll(Sets.newHashSet( + "hello1", "world1", "world"))); + Assert.assertTrue(mgr.getLabels().isEmpty()); + } + + @Test(timeout = 5000) + public void testAddRemovelabelIgnoreCase() throws Exception { + // Add some label + mgr.addLabel("HeLlO"); + + assertCollectionEquals(mgr.lastAddedlabels, Arrays.asList("hello")); + Assert.assertFalse(mgr.getLabels().containsAll(Arrays.asList("HeLlO"))); + Assert.assertTrue(mgr.getLabels().containsAll(Arrays.asList("hello"))); + } + + @Test(timeout = 5000) + public void testAddInvalidlabel() throws IOException { + boolean caught = false; + try { + mgr.addLabel(null); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("null label should not add to repo", caught); + + caught = false; + try { + mgr.addLabel(NodeLabelManager.NO_LABEL); + } catch (IOException e) { + caught = true; + } + + Assert.assertTrue("empty label should not add to repo", caught); + + caught = false; + try { + mgr.addLabel("-?"); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("invalid label charactor should not add to repo", caught); + + caught = false; + try { + mgr.addLabel(StringUtils.repeat("c", 257)); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("too long label should not add to repo", caught); + + caught = false; + try { + mgr.addLabel("-aaabbb"); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("label cannot start with \"-\"", caught); + + caught = false; + try { + mgr.addLabel("_aaabbb"); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("label cannot start with \"_\"", caught); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Test(timeout = 5000) + public void testSetRemoveLabelsOnNodes() throws Exception { + // set a label on a node, but label doesn't exist + boolean caught = false; + try { + mgr.setLabelsOnSingleNode("node", toSet("label")); + } catch (IOException e) { + 
caught = true; + } + Assert.assertTrue("trying to set a label to a node but " + + "label doesn't exist in repository should fail", caught); + + // set a label on a node, but node is null or empty + try { + mgr.setLabelsOnSingleNode(NodeLabelManager.NO_LABEL, toSet("label")); + } catch (IOException e) { + caught = true; + } + Assert.assertTrue("trying to add a empty node but succeeded", caught); + + // set node->label one by one + mgr.addLabels(toSet("p1", "p2", "p3")); + mgr.setLabelsOnSingleNode("n1", toSet("p1")); + mgr.setLabelsOnSingleNode("n1", toSet("p2")); + mgr.setLabelsOnSingleNode("n2", toSet("p3")); + assertMapEquals(mgr.getNodesToLabels(), + ImmutableMap.of("n1", toSet("p2"), "n2", toSet("p3"))); + assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of("n2", toSet("p3"))); + + // set bunch of node->label + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n3", toSet("p3"), "n1", + toSet("p1"))); + assertMapEquals(mgr.getNodesToLabels(), ImmutableMap.of("n1", toSet("p1"), + "n2", toSet("p3"), "n3", toSet("p3"))); + assertMapEquals(mgr.lastNodeToLabels, + ImmutableMap.of("n3", toSet("p3"), "n1", toSet("p1"))); + + // remove label on node + mgr.removeLabelOnNode("n1"); + assertMapEquals(mgr.getNodesToLabels(), + ImmutableMap.of("n1", NodeLabelManager.EMPTY_STRING_SET, "n2", + toSet("p3"), "n3", toSet("p3"))); + assertMapEquals(mgr.lastNodeToLabels, + ImmutableMap.of("n1", NodeLabelManager.EMPTY_STRING_SET)); + + // remove labels on node + mgr.removeLabelsOnNodes(Arrays.asList("n2", "n3")); + assertMapEquals(mgr.nodeToLabels, ImmutableMap.of("n1", + NodeLabelManager.EMPTY_STRING_SET, "n2", + NodeLabelManager.EMPTY_STRING_SET, "n3", + NodeLabelManager.EMPTY_STRING_SET)); + assertMapEquals(mgr.lastNodeToLabels, ImmutableMap.of("n2", + NodeLabelManager.EMPTY_STRING_SET, "n3", + NodeLabelManager.EMPTY_STRING_SET)); + } + + @Test(timeout = 5000) + public void testRemovelabelWithNodes() throws Exception { + mgr.addLabels(toSet("p1", "p2", "p3")); + 
mgr.setLabelsOnSingleNode("n1", toSet("p1")); + mgr.setLabelsOnSingleNode("n2", toSet("p2")); + mgr.setLabelsOnSingleNode("n3", toSet("p3")); + + mgr.removeLabel("p1"); + assertMapEquals(mgr.getNodesToLabels(), + ImmutableMap.of("n1", NodeLabelManager.EMPTY_STRING_SET, "n2", + toSet("p2"), "n3", toSet("p3"))); + assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p1")); + + mgr.removeLabels(Arrays.asList("p2", "p3")); + assertMapEquals(mgr.getNodesToLabels(), ImmutableMap.of("n1", + NodeLabelManager.EMPTY_STRING_SET, "n2", + NodeLabelManager.EMPTY_STRING_SET, "n3", + NodeLabelManager.EMPTY_STRING_SET)); + Assert.assertTrue(mgr.getLabels().isEmpty()); + assertCollectionEquals(mgr.lastRemovedlabels, Arrays.asList("p2", "p3")); + } + + @Test(timeout = 5000) + public void testNodeActiveDeactiveUpdate() throws Exception { + mgr.addLabels(toSet("p1", "p2", "p3")); + mgr.setLabelsOnSingleNode("n1", toSet("p1")); + mgr.setLabelsOnSingleNode("n2", toSet("p2")); + mgr.setLabelsOnSingleNode("n3", toSet("p3")); + + Assert.assertEquals(mgr.getResourceWithLabel("p1"), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceWithLabel("p2"), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceWithLabel("p3"), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getResourceWithLabel(NodeLabelManager.NO_LABEL), + EMPTY_RESOURCE); + + // active two NM to n1, one large and one small + mgr.activeNode(NodeId.newInstance("n1", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n1", 1), LARGE_NODE); + Assert.assertEquals(mgr.getResourceWithLabel("p1"), + Resources.add(SMALL_NODE, LARGE_NODE)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p1"), 1); + + // change the large NM to small, check if resource updated + mgr.updateNodeResource(NodeId.newInstance("n1", 1), SMALL_NODE); + Assert.assertEquals(mgr.getResourceWithLabel("p1"), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p1"), 1); + + // deactive one NM, and check if resource updated + 
mgr.deactiveNode(NodeId.newInstance("n1", 1)); + Assert.assertEquals(mgr.getResourceWithLabel("p1"), SMALL_NODE); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p1"), 1); + + // continus deactive, check if resource updated + mgr.deactiveNode(NodeId.newInstance("n1", 0)); + Assert.assertEquals(mgr.getResourceWithLabel("p1"), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p1"), 0); + + // Add two NM to n1 back + mgr.activeNode(NodeId.newInstance("n1", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n1", 1), LARGE_NODE); + + // And remove p1, now the two NM should come to default label, + mgr.removeLabel("p1"); + Assert.assertEquals(mgr.getResourceWithLabel(NodeLabelManager.NO_LABEL), + Resources.add(SMALL_NODE, LARGE_NODE)); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + @Test(timeout = 5000) + public void testUpdateNodeLabelWithActiveNode() throws Exception { + mgr.addLabels(toSet("p1", "p2", "p3")); + mgr.setLabelsOnSingleNode("n1", toSet("p1")); + mgr.setLabelsOnSingleNode("n2", toSet("p2")); + mgr.setLabelsOnSingleNode("n3", toSet("p3")); + + // active two NM to n1, one large and one small + mgr.activeNode(NodeId.newInstance("n1", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n2", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n3", 0), SMALL_NODE); + + // change label of n1 to p2 + mgr.setLabelsOnSingleNode("n1", toSet("p2")); + Assert.assertEquals(mgr.getResourceWithLabel("p1"), EMPTY_RESOURCE); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p1"), 0); + Assert.assertEquals(mgr.getResourceWithLabel("p2"), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p2"), 2); + Assert.assertEquals(mgr.getResourceWithLabel("p3"), SMALL_NODE); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p3"), 1); + + // add more labels + mgr.addLabels(toSet("p4", "p5", "p6")); + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n4", toSet("p1"), "n5", + toSet("p2"), "n6", toSet("p3"), "n7", 
toSet("p4"), "n8", toSet("p5"))); + + // now node -> label is, + // p1 : n4 + // p2 : n1, n2, n5 + // p3 : n3, n6 + // p4 : n7 + // p5 : n8 + // no-label : n9 + + // active these nodes + mgr.activeNode(NodeId.newInstance("n4", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n5", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n6", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n7", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n8", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("n9", 0), SMALL_NODE); + + // check varibles + Assert.assertEquals(mgr.getResourceWithLabel("p1"), SMALL_NODE); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p1"), 1); + Assert.assertEquals(mgr.getResourceWithLabel("p2"), + Resources.multiply(SMALL_NODE, 3)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p2"), 3); + Assert.assertEquals(mgr.getResourceWithLabel("p3"), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p3"), 2); + Assert.assertEquals(mgr.getResourceWithLabel("p4"), + Resources.multiply(SMALL_NODE, 1)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p4"), 1); + Assert.assertEquals(mgr.getResourceWithLabel("p5"), + Resources.multiply(SMALL_NODE, 1)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p5"), 1); + Assert.assertEquals(mgr.getResourceWithLabel(""), + Resources.multiply(SMALL_NODE, 1)); + Assert.assertEquals(mgr.getNumOfNodesByLabel(""), 1); + + // change a bunch of nodes -> labels + // n4 -> p2 + // n7 -> empty + // n5 -> p1 + // n8 -> empty + // n9 -> p1 + // + // now become: + // p1 : n5, n9 + // p2 : n1, n2, n4 + // p3 : n3, n6 + // p4 : [ ] + // p5 : [ ] + // no label: n8, n7 + mgr.setLabelsOnMultipleNodes((Map) ImmutableMap.of("n4", toSet("p2"), "n7", + NodeLabelManager.EMPTY_STRING_SET, "n5", toSet("p1"), "n8", + NodeLabelManager.EMPTY_STRING_SET, "n9", toSet("p1"))); + + // check varibles + Assert.assertEquals(mgr.getResourceWithLabel("p1"), + Resources.multiply(SMALL_NODE, 2)); + 
Assert.assertEquals(mgr.getNumOfNodesByLabel("p1"), 2); + Assert.assertEquals(mgr.getResourceWithLabel("p2"), + Resources.multiply(SMALL_NODE, 3)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p2"), 3); + Assert.assertEquals(mgr.getResourceWithLabel("p3"), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p3"), 2); + Assert.assertEquals(mgr.getResourceWithLabel("p4"), + Resources.multiply(SMALL_NODE, 0)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p4"), 0); + Assert.assertEquals(mgr.getResourceWithLabel("p5"), + Resources.multiply(SMALL_NODE, 0)); + Assert.assertEquals(mgr.getNumOfNodesByLabel("p5"), 0); + Assert.assertEquals(mgr.getResourceWithLabel(""), + Resources.multiply(SMALL_NODE, 2)); + Assert.assertEquals(mgr.getNumOfNodesByLabel(""), 2); + } + + @Test + public void testGetQueueResource() throws Exception { + Resource clusterResource = Resource.newInstance(9999, 1); + + /* + * Node->Labels: + * host1 : red, blue + * host2 : blue, yellow + * host3 : yellow + * host4 : + */ + mgr.addLabels(toSet("red", "blue", "yellow")); + mgr.setLabelsOnSingleNode("host1", toSet("red", "blue")); + mgr.setLabelsOnSingleNode("host2", toSet("blue", "yellow")); + mgr.setLabelsOnSingleNode("host3", toSet("yellow")); + + // active two NM to n1, one large and one small + mgr.activeNode(NodeId.newInstance("host1", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("host2", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("host3", 0), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("host4", 0), SMALL_NODE); + + // reinitialize queue + Set q1Label = toSet("red", "blue"); + Set q2Label = toSet("blue", "yellow"); + Set q3Label = toSet("yellow"); + Set q4Label = NodeLabelManager.EMPTY_STRING_SET; + Set q5Label = toSet(NodeLabelManager.ANY); + + Map> queueToLabels = new HashMap>(); + queueToLabels.put("Q1", q1Label); + queueToLabels.put("Q2", q2Label); + queueToLabels.put("Q3", q3Label); + queueToLabels.put("Q4", q4Label); + 
queueToLabels.put("Q5", q5Label); + + mgr.reinitializeQueueLabels(queueToLabels); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Check resource after changes some labels + * Node->Labels: + * host1 : blue + * host2 : + * host3 : red, yellow + * host4 : + */ + mgr.setLabelsOnMultipleNodes(ImmutableMap.of( + "host3", toSet("red", "yellow"), + "host1", toSet("blue"), + "host2", NodeLabelManager.EMPTY_STRING_SET)); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 4), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Check resource after deactive/active some nodes + * Node->Labels: + * (deactived) host1 : blue + * host2 : + * (deactived and then actived) host3 : red, yellow + * host4 : + */ + mgr.deactiveNode(NodeId.newInstance("host1", 0)); + mgr.deactiveNode(NodeId.newInstance("host3", 0)); + mgr.activeNode(NodeId.newInstance("host3", 0), SMALL_NODE); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q1", q1Label, 
clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Check resource after refresh queue: + * Q1: blue + * Q2: red, blue + * Q3: red + * Q4: + * Q5: ANY + */ + q1Label = toSet("blue"); + q2Label = toSet("blue", "red"); + q3Label = toSet("red"); + q4Label = NodeLabelManager.EMPTY_STRING_SET; + q5Label = toSet(NodeLabelManager.ANY); + + queueToLabels.clear(); + queueToLabels.put("Q1", q1Label); + queueToLabels.put("Q2", q2Label); + queueToLabels.put("Q3", q3Label); + queueToLabels.put("Q4", q4Label); + queueToLabels.put("Q5", q5Label); + + mgr.reinitializeQueueLabels(queueToLabels); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 2), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Active NMs in nodes already have NM + * Node->Labels: + * host2 : + * host3 : red, yellow (3 NMs) + * host4 : (2 NMs) + */ + mgr.activeNode(NodeId.newInstance("host3", 1), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("host3", 2), SMALL_NODE); + mgr.activeNode(NodeId.newInstance("host4", 1), SMALL_NODE); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + 
mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 6), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 6), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + + /* + * Deactive NMs in nodes already have NMs + * Node->Labels: + * host2 : + * host3 : red, yellow (2 NMs) + * host4 : (0 NMs) + */ + mgr.deactiveNode(NodeId.newInstance("host3", 2)); + mgr.deactiveNode(NodeId.newInstance("host4", 1)); + mgr.deactiveNode(NodeId.newInstance("host4", 0)); + + // check resource + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q1", q1Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q2", q2Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 3), + mgr.getQueueResource("Q3", q3Label, clusterResource)); + Assert.assertEquals(Resources.multiply(SMALL_NODE, 1), + mgr.getQueueResource("Q4", q4Label, clusterResource)); + Assert.assertEquals(clusterResource, + mgr.getQueueResource("Q5", q5Label, clusterResource)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestNodeLabelUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestNodeLabelUtils.java new file mode 100644 index 0000000..6dee2df --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/label/TestNodeLabelUtils.java @@ -0,0 +1,105 @@ +/** + 
* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.label; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.junit.Assert; +import org.junit.Test; + +import com.google.common.collect.ImmutableMap; + +public class TestNodeLabelUtils extends NodeLabelTestBase { + private void assertParseShouldFail(String json, boolean shouldFail) { + try { + NodeLabelUtils.getNodeToLabelsFromJson(json); + if (shouldFail) { + Assert.fail("should fail:" + json == null ? "" : json); + } + } catch (IOException e) { + if (!shouldFail) { + Assert.fail("shouldn't fail:" + json == null ? 
"" : json); + } + } + } + + private void assertParseFailed(String json) { + assertParseShouldFail(json, true); + } + + @Test + public void testParseNodeToLabelsFromJson() throws IOException { + // empty and null + assertParseShouldFail(null, false); + assertParseShouldFail("", false); + + // empty host + String json = + "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"\":{\"labels\":[\"x\",\"y\"]}}"; + assertParseFailed(json); + + // not json object + json = + "[\"host1\":{\"labels\":[\"x\",\"y\"]}, \"\":{\"labels\":[\"x\",\"y\"]}]"; + assertParseFailed(json); + + // don't have labels + json = + "[\"host1\":{\"labels\":[\"x\",\"y\"]}, \"\":{\"tag\":[\"x\",\"y\"]}]"; + assertParseFailed(json); + + // labels is not array + json = "{\"host1\":{\"labels\":{\"x\":\"y\"}}}"; + assertParseFailed(json); + + // not a valid json + json = "[ }"; + assertParseFailed(json); + + // normal case #1 + json = + "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"host2\":{\"labels\":[\"x\",\"y\"]}}"; + Map> nodeToLabels = + NodeLabelUtils.getNodeToLabelsFromJson(json); + assertMapEquals(nodeToLabels, + ImmutableMap.of("host1", toSet("x", "y"), "host2", toSet("x", "y"))); + + // normal case #2 + json = + "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"host2\":{\"labels\":[\"a\",\"b\"]}}"; + nodeToLabels = NodeLabelUtils.getNodeToLabelsFromJson(json); + assertMapEquals(nodeToLabels, + ImmutableMap.of("host1", toSet("x", "y"), "host2", toSet("a", "b"))); + + // label is empty #1 + json = "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"host2\":{\"labels\":[]}}"; + nodeToLabels = NodeLabelUtils.getNodeToLabelsFromJson(json); + assertMapEquals(nodeToLabels, ImmutableMap.of("host1", toSet("x", "y"), + "host2", new HashSet())); + + // label is empty #2 + json = "{\"host1\":{\"labels\":[\"x\",\"y\"]}, \"host2\":{}}"; + nodeToLabels = NodeLabelUtils.getNodeToLabelsFromJson(json); + assertMapEquals(nodeToLabels, ImmutableMap.of("host1", toSet("x", "y"), + "host2", new HashSet())); + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java index 8a2840e..d718df9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java @@ -28,6 +28,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.isA; import static org.mockito.Mockito.mock; @@ -39,10 +40,12 @@ import java.util.ArrayList; import java.util.Comparator; import java.util.Deque; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.NavigableSet; import java.util.Random; +import java.util.Set; import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; @@ -51,10 +54,12 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import 
org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor; import org.apache.hadoop.yarn.server.resourcemanager.resource.Priority; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -74,6 +79,11 @@ import org.junit.rules.TestName; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatcher; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import com.google.common.collect.ImmutableSet; public class TestProportionalCapacityPreemptionPolicy { @@ -100,6 +110,7 @@ ApplicationId.newInstance(TS, 4), 0); final ArgumentCaptor evtCaptor = ArgumentCaptor.forClass(ContainerPreemptEvent.class); + NodeLabelManager labelManager = mock(NodeLabelManager.class); @Rule public TestName name = new TestName(); @@ -570,6 +581,127 @@ public void testAMResourcePercentForSkippedAMContainers() { setAMContainer = false; } + @SuppressWarnings("unchecked") + @Test + public void testIgnoreBecauseQueueCannotAccessSomeLabels() { + int[][] qData = new int[][]{ + // / A B C + { 100, 40, 40, 20 }, // abs + { 100, 100, 100, 100 }, // maxCap + { 100, 10, 60, 30 }, // used + { 0, 30, 0, 0 }, // pending + { 0, 0, 0, 0 }, // reserved + { 3, 1, 1, 1 }, // apps + { -1, 1, 1, 1 }, // req granularity + { 3, 0, 0, 0 }, // subqueues + }; + + NodeLabelManager labelManager = mock(NodeLabelManager.class); + when( + labelManager.getQueueResource(any(String.class), any(Set.class), + any(Resource.class))).thenReturn(Resource.newInstance(10, 0), + Resource.newInstance(100, 0), Resource.newInstance(10, 0)); + ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData); + policy.setNodeLabelManager(labelManager); + policy.editSchedule(); + // don't correct imbalances without demand + verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class)); + } + + @SuppressWarnings({ "rawtypes" }) + @Test + public void 
testPreemptContainerRespectLabels() { + /* + * A: yellow + * B: blue + * C: green, yellow + * D: red + * E: green + * + * All node has labels, so C should only preempt container from A/E + */ + int[][] qData = new int[][]{ + // / A B C D E + { 100, 20, 20, 20, 20, 20 }, // abs + { 100, 100, 100, 100, 100, 100 }, // maxCap + { 100, 25, 25, 0, 25, 25 }, // used + { 0, 0, 0, 20, 0, 0 }, // pending + { 0, 0, 0, 0, 0, 0 }, // reserved + { 5, 1, 1, 1, 1, 1 }, // apps + { -1, 1, 1, 1, 1, 1 }, // req granularity + { 5, 0, 0, 0, 0, 0 }, // subqueues + }; + + Set[] labels = new Set[6]; + labels[1] = ImmutableSet.of("yellow"); + labels[2] = ImmutableSet.of("blue"); + labels[3] = ImmutableSet.of("yellow", "green"); + labels[4] = ImmutableSet.of("red"); + labels[5] = ImmutableSet.of("green"); + + // build policy and run + ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData, labels); + policy.editSchedule(); + + // B,D don't have expected labels, will not preempt resource from them + verify(mDisp, times(0)).handle(argThat(new IsPreemptionRequestFor(appB))); + verify(mDisp, times(0)).handle(argThat(new IsPreemptionRequestFor(appD))); + + // A,E have expected resource, preempt resource from them + verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appA))); + verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appE))); + } + + @SuppressWarnings({ "rawtypes" }) + @Test + public void testPreemptContainerRespectLabelsInHierarchyQueues() { + /* + * A: + * B: yellow + * C: blue + * D: green, yellow + * E: + * F: green + * + * All node has labels, so C should only preempt container from B/F + * + * Queue structure: + * root + * / | \ + * A D E + * / \ \ + * B C F + */ + int[][] qData = new int[][] { + // / A B C D E F + { 100, 50, 25, 25, 25, 25, 25 }, // abs + { 100, 100, 100, 100, 100, 100, 100 }, // maxCap + { 100, 60, 30, 30, 0, 40, 40 }, // used + { 0, 0, 0, 0, 25, 0, 0 }, // pending + { 0, 0, 0, 0, 0, 0, 0 }, // reserved + { 4, 2, 1, 1, 1, 
1, 1 }, // apps + { -1, 1, 1, 1, 1, 1, 1 }, // req granularity + { 5, 2, 0, 0, 0, 1, 0 }, // subqueues + }; + + Set[] labels = new Set[7]; + labels[2] = ImmutableSet.of("yellow"); + labels[3] = ImmutableSet.of("blue"); + labels[4] = ImmutableSet.of("yellow", "green"); + labels[6] = ImmutableSet.of("green"); + + // build policy and run + ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData, labels); + policy.editSchedule(); + + // B,D don't have expected labels, will not preempt resource from them + verify(mDisp, times(0)).handle(argThat(new IsPreemptionRequestFor(appB))); + + // A,E have expected resource, preempt resource from them + verify(mDisp, times(5)).handle(argThat(new IsPreemptionRequestFor(appA))); + verify(mDisp, times(15)).handle(argThat(new IsPreemptionRequestFor(appD))); + } + static class IsPreemptionRequestFor extends ArgumentMatcher { private final ApplicationAttemptId appAttId; @@ -592,20 +724,39 @@ public String toString() { return appAttId.toString(); } } - + ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData) { + return buildPolicy(qData, null); + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + ProportionalCapacityPreemptionPolicy buildPolicy(int[][] qData, Set[] labels) { ProportionalCapacityPreemptionPolicy policy = - new ProportionalCapacityPreemptionPolicy(conf, mDisp, mCS, mClock); - ParentQueue mRoot = buildMockRootQueue(rand, qData); + new ProportionalCapacityPreemptionPolicy(conf, mDisp, mCS, mClock, + labelManager); + ParentQueue mRoot = buildMockRootQueue(rand, labels, qData); when(mCS.getRootQueue()).thenReturn(mRoot); Resource clusterResources = Resource.newInstance(leafAbsCapacities(qData[0], qData[7]), 0); when(mCS.getClusterResource()).thenReturn(clusterResources); + // by default, queue's resource equals clusterResource when no label exists + when( + labelManager.getQueueResource(any(String.class), any(Set.class), + any(Resource.class))).thenReturn(clusterResources); + Mockito.doAnswer(new Answer() 
{ + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + String hostname = (String) invocation.getArguments()[0]; + Set labels = getLabelsFromHostName(hostname); + return labels; + } + }).when(labelManager).getLabelsOnNode(any(String.class)); return policy; } - ParentQueue buildMockRootQueue(Random r, int[]... queueData) { + @SuppressWarnings({ "rawtypes", "unchecked" }) + ParentQueue buildMockRootQueue(Random r, Set[] labels, int[]... queueData) { int[] abs = queueData[0]; int[] maxCap = queueData[1]; int[] used = queueData[2]; @@ -615,14 +766,30 @@ ParentQueue buildMockRootQueue(Random r, int[]... queueData) { int[] gran = queueData[6]; int[] queues = queueData[7]; - return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues); + return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues, + labels); } - + ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, int[] pending, int[] reserved, int[] apps, int[] gran, int[] queues) { + return mockNested(abs, maxCap, used, pending, reserved, apps, gran, queues, + null); + } + + @SuppressWarnings("unchecked") + ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, + int[] pending, int[] reserved, int[] apps, int[] gran, int[] queues, + Set[] labels) { + if (labels == null) { + labels = new Set[abs.length]; + for (int i = 0; i < labels.length; i++) { + labels[i] = null; + } + } + float tot = leafAbsCapacities(abs, queues); Deque pqs = new LinkedList(); - ParentQueue root = mockParentQueue(null, queues[0], pqs); + ParentQueue root = mockParentQueue(null, queues[0], pqs, labels[0]); when(root.getQueueName()).thenReturn("/"); when(root.getAbsoluteUsedCapacity()).thenReturn(used[0] / tot); when(root.getAbsoluteCapacity()).thenReturn(abs[0] / tot); @@ -633,9 +800,9 @@ ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, final ParentQueue p = pqs.removeLast(); final String queueName = "queue" + ((char)('A' + i - 1)); if (queues[i] > 0) { - q = 
mockParentQueue(p, queues[i], pqs); + q = mockParentQueue(p, queues[i], pqs, labels[i]); } else { - q = mockLeafQueue(p, tot, i, abs, used, pending, reserved, apps, gran); + q = mockLeafQueue(p, tot, i, abs, used, pending, reserved, apps, gran, labels[i]); } when(q.getParent()).thenReturn(p); when(q.getQueueName()).thenReturn(queueName); @@ -648,7 +815,7 @@ ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, } ParentQueue mockParentQueue(ParentQueue p, int subqueues, - Deque pqs) { + Deque pqs, Set labels) { ParentQueue pq = mock(ParentQueue.class); List cqs = new ArrayList(); when(pq.getChildQueues()).thenReturn(cqs); @@ -661,11 +828,15 @@ ParentQueue mockParentQueue(ParentQueue p, int subqueues, return pq; } - LeafQueue mockLeafQueue(ParentQueue p, float tot, int i, int[] abs, - int[] used, int[] pending, int[] reserved, int[] apps, int[] gran) { + LeafQueue mockLeafQueue(ParentQueue p, float tot, int i, int[] abs, + int[] used, int[] pending, int[] reserved, int[] apps, int[] gran, + Set labels) { LeafQueue lq = mock(LeafQueue.class); when(lq.getTotalResourcePending()).thenReturn( Resource.newInstance(pending[i], 0)); + if (labels != null) { + when(lq.getLabels()).thenReturn(labels); + } // consider moving where CapacityScheduler::comparator accessible NavigableSet qApps = new TreeSet( new Comparator() { @@ -681,7 +852,8 @@ public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) { int aPending = pending[i] / apps[i]; int aReserve = reserved[i] / apps[i]; for (int a = 0; a < apps[i]; ++a) { - qApps.add(mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i])); + qApps.add(mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i], + labels)); ++appAlloc; } } @@ -694,7 +866,7 @@ public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) { } FiCaSchedulerApp mockApp(int qid, int id, int used, int pending, int reserved, - int gran) { + int gran, Set labels) { FiCaSchedulerApp app = mock(FiCaSchedulerApp.class); ApplicationId appId = 
ApplicationId.newInstance(TS, id); @@ -713,23 +885,55 @@ FiCaSchedulerApp mockApp(int qid, int id, int used, int pending, int reserved, List cLive = new ArrayList(); for (int i = 0; i < used; i += gran) { - if(setAMContainer && i == 0){ - cLive.add(mockContainer(appAttId, cAlloc, unit, 0)); - }else{ - cLive.add(mockContainer(appAttId, cAlloc, unit, 1)); + if (setAMContainer && i == 0) { + cLive.add(mockContainer(appAttId, cAlloc, unit, 0, labels)); + } else { + cLive.add(mockContainer(appAttId, cAlloc, unit, 1, labels)); } ++cAlloc; } when(app.getLiveContainers()).thenReturn(cLive); return app; } - + RMContainer mockContainer(ApplicationAttemptId appAttId, int id, Resource r, int priority) { + return mockContainer(appAttId, id, r, priority, null); + } + + private String createHostNameFromLabel(Set labels) { + StringBuilder sb = new StringBuilder(); + sb.append("host"); + if (labels != null) { + for (String label : labels) { + sb.append("-"); + sb.append(label); + } + } + return sb.toString(); + } + + private Set getLabelsFromHostName(String host) { + Set labels = new HashSet(); + if (host == null) { + return labels; + } + + String[] splits = host.split("-"); + for (int i = 1; i < splits.length; i++) { + labels.add(splits[i]); + } + return labels; + } + + RMContainer mockContainer(ApplicationAttemptId appAttId, int id, + Resource r, int priority, Set labels) { ContainerId cId = ContainerId.newInstance(appAttId, id); Container c = mock(Container.class); when(c.getResource()).thenReturn(r); when(c.getPriority()).thenReturn(Priority.create(priority)); + when(c.getNodeId()).thenReturn( + NodeId.newInstance(createHostNameFromLabel(labels), 0)); RMContainer mC = mock(RMContainer.class); when(mC.getContainerId()).thenReturn(cId); when(mC.getContainer()).thenReturn(c); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index 460f35e..b59f7e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -21,13 +21,18 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedAction; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -46,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -58,6 +64,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMWithAMS; import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; @@ -74,6 +81,8 @@ import org.junit.Assert; import org.junit.Test; +import com.google.common.collect.Sets; + public class TestSchedulerUtils { private static final Log LOG = LogFactory.getLog(TestSchedulerUtils.class); @@ -173,6 +182,142 @@ public void testNormalizeRequestWithDominantResourceCalculator() { assertEquals(1, ask.getCapability().getVirtualCores()); assertEquals(2048, ask.getCapability().getMemory()); } + + @Test (timeout = 30000) + public void testValidateResourceRequestWithErrorLabelsPermission() + throws IOException { + // mock queue and scheduler + YarnScheduler scheduler = mock(YarnScheduler.class); + Set labels = Sets.newHashSet("x", "y"); + QueueInfo queueInfo = mock(QueueInfo.class); + when(queueInfo.getQueueName()).thenReturn("queue"); + when(queueInfo.getLabels()).thenReturn(labels); + when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean())) + .thenReturn(queueInfo); + + Resource maxResource = Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + + // queue has labels, success + try { + Resource resource = Resources.createResource( + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + resReq.setLabelExpression("x"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setLabelExpression("x && y"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setLabelExpression("y"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setLabelExpression(""); 
+ SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setLabelExpression(" "); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + } catch (InvalidResourceRequestException e) { + e.printStackTrace(); + fail("Should be valid when request labels is a subset of queue labels"); + } + + // queue has labels, failed + try { + Resource resource = Resources.createResource( + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + resReq.setLabelExpression("z"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + fail("Should fail"); + } catch (InvalidResourceRequestException e) { + } + + try { + Resource resource = Resources.createResource( + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + resReq.setLabelExpression("x && y && z"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + fail("Should fail"); + } catch (InvalidResourceRequestException e) { + } + + // queue doesn't have label, succeed + labels.clear(); + try { + Resource resource = Resources.createResource( + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setLabelExpression(""); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setLabelExpression(" "); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + } catch (InvalidResourceRequestException e) { + e.printStackTrace(); + 
fail("Should be valid when request labels is empty"); + } + + // queue doesn't have label, failed + try { + Resource resource = Resources.createResource( + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + resReq.setLabelExpression("x"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + fail("Should fail"); + } catch (InvalidResourceRequestException e) { + } + + // queue is "*", always succeeded + labels.add(NodeLabelManager.ANY); + try { + Resource resource = Resources.createResource( + 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + resReq.setLabelExpression("x"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setLabelExpression("x && y && z"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + + resReq.setLabelExpression("z"); + SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", + scheduler); + } catch (InvalidResourceRequestException e) { + e.printStackTrace(); + fail("Should be valid when request labels is empty"); + } + } @Test (timeout = 30000) public void testValidateResourceRequest() { @@ -187,7 +332,7 @@ public void testValidateResourceRequest() { YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, null); } catch (InvalidResourceRequestException e) { fail("Zero memory should be accepted"); } @@ -199,7 +344,7 @@ public void testValidateResourceRequest() { 0); ResourceRequest 
resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, null); } catch (InvalidResourceRequestException e) { fail("Zero vcores should be accepted"); } @@ -211,7 +356,7 @@ public void testValidateResourceRequest() { YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, null); } catch (InvalidResourceRequestException e) { fail("Max memory should be accepted"); } @@ -223,7 +368,7 @@ public void testValidateResourceRequest() { YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, null); } catch (InvalidResourceRequestException e) { fail("Max vcores should not be accepted"); } @@ -235,7 +380,7 @@ public void testValidateResourceRequest() { YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, null); fail("Negative memory should not be accepted"); } catch (InvalidResourceRequestException e) { // expected @@ -248,7 +393,7 @@ public void testValidateResourceRequest() { -1); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, 
maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, null); fail("Negative vcores should not be accepted"); } catch (InvalidResourceRequestException e) { // expected @@ -261,7 +406,7 @@ public void testValidateResourceRequest() { YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, null); fail("More than max memory should not be accepted"); } catch (InvalidResourceRequestException e) { // expected @@ -275,7 +420,7 @@ public void testValidateResourceRequest() { + 1); ResourceRequest resReq = BuilderUtils.newResourceRequest( mock(Priority.class), ResourceRequest.ANY, resource, 1); - SchedulerUtils.validateResourceRequest(resReq, maxResource); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, null); fail("More than max vcores should not be accepted"); } catch (InvalidResourceRequestException e) { // expected diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java index a9a9975..4dc4632 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -65,6 +65,7 @@ LeafQueue queue; private final 
ResourceCalculator resourceCalculator = new DefaultResourceCalculator(); + RMContext rmContext = mock(RMContext.class); @Before public void setUp() throws IOException { @@ -72,8 +73,7 @@ public void setUp() throws IOException { new CapacitySchedulerConfiguration(); YarnConfiguration conf = new YarnConfiguration(); setupQueueConfiguration(csConf); - - + CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class); when(csContext.getConfiguration()).thenReturn(csConf); when(csContext.getConf()).thenReturn(conf); @@ -89,6 +89,8 @@ public void setUp() throws IOException { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); + RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf); containerTokenSecretManager.rollMasterKey(); @@ -99,7 +101,7 @@ public void setUp() throws IOException { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, "root", queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); queue = spy(new LeafQueue(csContext, A, root, null)); @@ -162,6 +164,7 @@ public void testLimitsComputation() throws Exception { when(csContext.getQueueComparator()). 
thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()).thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); // Say cluster has 100 nodes of 16G each Resource clusterResource = Resources.createResource(100 * 16 * GB, 100 * 16); @@ -170,7 +173,7 @@ public void testLimitsComputation() throws Exception { Map queues = new HashMap(); CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, "root", - queues, queues, TestUtils.spyHook); + queues, queues, TestUtils.spyHook, null); LeafQueue queue = (LeafQueue)queues.get(A); @@ -259,7 +262,7 @@ public void testLimitsComputation() throws Exception { queues = new HashMap(); root = CapacityScheduler.parseQueue(csContext, csConf, null, "root", - queues, queues, TestUtils.spyHook); + queues, queues, TestUtils.spyHook, null); clusterResource = Resources.createResource(100 * 16 * GB); queue = (LeafQueue)queues.get(A); @@ -285,7 +288,7 @@ public void testLimitsComputation() throws Exception { queues = new HashMap(); root = CapacityScheduler.parseQueue(csContext, csConf, null, "root", - queues, queues, TestUtils.spyHook); + queues, queues, TestUtils.spyHook, null); queue = (LeafQueue)queues.get(A); assertEquals(9999, (int)csConf.getMaximumApplicationsPerQueue(queue.getQueuePath())); @@ -475,6 +478,7 @@ public void testHeadroom() throws Exception { when(csContext.getQueueComparator()). 
thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()).thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); // Say cluster has 100 nodes of 16G each Resource clusterResource = Resources.createResource(100 * 16 * GB); @@ -482,7 +486,7 @@ public void testHeadroom() throws Exception { Map queues = new HashMap(); CapacityScheduler.parseQueue(csContext, csConf, null, "root", - queues, queues, TestUtils.spyHook); + queues, queues, TestUtils.spyHook, null); // Manipulate queue 'a' LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue)queues.get(A)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSQueueUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSQueueUtils.java index 7260afd..297c551 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSQueueUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSQueueUtils.java @@ -19,38 +19,19 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.reset; import 
static org.mockito.Mockito.when; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.junit.Assert; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; -import org.junit.After; -import org.junit.Before; import org.junit.Test; -import org.mockito.InOrder; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; public class TestCSQueueUtils { @@ -88,6 +69,8 @@ public void runInvalidDivisorTest(boolean useDominant) throws Exception { thenReturn(Resources.createResource(GB, 1)); when(csContext.getMaximumResourceCapability()). thenReturn(Resources.createResource(0, 0)); + RMContext rmContext = mock(RMContext.class); + when(csContext.getRMContext()).thenReturn(rmContext); final String L1Q1 = "L1Q1"; csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {L1Q1}); @@ -129,6 +112,8 @@ public void testAbsoluteMaxAvailCapacityNoUse() throws Exception { thenReturn(Resources.createResource(GB, 1)); when(csContext.getMaximumResourceCapability()). thenReturn(Resources.createResource(16*GB, 32)); + RMContext rmContext = mock(RMContext.class); + when(csContext.getRMContext()).thenReturn(rmContext); final String L1Q1 = "L1Q1"; csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {L1Q1}); @@ -174,6 +159,9 @@ public void testAbsoluteMaxAvailCapacityWithUse() throws Exception { when(csContext.getMaximumResourceCapability()). 
thenReturn(Resources.createResource(16*GB, 32)); + RMContext rmContext = mock(RMContext.class); + when(csContext.getRMContext()).thenReturn(rmContext); + final String L1Q1 = "L1Q1"; final String L1Q2 = "L1Q2"; final String L2Q1 = "L2Q1"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java index fd14ef6..45e6c5a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java @@ -97,6 +97,7 @@ public void setUp() throws Exception { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). 
thenReturn(resourceComparator); + when(csContext.getRMContext()).thenReturn(rmContext); } private FiCaSchedulerApp getMockApplication(int appId, String user) { @@ -214,7 +215,7 @@ public void testSortedQueues() throws Exception { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); // Setup some nodes final int memoryPerNode = 10; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java index a9bfc2f..c84aa9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -20,12 +20,14 @@ import java.util.ArrayList; import java.util.List; +import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.SecurityUtilTestHelper; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -39,18 +41,27 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import 
org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.RMSecretManagerService; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.TestFifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.label.MemoryNodeLabelManager; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; + public class TestContainerAllocation { @@ -251,4 +262,415 @@ protected RMSecretManagerService createRMSecretManagerService() { rm1.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.ALLOCATED); MockRM.launchAndRegisterAM(app1, rm1, nm1); } + + private Configuration getConfigurationWithDefaultQueueLabels( + Configuration config) { + CapacitySchedulerConfiguration conf = + new CapacitySchedulerConfiguration(config); + + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"}); + + // root can access anything + 
conf.setLabels(CapacitySchedulerConfiguration.ROOT, toSet("*")); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 15); + conf.setLabels(A, toSet("x")); + conf.setDefaultLabelExpression(A, "x"); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 20); + conf.setLabels(B, toSet("y")); + conf.setDefaultLabelExpression(B, "y"); + + final String C = CapacitySchedulerConfiguration.ROOT + ".c"; + conf.setCapacity(C, 70); + conf.setMaximumCapacity(C, 70); + conf.setLabels(C, NodeLabelManager.EMPTY_STRING_SET); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + conf.setQueues(A, new String[] {"a1"}); + conf.setCapacity(A1, 100); + conf.setMaximumCapacity(A1, 100); + + final String B1 = B + ".b1"; + conf.setQueues(B, new String[] {"b1"}); + conf.setCapacity(B1, 100); + conf.setMaximumCapacity(B1, 100); + + final String C1 = C + ".c1"; + conf.setQueues(C, new String[] {"c1"}); + conf.setCapacity(C1, 100); + conf.setMaximumCapacity(C1, 100); + + return conf; + } + + private Configuration getConfigurationWithQueueLabels(Configuration config) { + CapacitySchedulerConfiguration conf = + new CapacitySchedulerConfiguration(config); + + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"}); + + // root can access anything + conf.setLabels(CapacitySchedulerConfiguration.ROOT, toSet("*")); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 15); + conf.setLabels(A, toSet("x")); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 20); + conf.setLabels(B, toSet("y")); + + final String C = CapacitySchedulerConfiguration.ROOT + ".c"; + conf.setCapacity(C, 70); + conf.setMaximumCapacity(C, 70); + conf.setLabels(C, NodeLabelManager.EMPTY_STRING_SET); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + 
conf.setQueues(A, new String[] {"a1"}); + conf.setCapacity(A1, 100); + conf.setMaximumCapacity(A1, 100); + + final String B1 = B + ".b1"; + conf.setQueues(B, new String[] {"b1"}); + conf.setCapacity(B1, 100); + conf.setMaximumCapacity(B1, 100); + + final String C1 = C + ".c1"; + conf.setQueues(C, new String[] {"c1"}); + conf.setCapacity(C1, 100); + conf.setMaximumCapacity(C1, 100); + + return conf; + } + + private void checkTaskContainersHost(ApplicationAttemptId attemptId, + ContainerId containerId, ResourceManager rm, String host) { + YarnScheduler scheduler = rm.getRMContext().getScheduler(); + SchedulerAppReport appReport = scheduler.getSchedulerAppInfo(attemptId); + + Assert.assertTrue(appReport.getLiveContainers().size() > 0); + for (RMContainer c : appReport.getLiveContainers()) { + if (c.getContainerId().equals(containerId)) { + Assert.assertEquals(host, c.getAllocatedNode().getHost()); + } + } + } + + private Set toSet(E... elements) { + Set set = Sets.newHashSet(elements); + return set; + } + + private Configuration getComplexConfigurationWithQueueLabels( + Configuration config) { + CapacitySchedulerConfiguration conf = + new CapacitySchedulerConfiguration(config); + + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"}); + + // root can access anything + conf.setLabels(CapacitySchedulerConfiguration.ROOT, toSet("*")); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 50); + conf.setMaximumCapacity(A, 50); + conf.setLabels(A, toSet("x")); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 50); + conf.setMaximumCapacity(B, 50); + conf.setLabels(B, toSet("y")); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + conf.setQueues(A, new String[] {"a1"}); + conf.setCapacity(A1, 100); + conf.setMaximumCapacity(A1, 100); + conf.setLabels(A1, toSet("x", "y")); + + conf.setQueues(B, new String[] {"b1", "b2"}); + final String B1 = 
B + ".b1"; + conf.setCapacity(B1, 10); + conf.setMaximumCapacity(B1, 20); + conf.setLabels(B1, NodeLabelManager.EMPTY_STRING_SET); + + final String B2 = B + ".b2"; + conf.setCapacity(B2, 90); + conf.setMaximumCapacity(B2, 90); + conf.setLabels(B2, toSet("y", "z")); + + return conf; + } + + @Test (timeout = 300000) + public void testContainerAllocateWithComplexLabels() throws Exception { + // make it harder .. + final NodeLabelManager mgr = new MemoryNodeLabelManager(); + mgr.init(conf); + + /* + * Queue structure: + * root (*) + * / \ + * a(x) 50% b(y) 50% + * / / \ + * a1 (x,y) b1(NO) b2(y,z) + * 100% 10% 90% + * + * Node structure: + * h1 : x + * h2 : x, y + * h3 : y + * h4 : y, z + * h5 : NO + * + * Each node can only allocate two containers + */ + + // set node -> label + mgr.addLabels(ImmutableSet.of("x", "y", "z")); + mgr.setLabelsOnMultipleNodes(ImmutableMap.of("h1", toSet("x"), "h2", + toSet("x", "y"), "h3", toSet("y"), "h4", toSet("y", "z"), "h5", + NodeLabelManager.EMPTY_STRING_SET)); + + // inject node label manager + MockRM rm1 = new MockRM(getComplexConfigurationWithQueueLabels(conf)) { + @Override + public NodeLabelManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 2048); + MockNM nm2 = rm1.registerNode("h2:1234", 2048); + MockNM nm3 = rm1.registerNode("h3:1234", 2048); + MockNM nm4 = rm1.registerNode("h4:1234", 2048); + MockNM nm5 = rm1.registerNode("h5:1234", 2048); + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check all container will + // be allocated in h1 + RMApp app1 = rm1.submitApp(1024, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // request a container (label = x && y). 
can only allocate on nm2 + am1.allocate("*", 1024, 1, new ArrayList(), "x && y"); + containerId = + ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue b1 (label = y), and check all container will + // be allocated in h2 + RMApp app2 = rm1.submitApp(1024, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm5); + + // request a container for AM, will succeed + // and now b1's queue capacity will be used, cannot allocate more containers + am2.allocate("*", 1024, 1, new ArrayList()); + containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm4, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertFalse(rm1.waitForState(nm5, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + + // launch an app to queue b2 + RMApp app3 = rm1.submitApp(1024, "app", "user", null, "b2"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm5); + + // request a container. try to allocate on nm1 (label = x) and nm3 (label = + // y,z). Will successfully allocate on nm3 + am3.allocate("*", 1024, 1, new ArrayList(), "y"); + containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + // try to allocate container (request label = y && z) on nm3 (label = y) and + // nm4 (label = y,z). Will sucessfully allocate on nm4 only. 
+ am3.allocate("*", 1024, 1, new ArrayList(), "y && z"); + containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 3); + Assert.assertFalse(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm4, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h4"); + + rm1.close(); + } + + @Test (timeout = 120000) + public void testContainerAllocateWithLabels() throws Exception { + final NodeLabelManager mgr = new MemoryNodeLabelManager(); + mgr.init(conf); + + // set node -> label + mgr.addLabels(ImmutableSet.of("x", "y")); + mgr.setLabelsOnMultipleNodes(ImmutableMap.of("h1", toSet("x"), + "h2", toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) { + @Override + public NodeLabelManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y + MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check all container will + // be allocated in h1 + RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // request a container. 
+ am1.allocate("*", 1024, 1, new ArrayList(), "x"); + containerId = + ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + // launch an app to queue b1 (label = y), and check all container will + // be allocated in h2 + RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + // request a container. + am2.allocate("*", 1024, 1, new ArrayList(), "y"); + containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue c1 (label = ""), and check all container will + // be allocated in h3 + RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3); + + // request a container. + am3.allocate("*", 1024, 1, new ArrayList()); + containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + rm1.close(); + } + + @Test (timeout = 120000) + public void testContainerAllocateWithDefaultQueueLabels() throws Exception { + // This test is pretty much similar to testContainerAllocateWithLabel. 
+ // Difference is, this test doesn't specify label expression in ResourceRequest, + // instead, it uses default queue label expression + + final NodeLabelManager mgr = new MemoryNodeLabelManager(); + mgr.init(conf); + + // set node -> label + mgr.addLabels(ImmutableSet.of("x", "y")); + mgr.setLabelsOnMultipleNodes(ImmutableMap.of("h1", toSet("x"), + "h2", toSet("y"))); + + // inject node label manager + MockRM rm1 = new MockRM(getConfigurationWithDefaultQueueLabels(conf)) { + @Override + public NodeLabelManager createNodeLabelManager() { + return mgr; + } + }; + + rm1.getRMContext().setNodeLabelManager(mgr); + rm1.start(); + MockNM nm1 = rm1.registerNode("h1:1234", 8000); // label = x + MockNM nm2 = rm1.registerNode("h2:1234", 8000); // label = y + MockNM nm3 = rm1.registerNode("h3:1234", 8000); // label = + + ContainerId containerId; + + // launch an app to queue a1 (label = x), and check all container will + // be allocated in h1 + RMApp app1 = rm1.submitApp(200, "app", "user", null, "a1"); + MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); + + // request a container. + am1.allocate("*", 1024, 1, new ArrayList()); + containerId = + ContainerId.newInstance(am1.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am1.getApplicationAttemptId(), containerId, rm1, + "h1"); + + // launch an app to queue b1 (label = y), and check all container will + // be allocated in h2 + RMApp app2 = rm1.submitApp(200, "app", "user", null, "b1"); + MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2); + + // request a container. 
+ am2.allocate("*", 1024, 1, new ArrayList()); + containerId = ContainerId.newInstance(am2.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm1, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am2.getApplicationAttemptId(), containerId, rm1, + "h2"); + + // launch an app to queue c1 (label = ""), and check all container will + // be allocated in h3 + RMApp app3 = rm1.submitApp(200, "app", "user", null, "c1"); + MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm3); + + // request a container. + am3.allocate("*", 1024, 1, new ArrayList()); + containerId = ContainerId.newInstance(am3.getApplicationAttemptId(), 2); + Assert.assertFalse(rm1.waitForState(nm2, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + Assert.assertTrue(rm1.waitForState(nm3, containerId, + RMContainerState.ALLOCATED, 10 * 1000)); + checkTaskContainersHost(am3.getApplicationAttemptId(), containerId, rm1, + "h3"); + + rm1.close(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index d5eb933..c1bb845 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -39,8 +39,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; -import 
org.junit.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.UserGroupInformation; @@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -77,6 +78,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; @@ -131,6 +133,7 @@ public void setUp() throws Exception { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). 
thenReturn(resourceCalculator); + when(csContext.getRMContext()).thenReturn(rmContext); RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf); containerTokenSecretManager.rollMasterKey(); @@ -141,7 +144,7 @@ public void setUp() throws Exception { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); cs.setRMContext(rmContext); cs.init(csConf); @@ -731,6 +734,77 @@ public void testHeadroomWithMaxCap() throws Exception { a.assignContainers(clusterResource, node_1); assertEquals(1*GB, app_2.getHeadroom().getMemory()); // hit queue max-cap } + + @SuppressWarnings("unchecked") + @Test + public void testHeadroomWithLabel() throws Exception { + NodeLabelManager nlm = mock(NodeLabelManager.class); + + // Mock the queue + LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A)); + + //unset maxCapacity + a.setMaxCapacity(1.0f); + + // Users + final String user_0 = "user_0"; + + // Submit applications + final ApplicationAttemptId appAttemptId_0 = + TestUtils.getMockApplicationAttemptId(0, 0); + FiCaSchedulerApp app_0 = + new FiCaSchedulerApp(appAttemptId_0, user_0, a, + a.getActiveUsersManager(), rmContext); + a.submitApplicationAttempt(app_0, user_0); + + // Setup some nodes + String host_0 = "127.0.0.1"; + FiCaSchedulerNode node_0 = + TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 64 * GB); + + final int numNodes = 1; + Resource clusterResource = Resources.createResource(numNodes * (64 * GB), 1); + when(csContext.getNumClusterNodes()).thenReturn(numNodes); + + // Setup resource-requests + Priority priority = TestUtils.createMockPriority(1); + app_0.updateResourceRequests(Collections.singletonList(TestUtils + .createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, + recordFactory))); + + /** + * Start testing... 
+ */ + + // Set user-limit + a.setUserLimit(100); + a.setUserLimitFactor(1); + + // 1 container to user_0 + a.assignContainers(clusterResource, node_0); + assertEquals(1 * GB, a.getUsedResources().getMemory()); + assertEquals(1 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(5 * GB, app_0.getHeadroom().getMemory()); // User limit = 6G + + // mock getQueueResource to 4999 MB + when( + nlm.getQueueResource(any(String.class), any(Set.class), + any(Resource.class))).thenReturn(Resource.newInstance(4999, 1)); + a.setNodeLabelManager(nlm); + + // do a resource allocation again + app_0.updateResourceRequests(Collections.singletonList(TestUtils + .createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, + recordFactory))); + a.assignContainers(clusterResource, node_0); + + // current headroom should be + // Headroom = min(6G (user-limit), 4G (queueLabelResource)) - + // 2G (used-resource) = 2G + assertEquals(2 * GB, a.getUsedResources().getMemory()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, app_0.getHeadroom().getMemory()); + } @Test public void testSingleQueueWithMultipleUsers() throws Exception { @@ -1682,7 +1756,7 @@ public void testActivateApplicationAfterQueueRefresh() throws Exception { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, newQueues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); queues = newQueues; root.reinitialize(newRoot, cs.getClusterResource()); @@ -1707,7 +1781,7 @@ public void testNodeLocalityAfterQueueRefresh() throws Exception { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, newQueues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); queues = newQueues; root.reinitialize(newRoot, cs.getClusterResource()); @@ -2025,6 +2099,7 @@ public void testMaxAMResourcePerQueuePercentAfterQueueRefresh() Resource clusterResource = Resources .createResource(100 * 16 * GB, 100 * 
32); CapacitySchedulerContext csContext = mockCSContext(csConf, clusterResource); + when(csContext.getRMContext()).thenReturn(rmContext); csConf.setFloat(CapacitySchedulerConfiguration. MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.1f); ParentQueue root = new ParentQueue(csContext, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index fa9edb1..ea4b3d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -94,6 +94,7 @@ public void setUp() throws Exception { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). 
thenReturn(resourceComparator); + when(csContext.getRMContext()).thenReturn(rmContext); } private static final String A = "a"; @@ -203,7 +204,7 @@ public void testSingleLevelQueues() throws Exception { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); // Setup some nodes final int memoryPerNode = 10; @@ -297,7 +298,7 @@ public void testSingleLevelQueuesPrecision() throws Exception { try { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); } catch (IllegalArgumentException ie) { exceptionOccured = true; } @@ -311,7 +312,7 @@ public void testSingleLevelQueuesPrecision() throws Exception { try { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); } catch (IllegalArgumentException ie) { exceptionOccured = true; } @@ -325,7 +326,7 @@ public void testSingleLevelQueuesPrecision() throws Exception { try { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); } catch (IllegalArgumentException ie) { exceptionOccured = true; } @@ -402,7 +403,7 @@ public void testMultiLevelQueues() throws Exception { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); // Setup some nodes final int memoryPerNode = 10; @@ -518,7 +519,7 @@ public void testQueueCapacitySettingChildZero() throws Exception { Map queues = new HashMap(); CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); } @Test (expected=IllegalArgumentException.class) @@ -535,7 
+536,7 @@ public void testQueueCapacitySettingParentZero() throws Exception { Map queues = new HashMap(); CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); } @Test @@ -557,7 +558,7 @@ public void testQueueCapacityZero() throws Exception { try { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); } catch (IllegalArgumentException e) { fail("Failed to create queues with 0 capacity: " + e); } @@ -573,7 +574,7 @@ public void testOffSwitchScheduling() throws Exception { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); // Setup some nodes final int memoryPerNode = 10; @@ -639,7 +640,7 @@ public void testOffSwitchSchedulingMultiLevelQueues() throws Exception { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); // Setup some nodes final int memoryPerNode = 10; @@ -723,7 +724,7 @@ public void testQueueAcl() throws Exception { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - TestUtils.spyHook); + TestUtils.spyHook, null); UserGroupInformation user = UserGroupInformation.getCurrentUser(); // Setup queue configs diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java index a3b990c..70e417b 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java @@ -18,23 +18,40 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; -import org.junit.Assert; +import java.io.IOException; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Matchers.any; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; +import org.apache.hadoop.yarn.server.resourcemanager.label.NodeLabelManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; +import org.junit.Assert; +import org.junit.Before; import org.junit.Test; +import com.google.common.collect.ImmutableSet; + public class TestQueueParsing { private static final Log LOG = LogFactory.getLog(TestQueueParsing.class); private static final double DELTA = 0.000001; + private NodeLabelManager nodeLabelManager; + + @Before + public void setup() { + nodeLabelManager = mock(NodeLabelManager.class); + when(nodeLabelManager.containsLabel(any(String.class))).thenReturn(true); + } + @Test public void testQueueParsing() throws Exception { CapacitySchedulerConfiguration csConf = @@ -202,4 +219,150 @@ public void testMaxCapacity() throws Exception { capacityScheduler.stop(); } 
+ private void setupQueueConfigurationWithoutLabels(CapacitySchedulerConfiguration conf) { + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"}); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 15); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 90); + + LOG.info("Setup top-level queues"); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + final String A2 = A + ".a2"; + conf.setQueues(A, new String[] {"a1", "a2"}); + conf.setCapacity(A1, 30); + conf.setMaximumCapacity(A1, 45); + conf.setCapacity(A2, 70); + conf.setMaximumCapacity(A2, 85); + + final String B1 = B + ".b1"; + final String B2 = B + ".b2"; + final String B3 = B + ".b3"; + conf.setQueues(B, new String[] {"b1", "b2", "b3"}); + conf.setCapacity(B1, 50); + conf.setMaximumCapacity(B1, 85); + conf.setCapacity(B2, 30); + conf.setMaximumCapacity(B2, 35); + conf.setCapacity(B3, 20); + conf.setMaximumCapacity(B3, 35); + } + + private void setupQueueConfigurationWithLabels(CapacitySchedulerConfiguration conf) { + // Define top-level queues + conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b"}); + + final String A = CapacitySchedulerConfiguration.ROOT + ".a"; + conf.setCapacity(A, 10); + conf.setMaximumCapacity(A, 15); + + final String B = CapacitySchedulerConfiguration.ROOT + ".b"; + conf.setCapacity(B, 90); + + LOG.info("Setup top-level queues"); + + // Define 2nd-level queues + final String A1 = A + ".a1"; + final String A2 = A + ".a2"; + conf.setQueues(A, new String[] {"a1", "a2"}); + conf.setLabels(A, ImmutableSet.of("*")); + conf.setCapacity(A1, 30); + conf.setMaximumCapacity(A1, 45); + conf.setCapacity(A2, 70); + conf.setMaximumCapacity(A2, 85); + conf.setLabels(A2, ImmutableSet.of("red")); + + final String B1 = B + ".b1"; + final String B2 = B + ".b2"; + final String B3 = B + ".b3"; + conf.setQueues(B, new 
String[] {"b1", "b2", "b3"}); + conf.setLabels(B, ImmutableSet.of("red", "blue")); + conf.setCapacity(B1, 50); + conf.setMaximumCapacity(B1, 85); + conf.setCapacity(B2, 30); + conf.setMaximumCapacity(B2, 35); + conf.setCapacity(B3, 20); + conf.setMaximumCapacity(B3, 35); + } + + @Test(timeout = 5000) + public void testQueueParsingReinitializeWithLabels() throws IOException { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + setupQueueConfigurationWithoutLabels(csConf); + YarnConfiguration conf = new YarnConfiguration(csConf); + + CapacityScheduler capacityScheduler = new CapacityScheduler(); + RMContextImpl rmContext = + new RMContextImpl(null, null, null, null, null, null, + new RMContainerTokenSecretManager(conf), + new NMTokenSecretManagerInRM(conf), + new ClientToAMTokenSecretManagerInRM(), null); + rmContext.setNodeLabelManager(nodeLabelManager); + capacityScheduler.setConf(conf); + capacityScheduler.setRMContext(rmContext); + capacityScheduler.init(conf); + capacityScheduler.start(); + csConf = new CapacitySchedulerConfiguration(); + setupQueueConfigurationWithLabels(csConf); + conf = new YarnConfiguration(csConf); + capacityScheduler.reinitialize(conf, rmContext); + checkQueueLabels(capacityScheduler); + capacityScheduler.stop(); + } + + private void checkQueueLabels(CapacityScheduler capacityScheduler) { + // by default, label is empty + Assert.assertTrue(capacityScheduler + .getQueue(CapacitySchedulerConfiguration.ROOT).getLabels().isEmpty()); + + // queue-A is * + Assert.assertTrue(capacityScheduler + .getQueue("a").getLabels().contains("*")); + + // queue-A1 inherits A's configuration + Assert.assertTrue(capacityScheduler + .getQueue("a1").getLabels().contains("*")); + + // queue-A2 is "red" + Assert.assertEquals(1, capacityScheduler + .getQueue("a2").getLabels().size()); + Assert.assertTrue(capacityScheduler + .getQueue("a2").getLabels().contains("red")); + + // queue-B is "red"/"blue" + 
Assert.assertTrue(capacityScheduler + .getQueue("b").getLabels().containsAll(ImmutableSet.of("red", "blue"))); + + // queue-B2 inherits "red"/"blue" + Assert.assertTrue(capacityScheduler + .getQueue("b2").getLabels().containsAll(ImmutableSet.of("red", "blue"))); + } + + @Test(timeout = 5000) + public void testQueueParsingWithLabels() throws IOException { + CapacitySchedulerConfiguration csConf = + new CapacitySchedulerConfiguration(); + setupQueueConfigurationWithLabels(csConf); + YarnConfiguration conf = new YarnConfiguration(csConf); + + CapacityScheduler capacityScheduler = new CapacityScheduler(); + RMContextImpl rmContext = + new RMContextImpl(null, null, null, null, null, null, + new RMContainerTokenSecretManager(conf), + new NMTokenSecretManagerInRM(conf), + new ClientToAMTokenSecretManagerInRM(), null); + rmContext.setNodeLabelManager(nodeLabelManager); + capacityScheduler.setConf(conf); + capacityScheduler.setRMContext(rmContext); + capacityScheduler.init(conf); + capacityScheduler.start(); + checkQueueLabels(capacityScheduler); + capacityScheduler.stop(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java new file mode 100644 index 0000000..f8c4df0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodeLabels.java @@ -0,0 +1,257 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.webapp; + +import java.io.StringWriter; +import java.io.StringReader; + +import static org.junit.Assert.*; + +import javax.ws.rs.core.MediaType; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.security.UserGroupInformation; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; +import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; +import org.codehaus.jettison.json.JSONException; +import org.codehaus.jettison.json.JSONObject; +import org.codehaus.jettison.json.JSONArray; +import com.sun.jersey.api.json.JSONJAXBContext; +import com.sun.jersey.api.json.JSONMarshaller; +import com.sun.jersey.api.json.JSONUnmarshaller; +import org.junit.Test; + +import com.google.inject.Guice; +import com.google.inject.Injector; +import 
com.google.inject.servlet.GuiceServletContextListener; +import com.google.inject.servlet.ServletModule; +import com.sun.jersey.api.client.ClientResponse; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; +import com.sun.jersey.test.framework.JerseyTest; +import com.sun.jersey.test.framework.WebAppDescriptor; + +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesToLabelsInfo; +import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo; + +public class TestRMWebServicesNodeLabels extends JerseyTest { + + private static final Log LOG = LogFactory.getLog(TestRMWebServicesNodeLabels.class); + + private static MockRM rm; + private YarnConfiguration conf; + + private Injector injector = Guice.createInjector(new ServletModule() { + @Override + protected void configureServlets() { + bind(JAXBContextResolver.class); + bind(RMWebServices.class); + bind(GenericExceptionHandler.class); + conf = new YarnConfiguration(); + rm = new MockRM(conf); + bind(ResourceManager.class).toInstance(rm); + bind(RMContext.class).toInstance(rm.getRMContext()); + filter("/*").through(TestRMWebServicesAppsModification.TestRMCustomAuthFilter.class); + serve("/*").with(GuiceContainer.class); + } + }); + + public class GuiceServletConfig extends GuiceServletContextListener { + + @Override + protected Injector getInjector() { + return injector; + } + } + + public TestRMWebServicesNodeLabels() { + super(new WebAppDescriptor.Builder( + "org.apache.hadoop.yarn.server.resourcemanager.webapp") + .contextListenerClass(GuiceServletConfig.class) + .filterClass(com.google.inject.servlet.GuiceFilter.class) + .contextPath("jersey-guice-filter").servletPath("/").build()); + } + + @Test + public void testNodeLabels() throws JSONException, Exception { + + String userName = UserGroupInformation.getCurrentUser().getShortUserName(); + WebResource r = resource(); + + ClientResponse response; + JSONObject json; + 
JSONArray jarr; + String responseString; + + //Add a label + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("add-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"labels\":\"a\"}", MediaType.APPLICATION_JSON).post(ClientResponse.class); + + //Verify it is present + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("all-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("a", json.getString("labels")); + + //Add another + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("add-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"labels\":\"b\"}", MediaType.APPLICATION_JSON).post(ClientResponse.class); + + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("all-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + + //Verify + jarr = json.getJSONArray("labels"); + assertEquals(2, jarr.length()); + + //Remove one + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("remove-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity("{\"labels\":\"a\"}", MediaType.APPLICATION_JSON).post(ClientResponse.class); + + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("all-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + //Verify + assertEquals("b", 
json.getString("labels")); + + //Add a node->label mapping + NodesToLabelsInfo nsli = new NodesToLabelsInfo(); + NodeToLabelsInfo nli = new NodeToLabelsInfo("node1"); + nli.getLabels().add("b"); + nsli.add(nli); + + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("set-node-to-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity(toJson(nsli, NodesToLabelsInfo.class), MediaType.APPLICATION_JSON).post(ClientResponse.class); + + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("all-nodes-to-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + //Verify + nsli = (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(1, nsli.getNodeToLabelsInfos().size()); + nli = nsli.getNodeToLabelsInfos().get(0); + assertEquals("node1", nli.getNode()); + assertEquals(1, nli.getLabels().size()); + assertTrue(nli.getLabels().contains("b")); + + //Get with filter which should suppress results + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("all-nodes-to-labels").queryParam("labels", "a") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + nsli = (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(0, nsli.getNodeToLabelsInfos().size()); + + //Get with filter which should include results + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("all-nodes-to-labels").queryParam("labels", "b") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + 
.get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + nsli = (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(1, nsli.getNodeToLabelsInfos().size()); + + //"Remove" by setting with an empty label set + nli = nsli.getNodeToLabelsInfos().get(0); + nli.getLabels().remove("b"); + + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("set-node-to-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .entity(toJson(nsli, NodesToLabelsInfo.class), MediaType.APPLICATION_JSON).post(ClientResponse.class); + + response = r.path("ws").path("v1").path("cluster") + .path("labels").path("all-nodes-to-labels") + .queryParam("user.name", userName) + .accept(MediaType.APPLICATION_JSON) + .get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + responseString = response.getEntity(String.class); + LOG.info(responseString); + nsli = (NodesToLabelsInfo) fromJson(responseString, NodesToLabelsInfo.class); + assertEquals(1, nsli.getNodeToLabelsInfos().size()); + nli = nsli.getNodeToLabelsInfos().get(0); + assertTrue(nli.getLabels().isEmpty()); + + } + + private String toJson(Object nsli, Class klass) throws Exception { + StringWriter sw = new StringWriter(); + JSONJAXBContext ctx = new JSONJAXBContext(klass); + JSONMarshaller jm = ctx.createJSONMarshaller(); + jm.marshallToJSON(nsli, sw); + return sw.toString(); + } + + private Object fromJson(String json, Class klass) throws Exception { + StringReader sr = new StringReader(json); + JSONJAXBContext ctx = new JSONJAXBContext(klass); + JSONUnmarshaller jm = ctx.createJSONUnmarshaller(); + return jm.unmarshalFromJSON(sr, klass); + } + +}