+ * The interface used by clients to get attributes to nodes mappings
+ * available in ResourceManager.
+ *
+ *
+ * @param request request to get details of attributes to nodes mapping.
+ * @return Response containing the details of attributes to nodes mappings.
+ * @throws YarnException if any error happens inside YARN
+ * @throws IOException in case of other errors.
+ */
+ @Public
+ @Unstable
+ GetAttributesToNodesResponse getAttributesToNodes(
+ GetAttributesToNodesRequest request) throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by clients to get node attributes available in
+ * ResourceManager.
+ *
+ *
+ * @param request request to get node attributes collection of this cluster.
+ * @return Response containing node attributes collection.
+ * @throws YarnException if any error happens inside YARN.
+ * @throws IOException in case of other errors.
+ */
+ @Public
+ @Unstable
+ GetClusterNodeAttributesResponse getClusterNodeAttributes(
+ GetClusterNodeAttributesRequest request)
+ throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by clients to get node to attributes mappings
+ * in the existing cluster.
+ *
+ *
+ * @param request request to get nodes to attributes mapping.
+ * @return nodes to attributes mappings.
+ * @throws YarnException if any error happens inside YARN.
+ * @throws IOException in case of other errors.
+ */
+ @Public
+ @Unstable
+ GetNodesToAttributesResponse getNodesToAttributes(
+ GetNodesToAttributesRequest request) throws YarnException, IOException;
}
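For illustration only (outside the patch itself): a minimal client-side sketch of the three new calls, assuming an already connected ApplicationClientProtocol proxy; the attribute key rm.yarn.io/GPU and the hostname are placeholders.

import java.util.Collections;

import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
import org.apache.hadoop.yarn.api.records.NodeAttributeKey;

public class NodeAttributeQueriesSketch {
  public static void query(ApplicationClientProtocol client) throws Exception {
    // 1. All attributes currently known to the ResourceManager.
    System.out.println(client.getClusterNodeAttributes(
        GetClusterNodeAttributesRequest.newInstance()).getNodeAttributes());

    // 2. Nodes (with values) carrying one particular attribute key.
    NodeAttributeKey key = NodeAttributeKey.newInstance("rm.yarn.io", "GPU");
    System.out.println(client.getAttributesToNodes(
        GetAttributesToNodesRequest.newInstance(Collections.singleton(key)))
        .getAttributesToNodes());

    // 3. All attributes mapped to one host.
    System.out.println(client.getNodesToAttributes(
        GetNodesToAttributesRequest.newInstance(
            Collections.singleton("host1.example.com")))
        .getNodeToAttributes());
  }
}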
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesRequest.java
new file mode 100644
index 00000000000..94814e9053e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesRequest.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * The request from clients to get node to attribute value mapping for all or
+ * given set of Node AttributeKey's in the cluster from the
+ * ResourceManager.
+ *
+ *
+ * @see ApplicationClientProtocol#getAttributesToNodes
+ * (GetAttributesToNodesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetAttributesToNodesRequest {
+
+ public static GetAttributesToNodesRequest newInstance() {
+ return Records.newRecord(GetAttributesToNodesRequest.class);
+ }
+
+ public static GetAttributesToNodesRequest newInstance(
+ Set<NodeAttributeKey> attributes) {
+ GetAttributesToNodesRequest request =
+ Records.newRecord(GetAttributesToNodesRequest.class);
+ request.setNodeAttributes(attributes);
+ return request;
+ }
+
+ /**
+ * Set node attributeKeys for which the mapping of hostname to attribute value
+ * is required.
+ *
+ * @param attributes Set of NodeAttributeKey provided.
+ */
+ @Public
+ @Unstable
+ public abstract void setNodeAttributes(Set<NodeAttributeKey> attributes);
+
+ /**
+ * Get node attributeKeys for which mapping of hostname to attribute value is
+ * required.
+ *
+ * @return Set of NodeAttributeKey.
+ */
+ @Public
+ @Unstable
+ public abstract Set<NodeAttributeKey> getNodeAttributes();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesResponse.java
new file mode 100644
index 00000000000..9bd529f3c4c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesResponse.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * The response sent by the ResourceManager to a client requesting
+ * node to attribute value mapping for all or given set of Node AttributeKey's.
+ *
+ *
+ * @see ApplicationClientProtocol#getAttributesToNodes
+ * (GetAttributesToNodesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetAttributesToNodesResponse {
+ public static GetAttributesToNodesResponse newInstance(
+ Map<NodeAttributeKey, List<NodeToAttributeValue>> map) {
+ GetAttributesToNodesResponse response =
+ Records.newRecord(GetAttributesToNodesResponse.class);
+ response.setAttributeToNodes(map);
+ return response;
+ }
+
+ @Public
+ @Evolving
+ public abstract void setAttributeToNodes(
+ Map<NodeAttributeKey, List<NodeToAttributeValue>> map);
+
+ /**
+ * Get mapping of NodeAttributeKey to its associated list of
+ * NodeToAttributeValue (node to attribute value).
+ *
+ * @return Map of NodeAttributeKey to List of NodeToAttributeValue,
+ * i.e. node attributes to lists of node-to-attribute values.
+ */
+ @Public
+ @Evolving
+ public abstract Map<NodeAttributeKey,
+ List<NodeToAttributeValue>> getAttributesToNodes();
+}
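As a sketch of how the mapping above might be consumed (the printed format is illustrative only):

import java.util.List;
import java.util.Map;

import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;

public class AttributesToNodesPrinterSketch {
  public static void print(GetAttributesToNodesResponse response) {
    for (Map.Entry<NodeAttributeKey, List<NodeToAttributeValue>> entry
        : response.getAttributesToNodes().entrySet()) {
      NodeAttributeKey key = entry.getKey();
      for (NodeToAttributeValue value : entry.getValue()) {
        // e.g. rm.yarn.io/GPU on host1 = nvidia
        System.out.println(key.getAttributePrefix() + "/"
            + key.getAttributeName() + " on " + value.getHostname()
            + " = " + value.getAttributeValue());
      }
    }
  }
}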
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesRequest.java
new file mode 100644
index 00000000000..ca81f9a0841
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesRequest.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import static org.apache.hadoop.classification.InterfaceAudience.*;
+import static org.apache.hadoop.classification.InterfaceStability.*;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * The request from clients to get node attributes in the cluster from the
+ * ResourceManager.
+ *
+ *
+ * @see ApplicationClientProtocol#getClusterNodeAttributes
+ * (GetClusterNodeAttributesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetClusterNodeAttributesRequest {
+
+ /**
+ * Create new instance of GetClusterNodeAttributesRequest.
+ *
+ * @return GetClusterNodeAttributesRequest is returned.
+ */
+ public static GetClusterNodeAttributesRequest newInstance() {
+ return Records.newRecord(GetClusterNodeAttributesRequest.class);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesResponse.java
new file mode 100644
index 00000000000..b0ccd906a32
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesResponse.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * The response sent by the ResourceManager to a client requesting
+ * node attributes in the cluster.
+ *
+ *
+ * @see ApplicationClientProtocol#getClusterNodeAttributes
+ * (GetClusterNodeAttributesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetClusterNodeAttributesResponse {
+
+ /**
+ * Create instance of GetClusterNodeAttributesResponse.
+ *
+ * @param attributes node attributes in the cluster.
+ * @return GetClusterNodeAttributesResponse.
+ */
+ public static GetClusterNodeAttributesResponse newInstance(
+ Set<NodeAttributeInfo> attributes) {
+ GetClusterNodeAttributesResponse response =
+ Records.newRecord(GetClusterNodeAttributesResponse.class);
+ response.setNodeAttributes(attributes);
+ return response;
+ }
+
+ /**
+ * Set node attributes to the response.
+ *
+ * @param attributes Set of NodeAttributeInfo to set in the response.
+ */
+ @Public
+ @Unstable
+ public abstract void setNodeAttributes(Set<NodeAttributeInfo> attributes);
+
+ /**
+ * Get node attributes from the response.
+ *
+ * @return Node attributes.
+ */
+ @Public
+ @Unstable
+ public abstract Set<NodeAttributeInfo> getNodeAttributes();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesRequest.java
new file mode 100644
index 00000000000..8e91bcafed0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesRequest.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.util.Set;
+
+/**
+ *
+ * The request from clients to get nodes to attributes mapping
+ * in the cluster from the ResourceManager.
+ *
+ *
+ * @see ApplicationClientProtocol#getNodesToAttributes
+ * (GetNodesToAttributesRequest)
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class GetNodesToAttributesRequest {
+
+ public static GetNodesToAttributesRequest newInstance(
+ Set<String> hostNames) {
+ GetNodesToAttributesRequest request =
+ Records.newRecord(GetNodesToAttributesRequest.class);
+ request.setHostNames(hostNames);
+ return request;
+ }
+
+ /**
+ * Set hostnames for which mapping is required.
+ *
+ * @param hostnames Set of hostnames.
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public abstract void setHostNames(Set<String> hostnames);
+
+ /**
+ * Get hostnames for which mapping is required.
+ *
+ * @return Set of hostnames.
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public abstract Set<String> getHostNames();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesResponse.java
new file mode 100644
index 00000000000..acc07bb1847
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesResponse.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ *
+ * The response sent by the ResourceManager to a client requesting
+ * nodes to attributes mapping.
+ *
+ *
+ * @see ApplicationClientProtocol#getNodesToAttributes
+ * (GetNodesToAttributesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetNodesToAttributesResponse {
+
+ public static GetNodesToAttributesResponse newInstance(
+ Map<String, Set<NodeAttribute>> map) {
+ GetNodesToAttributesResponse response =
+ Records.newRecord(GetNodesToAttributesResponse.class);
+ response.setNodeToAttributes(map);
+ return response;
+ }
+
+ @Public
+ @Evolving
+ public abstract void setNodeToAttributes(
+ Map<String, Set<NodeAttribute>> map);
+
+ /**
+ * Get hostnames to NodeAttributes mapping.
+ *
+ * @return Map of hostname to Set of NodeAttribute (host to attributes).
+ */
+ @Public
+ @Evolving
+ public abstract Map<String, Set<NodeAttribute>> getNodeToAttributes();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
index 13081f3d6df..70649390821 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
@@ -37,16 +37,30 @@
* Its not compulsory for all the attributes to have value, empty string is the
* default value of the NodeAttributeType.STRING
*
- *
+ *
+ * Node Attribute Prefix is used as namespace to segregate the attributes.
+ *
*/
@Public
@Unstable
public abstract class NodeAttribute {
+ public static final String PREFIX_DISTRIBUTED = "nm.yarn.io";
+ public static final String PREFIX_CENTRALIZED = "rm.yarn.io";
+
public static NodeAttribute newInstance(String attributeName,
NodeAttributeType attributeType, String attributeValue) {
+ return newInstance(PREFIX_CENTRALIZED, attributeName, attributeType,
+ attributeValue);
+ }
+
+ public static NodeAttribute newInstance(String attributePrefix,
+ String attributeName, NodeAttributeType attributeType,
+ String attributeValue) {
NodeAttribute nodeAttribute = Records.newRecord(NodeAttribute.class);
- nodeAttribute.setAttributeName(attributeName);
+ NodeAttributeKey nodeAttributeKey =
+ NodeAttributeKey.newInstance(attributePrefix, attributeName);
+ nodeAttribute.setAttributeKey(nodeAttributeKey);
nodeAttribute.setAttributeType(attributeType);
nodeAttribute.setAttributeValue(attributeValue);
return nodeAttribute;
@@ -54,11 +68,11 @@ public static NodeAttribute newInstance(String attributeName,
@Public
@Unstable
- public abstract String getAttributeName();
+ public abstract NodeAttributeKey getAttributeKey();
@Public
@Unstable
- public abstract void setAttributeName(String attributeName);
+ public abstract void setAttributeKey(NodeAttributeKey attributeKey);
@Public
@Unstable
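To show how the key-based API above fits together, a small sketch follows; the attribute names and values are made up, while the prefixes are the constants introduced in this patch.

import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
import org.apache.hadoop.yarn.api.records.NodeAttributeType;

public class NodeAttributeSketch {
  public static void main(String[] args) {
    // Centrally managed attribute; the prefix defaults to rm.yarn.io.
    NodeAttribute os = NodeAttribute.newInstance(
        "OS", NodeAttributeType.STRING, "linux");

    // Distributed (NodeManager reported) attribute under nm.yarn.io.
    NodeAttribute java = NodeAttribute.newInstance(
        NodeAttribute.PREFIX_DISTRIBUTED, "JAVA_VERSION",
        NodeAttributeType.STRING, "1.8");

    // The key (prefix + name) is what uniquely identifies an attribute.
    NodeAttributeKey key = java.getAttributeKey();
    System.out.println(key.getAttributePrefix() + "/" + key.getAttributeName()
        + " = " + java.getAttributeValue());
    System.out.println(os.getAttributeKey().getAttributePrefix());
  }
}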
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeInfo.java
new file mode 100644
index 00000000000..d294333ed1e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeInfo.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * Node Attribute Info describes a NodeAttribute.
+ *
+ */
+@Public
+@Unstable
+public abstract class NodeAttributeInfo {
+
+ public static NodeAttributeInfo newInstance(NodeAttribute nodeAttribute) {
+ return newInstance(nodeAttribute.getAttributeKey(),
+ nodeAttribute.getAttributeType());
+ }
+
+ public static NodeAttributeInfo newInstance(NodeAttributeKey nodeAttributeKey,
+ NodeAttributeType attributeType) {
+ NodeAttributeInfo nodeAttribute =
+ Records.newRecord(NodeAttributeInfo.class);
+ nodeAttribute.setAttributeKey(nodeAttributeKey);
+ nodeAttribute.setAttributeType(attributeType);
+ return nodeAttribute;
+ }
+
+ @Public
+ @Unstable
+ public abstract NodeAttributeKey getAttributeKey();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeKey(NodeAttributeKey attributeKey);
+
+ @Public
+ @Unstable
+ public abstract NodeAttributeType getAttributeType();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeType(NodeAttributeType attributeType);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeKey.java
new file mode 100644
index 00000000000..35ff26f07f1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeKey.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * Node AttributeKey uniquely identifies a given Node Attribute. Node Attribute
+ * is identified based on attribute prefix and name.
+ *
+ *
+ * Node Attribute Prefix is used as namespace to segregate the attributes.
+ *
+ */
+@Public
+@Unstable
+public abstract class NodeAttributeKey {
+
+ public static NodeAttributeKey newInstance(String attributeName) {
+ return newInstance(NodeAttribute.PREFIX_CENTRALIZED, attributeName);
+ }
+
+ public static NodeAttributeKey newInstance(String attributePrefix,
+ String attributeName) {
+ NodeAttributeKey nodeAttributeKey =
+ Records.newRecord(NodeAttributeKey.class);
+ nodeAttributeKey.setAttributePrefix(attributePrefix);
+ nodeAttributeKey.setAttributeName(attributeName);
+ return nodeAttributeKey;
+ }
+
+ @Public
+ @Unstable
+ public abstract String getAttributePrefix();
+
+ @Public
+ @Unstable
+ public abstract void setAttributePrefix(String attributePrefix);
+
+ @Public
+ @Unstable
+ public abstract String getAttributeName();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeName(String attributeName);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeOpCode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeOpCode.java
new file mode 100644
index 00000000000..76db063eed5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeOpCode.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+/**
+ * Enumeration of various node attribute op codes.
+ */
+@Public
+@Evolving
+public enum NodeAttributeOpCode {
+ /**
+ * Default as No OP.
+ */
+ NO_OP,
+ /**
+ * EQUALS op code for Attribute.
+ */
+ EQ,
+
+ /**
+ * NOT EQUALS op code for Attribute.
+ */
+ NE
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
index 3a80641bb6d..625ad234081 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
@@ -258,4 +258,17 @@ public NodeUpdateType getNodeUpdateType() {
* Set the node update type (null indicates absent node update type).
* */
public void setNodeUpdateType(NodeUpdateType nodeUpdateType) {}
+
+ /**
+ * Set the node attributes of node.
+ *
+ * @param nodeAttributes set of node attributes.
+ */
+ public abstract void setNodeAttributes(Set<NodeAttribute> nodeAttributes);
+
+ /**
+ * Get node attributes of node.
+ * @return the set of node attributes.
+ */
+ public abstract Set<NodeAttribute> getNodeAttributes();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToAttributeValue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToAttributeValue.java
new file mode 100644
index 00000000000..0bcb8b68b41
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToAttributeValue.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * Mapping of Attribute Value to a Node.
+ *
+ */
+@Public
+@Unstable
+public abstract class NodeToAttributeValue {
+ public static NodeToAttributeValue newInstance(String hostname,
+ String attributeValue) {
+ NodeToAttributeValue nodeToAttributeValue =
+ Records.newRecord(NodeToAttributeValue.class);
+ nodeToAttributeValue.setAttributeValue(attributeValue);
+ nodeToAttributeValue.setHostname(hostname);
+ return nodeToAttributeValue;
+ }
+
+ @Public
+ @Unstable
+ public abstract String getAttributeValue();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeValue(String attributeValue);
+
+ @Public
+ @Unstable
+ public abstract String getHostname();
+
+ @Public
+ @Unstable
+ public abstract void setHostname(String hostname);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index 0fe8273e6d7..79196fbf851 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
/**
* {@code PlacementConstraint} represents a placement constraint for a resource
@@ -155,13 +156,22 @@ public String toString() {
private int minCardinality;
private int maxCardinality;
private Set<TargetExpression> targetExpressions;
+ private NodeAttributeOpCode attributeOpCode;
public SingleConstraint(String scope, int minCardinality,
- int maxCardinality, Set<TargetExpression> targetExpressions) {
+ int maxCardinality, NodeAttributeOpCode opCode,
+ Set<TargetExpression> targetExpressions) {
this.scope = scope;
this.minCardinality = minCardinality;
this.maxCardinality = maxCardinality;
this.targetExpressions = targetExpressions;
+ this.attributeOpCode = opCode;
+ }
+
+ public SingleConstraint(String scope, int minCardinality,
+ int maxCardinality, Set<TargetExpression> targetExpressions) {
+ this(scope, minCardinality, maxCardinality, NodeAttributeOpCode.NO_OP,
+ targetExpressions);
}
public SingleConstraint(String scope, int minC, int maxC,
@@ -169,6 +179,13 @@ public SingleConstraint(String scope, int minC, int maxC,
this(scope, minC, maxC, new HashSet<>(Arrays.asList(targetExpressions)));
}
+ public SingleConstraint(String scope, int minC, int maxC,
+ NodeAttributeOpCode opCode,
+ TargetExpression... targetExpressions) {
+ this(scope, minC, maxC, opCode,
+ new HashSet<>(Arrays.asList(targetExpressions)));
+ }
+
/**
* Get the scope of the constraint.
*
@@ -205,6 +222,15 @@ public int getMaxCardinality() {
return targetExpressions;
}
+ /**
+ * Get the NodeAttributeOpCode of the constraint.
+ *
+ * @return nodeAttribute Op Code
+ */
+ public NodeAttributeOpCode getNodeAttributeOpCode() {
+ return attributeOpCode;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -225,6 +251,10 @@ public boolean equals(Object o) {
if (!getScope().equals(that.getScope())) {
return false;
}
+ if (getNodeAttributeOpCode() != null && !getNodeAttributeOpCode()
+ .equals(that.getNodeAttributeOpCode())) {
+ return false;
+ }
return getTargetExpressions().equals(that.getTargetExpressions());
}
@@ -233,6 +263,7 @@ public int hashCode() {
int result = getScope().hashCode();
result = 31 * result + getMinCardinality();
result = 31 * result + getMaxCardinality();
+ result = 31 * result + getNodeAttributeOpCode().hashCode();
result = 31 * result + getTargetExpressions().hashCode();
return result;
}
@@ -259,6 +290,13 @@ public String toString() {
.append(getScope()).append(",")
.append(targetExpr)
.toString());
+ } else if (min == -1 && max == -1) {
+ // node attribute
+ targetConstraints.add(new StringBuilder()
+ .append(getScope()).append(",")
+ .append(getNodeAttributeOpCode()).append(",")
+ .append(targetExpr)
+ .toString());
} else {
// cardinality
targetConstraints.add(new StringBuilder()
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
index d22a6bd90c0..73fa328833f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType;
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
@@ -85,6 +86,24 @@ public static AbstractConstraint targetNotIn(String scope,
return new SingleConstraint(scope, 0, 0, targetExpressions);
}
+ /**
+ * Creates a constraint that requires allocations to be placed on nodes that
+ * belong to a scope (e.g., node or rack) that satisfy any of the
+ * target expressions based on node attribute op code.
+ *
+ * @param scope the scope within which the target expressions should be
+ * evaluated
+ * @param opCode node attribute op code, which could be equals or not equals
+ * @param targetExpressions the expressions that need to be satisfied within
+ * the scope, based on the op code
+ * @return the resulting placement constraint
+ */
+ public static AbstractConstraint targetNodeAttribute(String scope,
+ NodeAttributeOpCode opCode,
+ TargetExpression... targetExpressions) {
+ return new SingleConstraint(scope, -1, -1, opCode, targetExpressions);
+ }
+
/**
* Creates a constraint that restricts the number of allocations within a
* given scope (e.g., node or rack).
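A hedged sketch of building a constraint with the new helper, assuming the existing PlacementConstraints.NODE scope and PlacementTargets.nodeAttribute target; the attribute name and value are placeholders.

import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

public class NodeAttributeConstraintSketch {
  // Place containers only on nodes where rm.yarn.io/java equals 1.8.
  public static PlacementConstraint javaOnly() {
    return PlacementConstraints.build(
        PlacementConstraints.targetNodeAttribute(
            PlacementConstraints.NODE,
            NodeAttributeOpCode.EQ,
            PlacementConstraints.PlacementTargets
                .nodeAttribute("rm.yarn.io/java", "1.8")));
  }
}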
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 148edb9f26c..dd804a30937 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3463,6 +3463,22 @@ public static boolean isAclEnabled(Configuration conf) {
public static final String FS_NODE_LABELS_STORE_ROOT_DIR = NODE_LABELS_PREFIX
+ "fs-store.root-dir";
+ /**
+ * Node-attribute configurations.
+ */
+ public static final String NODE_ATTRIBUTE_PREFIX =
+ YARN_PREFIX + "node-attribute.";
+ /**
+ * Node attribute store implementation class.
+ */
+ public static final String FS_NODE_ATTRIBUTE_STORE_IMPL_CLASS =
+ NODE_ATTRIBUTE_PREFIX + "fs-store.impl.class";
+ /**
+ * File system node attribute store directory.
+ */
+ public static final String FS_NODE_ATTRIBUTE_STORE_ROOT_DIR =
+ NODE_ATTRIBUTE_PREFIX + "fs-store.root-dir";
+
/**
* Flag to indicate if the node labels feature enabled, by default it's
* disabled
@@ -3525,16 +3541,25 @@ public static boolean areNodeLabelsEnabled(
private static final String NM_NODE_LABELS_PREFIX = NM_PREFIX
+ "node-labels.";
+ private static final String NM_NODE_ATTRIBUTES_PREFIX = NM_PREFIX
+ + "node-attributes.";
+
public static final String NM_NODE_LABELS_PROVIDER_CONFIG =
NM_NODE_LABELS_PREFIX + "provider";
+ public static final String NM_NODE_ATTRIBUTES_PROVIDER_CONFIG =
+ NM_NODE_ATTRIBUTES_PREFIX + "provider";
+
// whitelist names for the yarn.nodemanager.node-labels.provider
- public static final String CONFIG_NODE_LABELS_PROVIDER = "config";
- public static final String SCRIPT_NODE_LABELS_PROVIDER = "script";
+ public static final String CONFIG_NODE_DESCRIPTOR_PROVIDER = "config";
+ public static final String SCRIPT_NODE_DESCRIPTOR_PROVIDER = "script";
private static final String NM_NODE_LABELS_PROVIDER_PREFIX =
NM_NODE_LABELS_PREFIX + "provider.";
+ private static final String NM_NODE_ATTRIBUTES_PROVIDER_PREFIX =
+ NM_NODE_ATTRIBUTES_PREFIX + "provider.";
+
public static final String NM_NODE_LABELS_RESYNC_INTERVAL =
NM_NODE_LABELS_PREFIX + "resync-interval-ms";
@@ -3559,6 +3584,9 @@ public static boolean areNodeLabelsEnabled(
public static final String NM_PROVIDER_CONFIGURED_NODE_PARTITION =
NM_NODE_LABELS_PROVIDER_PREFIX + "configured-node-partition";
+ public static final String NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES =
+ NM_NODE_ATTRIBUTES_PROVIDER_PREFIX + "configured-node-attributes";
+
private static final String RM_NODE_LABELS_PREFIX = RM_PREFIX
+ "node-labels.";
@@ -3606,6 +3634,33 @@ public static boolean areNodeLabelsEnabled(
NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PREFIX + "opts";
/**
+ * Node attribute provider fetch attributes interval and timeout.
+ */
+ public static final String NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS =
+ NM_NODE_ATTRIBUTES_PROVIDER_PREFIX + "fetch-interval-ms";
+
+ public static final long
+ DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS = 10 * 60 * 1000;
+
+ public static final String NM_NODE_ATTRIBUTES_PROVIDER_FETCH_TIMEOUT_MS =
+ NM_NODE_ATTRIBUTES_PROVIDER_PREFIX + "fetch-timeout-ms";
+
+ public static final long DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_TIMEOUT_MS
+ = DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS * 2;
+
+ /**
+ * Script to collect node attributes.
+ */
+ private static final String NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PREFIX =
+ NM_NODE_ATTRIBUTES_PROVIDER_PREFIX + "script.";
+
+ public static final String NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PATH =
+ NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PREFIX + "path";
+
+ public static final String NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_OPTS =
+ NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PREFIX + "opts";
+
+ /*
* Support to view apps for given user in secure cluster.
* @deprecated This field is deprecated for {@link #FILTER_ENTITY_LIST_BY_USER}
*/
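For illustration, the new NodeManager-side keys could be wired together as below; the script path and values are placeholders, not defaults shipped by the patch.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeAttributeProviderConfigSketch {
  public static YarnConfiguration scriptProviderConf() {
    YarnConfiguration conf = new YarnConfiguration();
    // Pick the script based provider ("config" is the other whitelisted name).
    conf.set(YarnConfiguration.NM_NODE_ATTRIBUTES_PROVIDER_CONFIG,
        YarnConfiguration.SCRIPT_NODE_DESCRIPTOR_PROVIDER);
    // Placeholder path to a script that prints node attributes.
    conf.set(YarnConfiguration.NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PATH,
        "/usr/local/bin/node-attributes.sh");
    conf.setLong(
        YarnConfiguration.NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS,
        YarnConfiguration.DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS);
    return conf;
  }
}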
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
index 2926c9d1de8..93fd706b0c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
@@ -19,6 +19,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
@@ -44,11 +45,12 @@
@InterfaceStability.Unstable
public final class PlacementConstraintParser {
+ public static final char EXPRESSION_VAL_DELIM = ',';
private static final char EXPRESSION_DELIM = ':';
private static final char KV_SPLIT_DELIM = '=';
- private static final char EXPRESSION_VAL_DELIM = ',';
private static final char BRACKET_START = '(';
private static final char BRACKET_END = ')';
+ private static final String KV_NE_DELIM = "!=";
private static final String IN = "in";
private static final String NOT_IN = "notin";
private static final String AND = "and";
@@ -349,6 +351,91 @@ public String nextElement() {
}
}
+ /**
+ * Constraint parser used to parse a given target expression.
+ */
+ public static class NodeConstraintParser extends ConstraintParser {
+
+ public NodeConstraintParser(String expression) {
+ super(new BaseStringTokenizer(expression,
+ String.valueOf(EXPRESSION_VAL_DELIM)));
+ }
+
+ @Override
+ public AbstractConstraint parse()
+ throws PlacementConstraintParseException {
+ PlacementConstraint.AbstractConstraint placementConstraints = null;
+ String attributeName = "";
+ NodeAttributeOpCode opCode = NodeAttributeOpCode.EQ;
+ String scope = SCOPE_NODE;
+
+ Set<String> constraintEntities = new TreeSet<>();
+ while (hasMoreTokens()) {
+ String currentTag = nextToken();
+ StringTokenizer attributeKV = getAttributeOpCodeTokenizer(currentTag);
+
+ // Usually there will be only one k=v pair. However, when multiple
+ // values are present for the same attribute, they will also come as
+ // subsequent tokens, for example java=1.8,1.9 or python!=2.
+ if (attributeKV.countTokens() > 1) {
+ opCode = getAttributeOpCode(currentTag);
+ attributeName = attributeKV.nextToken();
+ currentTag = attributeKV.nextToken();
+ }
+ constraintEntities.add(currentTag);
+ }
+
+ if (attributeName.isEmpty()) {
+ throw new PlacementConstraintParseException(
+ "expecting valid expression like k=v or k!=v, but get "
+ + constraintEntities);
+ }
+
+ PlacementConstraint.TargetExpression target = null;
+ if (!constraintEntities.isEmpty()) {
+ target = PlacementConstraints.PlacementTargets
+ .nodeAttribute(attributeName,
+ constraintEntities
+ .toArray(new String[constraintEntities.size()]));
+ }
+
+ placementConstraints = PlacementConstraints
+ .targetNodeAttribute(scope, opCode, target);
+ return placementConstraints;
+ }
+
+ private StringTokenizer getAttributeOpCodeTokenizer(String currentTag) {
+ StringTokenizer attributeKV = new StringTokenizer(currentTag,
+ KV_NE_DELIM);
+
+ // Try with '!=' delim as well.
+ if (attributeKV.countTokens() < 2) {
+ attributeKV = new StringTokenizer(currentTag,
+ String.valueOf(KV_SPLIT_DELIM));
+ }
+ return attributeKV;
+ }
+
+ /**
+ * The following conditions are validated:
+ * java=8 : OpCode = EQ (equals)
+ * java!=8 : OpCode = NE (not equals)
+ * @param currentTag tag
+ * @return Attribute op code.
+ */
+ private NodeAttributeOpCode getAttributeOpCode(String currentTag)
+ throws PlacementConstraintParseException {
+ if (currentTag.contains(KV_NE_DELIM)) {
+ return NodeAttributeOpCode.NE;
+ } else if (currentTag.contains(String.valueOf(KV_SPLIT_DELIM))) {
+ return NodeAttributeOpCode.EQ;
+ }
+ throw new PlacementConstraintParseException(
+ "expecting valid expression like k=v or k!=v, but get "
+ + currentTag);
+ }
+ }
+
/**
* Constraint parser used to parse a given target expression, such as
* "NOTIN, NODE, foo, bar".
@@ -363,20 +450,23 @@ public TargetConstraintParser(String expression) {
@Override
public AbstractConstraint parse()
throws PlacementConstraintParseException {
- PlacementConstraint.AbstractConstraint placementConstraints;
+ PlacementConstraint.AbstractConstraint placementConstraints = null;
String op = nextToken();
if (op.equalsIgnoreCase(IN) || op.equalsIgnoreCase(NOT_IN)) {
String scope = nextToken();
scope = parseScope(scope);
- Set<String> allocationTags = new TreeSet<>();
+ Set<String> constraintEntities = new TreeSet<>();
while(hasMoreTokens()) {
String tag = nextToken();
- allocationTags.add(tag);
+ constraintEntities.add(tag);
+ }
+ PlacementConstraint.TargetExpression target = null;
+ if (!constraintEntities.isEmpty()) {
+ target = PlacementConstraints.PlacementTargets.allocationTag(
+ constraintEntities
+ .toArray(new String[constraintEntities.size()]));
}
- PlacementConstraint.TargetExpression target =
- PlacementConstraints.PlacementTargets.allocationTag(
- allocationTags.toArray(new String[allocationTags.size()]));
if (op.equalsIgnoreCase(IN)) {
placementConstraints = PlacementConstraints
.targetIn(scope, target);
@@ -550,6 +640,11 @@ public static AbstractConstraint parseExpression(String constraintStr)
new ConjunctionConstraintParser(constraintStr);
constraintOptional = Optional.ofNullable(jp.tryParse());
}
+ if (!constraintOptional.isPresent()) {
+ NodeConstraintParser np =
+ new NodeConstraintParser(constraintStr);
+ constraintOptional = Optional.ofNullable(np.tryParse());
+ }
if (!constraintOptional.isPresent()) {
throw new PlacementConstraintParseException(
"Invalid constraint expression " + constraintStr);
@@ -584,12 +679,13 @@ public static AbstractConstraint parseExpression(String constraintStr)
*/
public static Map parsePlacementSpec(
String expression) throws PlacementConstraintParseException {
+ // Continue handling for application tag based constraint otherwise.
// Respect insertion order.
Map result = new LinkedHashMap<>();
PlacementConstraintParser.ConstraintTokenizer tokenizer =
new PlacementConstraintParser.MultipleConstraintsTokenizer(expression);
tokenizer.validate();
- while(tokenizer.hasMoreElements()) {
+ while (tokenizer.hasMoreElements()) {
String specStr = tokenizer.nextElement();
// each spec starts with sourceAllocationTag=numOfContainers and
// followed by a constraint expression.
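Roughly how the new parser path gets exercised, mirroring the test added below; the expressions are examples only.

import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParseException;
import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser;

public class NodeConstraintParsingSketch {
  public static void main(String[] args)
      throws PlacementConstraintParseException {
    // These fall through the IN/NOTIN, cardinality and AND/OR parsers and are
    // picked up by the new NodeConstraintParser.
    AbstractConstraint eq = PlacementConstraintParser
        .parseExpression("rm.yarn.io/java=1.8");   // EQ constraint
    AbstractConstraint ne = PlacementConstraintParser
        .parseExpression("rm.yarn.io/python!=2");  // NE constraint
    System.out.println(eq + "\n" + ne);
  }
}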
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
index 81adef19335..fdd4bc5aca8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
@@ -64,4 +64,7 @@ service ApplicationClientProtocolService {
rpc getResourceProfiles(GetAllResourceProfilesRequestProto) returns (GetAllResourceProfilesResponseProto);
rpc getResourceProfile(GetResourceProfileRequestProto) returns (GetResourceProfileResponseProto);
rpc getResourceTypeInfo(GetAllResourceTypeInfoRequestProto) returns (GetAllResourceTypeInfoResponseProto);
+ rpc getClusterNodeAttributes (GetClusterNodeAttributesRequestProto) returns (GetClusterNodeAttributesResponseProto);
+ rpc getAttributesToNodes (GetAttributesToNodesRequestProto) returns (GetAttributesToNodesResponseProto);
+ rpc getNodesToAttributes (GetNodesToAttributesRequestProto) returns (GetNodesToAttributesResponseProto);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
index 5b93aec036f..d37e36a1878 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
@@ -144,11 +144,6 @@ message NodesToAttributesMappingRequestProto {
optional bool failOnUnknownNodes = 3;
}
-message NodeToAttributesProto {
- optional string node = 1;
- repeated NodeAttributeProto nodeAttributes = 2;
-}
-
message NodesToAttributesMappingResponseProto {
}
//////////////////////////////////////////////////////////////////
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 41f5ccb8398..5fe2cc94550 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -355,6 +355,7 @@ message NodeReportProto {
optional ResourceUtilizationProto node_utilization = 12;
optional uint32 decommissioning_timeout = 13;
optional NodeUpdateTypeProto node_update_type = 14;
+ repeated NodeAttributeProto node_attributes = 15;
}
message NodeIdToLabelsProto {
@@ -376,12 +377,37 @@ enum NodeAttributeTypeProto {
STRING = 1;
}
+message NodeAttributeKeyProto {
+ optional string attributePrefix = 1 [default="rm.yarn.io"];
+ required string attributeName = 2;
+}
+
message NodeAttributeProto {
- optional string attributeName = 1;
- optional NodeAttributeTypeProto attributeType = 2;
- optional string attributeValue = 3;
+ required NodeAttributeKeyProto attributeKey = 1;
+ optional NodeAttributeTypeProto attributeType = 2 [default = STRING];
+ optional string attributeValue = 3 [default=""];
+}
+
+
+message NodeAttributeInfoProto {
+ required NodeAttributeKeyProto attributeKey = 1;
+ required NodeAttributeTypeProto attributeType = 2;
+}
+
+message NodeToAttributeValueProto {
+ required string hostname = 1;
+ required string attributeValue = 2;
+}
+
+message AttributeToNodesProto {
+ required NodeAttributeKeyProto nodeAttribute = 1;
+ repeated NodeToAttributeValueProto nodeValueMap = 2;
}
+message NodeToAttributesProto {
+ optional string node = 1;
+ repeated NodeAttributeProto nodeAttributes = 2;
+}
enum ContainerTypeProto {
APPLICATION_MASTER = 1;
@@ -620,11 +646,18 @@ message PlacementConstraintProto {
optional CompositePlacementConstraintProto compositeConstraint = 2;
}
+enum NodeAttributeOpCodeProto {
+ NO_OP = 1;
+ EQ = 2;
+ NE = 3;
+}
+
message SimplePlacementConstraintProto {
required string scope = 1;
repeated PlacementConstraintTargetProto targetExpressions = 2;
optional int32 minCardinality = 3;
optional int32 maxCardinality = 4;
+ optional NodeAttributeOpCodeProto attributeOpCode = 5;
}
message PlacementConstraintTargetProto {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index acd452dc79f..248f775bdeb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -260,6 +260,29 @@ message GetClusterNodeLabelsResponseProto {
repeated NodeLabelProto nodeLabels = 2;
}
+message GetClusterNodeAttributesRequestProto {
+}
+
+message GetClusterNodeAttributesResponseProto {
+ repeated NodeAttributeInfoProto nodeAttributes = 1;
+}
+
+message GetAttributesToNodesRequestProto {
+ repeated NodeAttributeKeyProto nodeAttributes = 1;
+}
+
+message GetAttributesToNodesResponseProto {
+ repeated AttributeToNodesProto attributesToNodes = 1;
+}
+
+message GetNodesToAttributesRequestProto {
+ repeated string hostnames = 1;
+}
+
+message GetNodesToAttributesResponseProto {
+ repeated NodeToAttributesProto nodesToAttributes = 1;
+}
+
message UpdateApplicationPriorityRequestProto {
required ApplicationIdProto applicationId = 1;
required PriorityProto applicationPriority = 2;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
index a69571c5c80..9806ba4ac96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
@@ -22,6 +22,8 @@
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
@@ -38,8 +40,14 @@
import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.SourceTagsTokenizer;
import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.ConstraintTokenizer;
-import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.*;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.and;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNodeAttribute;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import org.junit.Assert;
import org.junit.Test;
@@ -443,4 +451,55 @@ private void verifyConstraintToString(String inputExpr,
+ constrainExpr + ", caused by: " + e.getMessage());
}
}
+
+ @Test
+ public void testParseNodeAttributeSpec()
+ throws PlacementConstraintParseException {
+ Map<SourceTags, PlacementConstraint> result;
+ PlacementConstraint.AbstractConstraint expectedPc1, expectedPc2;
+ PlacementConstraint actualPc1, actualPc2;
+
+ // A single node attribute constraint
+ result = PlacementConstraintParser
+ .parsePlacementSpec("xyz=4,rm.yarn.io/foo=true");
+ Assert.assertEquals(1, result.size());
+ TargetExpression target = PlacementTargets
+ .nodeAttribute("rm.yarn.io/foo", "true");
+ expectedPc1 = targetNodeAttribute("node", NodeAttributeOpCode.EQ, target);
+
+ actualPc1 = result.values().iterator().next();
+ Assert.assertEquals(expectedPc1, actualPc1.getConstraintExpr());
+
+ // A single node attribute constraint
+ result = PlacementConstraintParser
+ .parsePlacementSpec("xyz=3,rm.yarn.io/foo!=abc");
+ Assert.assertEquals(1, result.size());
+ target = PlacementTargets
+ .nodeAttribute("rm.yarn.io/foo", "abc");
+ expectedPc1 = targetNodeAttribute("node", NodeAttributeOpCode.NE, target);
+
+ actualPc1 = result.values().iterator().next();
+ Assert.assertEquals(expectedPc1, actualPc1.getConstraintExpr());
+
+ actualPc1 = result.values().iterator().next();
+ Assert.assertEquals(expectedPc1, actualPc1.getConstraintExpr());
+
+ // Multiple node attribute constraints, one per source tag
+ result = PlacementConstraintParser
+ .parsePlacementSpec(
+ "xyz=1,rm.yarn.io/foo!=abc:zxy=1,rm.yarn.io/bar=true");
+ Assert.assertEquals(2, result.size());
+ target = PlacementTargets
+ .nodeAttribute("rm.yarn.io/foo", "abc");
+ expectedPc1 = targetNodeAttribute("node", NodeAttributeOpCode.NE, target);
+ target = PlacementTargets
+ .nodeAttribute("rm.yarn.io/bar", "true");
+ expectedPc2 = targetNodeAttribute("node", NodeAttributeOpCode.EQ, target);
+
+ Iterator<PlacementConstraint> valueIt = result.values().iterator();
+ actualPc1 = valueIt.next();
+ actualPc2 = valueIt.next();
+ Assert.assertEquals(expectedPc1, actualPc1.getConstraintExpr());
+ Assert.assertEquals(expectedPc2, actualPc2.getConstraintExpr());
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 76fa38f922a..f3693097515 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -523,9 +523,13 @@ public boolean init(String[] args) throws ParseException, IOException {
if (cliParser.hasOption("placement_spec")) {
String placementSpec = cliParser.getOptionValue("placement_spec");
- LOG.info("Placement Spec received [{}]", placementSpec);
- parsePlacementSpecs(placementSpec);
+ String decodedSpec = getDecodedPlacementSpec(placementSpec);
+ LOG.info("Placement Spec received [{}]", decodedSpec);
+
+ this.numTotalContainers = 0;
+ parsePlacementSpecs(decodedSpec);
LOG.info("Total num containers requested [{}]", numTotalContainers);
+
if (numTotalContainers == 0) {
throw new IllegalArgumentException(
"Cannot run distributed shell with no containers");
@@ -694,23 +698,25 @@ public boolean init(String[] args) throws ParseException, IOException {
return true;
}
- private void parsePlacementSpecs(String placementSpecifications) {
- // Client sends placement spec in encoded format
- Base64.Decoder decoder = Base64.getDecoder();
- byte[] decodedBytes = decoder.decode(
- placementSpecifications.getBytes(StandardCharsets.UTF_8));
- String decodedSpec = new String(decodedBytes, StandardCharsets.UTF_8);
- LOG.info("Decode placement spec: " + decodedSpec);
+ private void parsePlacementSpecs(String decodedSpec) {
 Map<String, PlacementSpec> pSpecs =
PlacementSpec.parse(decodedSpec);
this.placementSpecs = new HashMap<>();
- this.numTotalContainers = 0;
for (PlacementSpec pSpec : pSpecs.values()) {
- this.numTotalContainers += pSpec.numContainers;
+ this.numTotalContainers += pSpec.getNumContainers();
this.placementSpecs.put(pSpec.sourceTag, pSpec);
}
}
+ private String getDecodedPlacementSpec(String placementSpecifications) {
+ Base64.Decoder decoder = Base64.getDecoder();
+ byte[] decodedBytes = decoder.decode(
+ placementSpecifications.getBytes(StandardCharsets.UTF_8));
+ String decodedSpec = new String(decodedBytes, StandardCharsets.UTF_8);
+ LOG.info("Decode placement spec: " + decodedSpec);
+ return decodedSpec;
+ }
+
/**
* Helper function to print usage
*
@@ -798,6 +804,7 @@ public void run() throws YarnException, IOException, InterruptedException {
}
}
}
+
RegisterApplicationMasterResponse response = amRMClient
.registerApplicationMaster(appMasterHostname, appMasterRpcPort,
appMasterTrackingUrl, placementConstraintMap);
@@ -845,14 +852,18 @@ public void run() throws YarnException, IOException, InterruptedException {
// Keep looping until all the containers are launched and shell script
// executed on them ( regardless of success/failure).
if (this.placementSpecs == null) {
+ LOG.info("placementSpecs null");
for (int i = 0; i < numTotalContainersToRequest; ++i) {
ContainerRequest containerAsk = setupContainerAskForRM();
amRMClient.addContainerRequest(containerAsk);
}
} else {
+ LOG.info("placementSpecs to create req:" + placementSpecs);
+ List<SchedulingRequest> schedReqs = new ArrayList<>();
for (PlacementSpec pSpec : this.placementSpecs.values()) {
- for (int i = 0; i < pSpec.numContainers; i++) {
+ LOG.info("placementSpec :" + pSpec + ", container:" + pSpec
+ .getNumContainers());
+ for (int i = 0; i < pSpec.getNumContainers(); i++) {
SchedulingRequest sr = setupSchedulingRequest(pSpec);
schedReqs.add(sr);
}
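The decode path added above assumes the distributed shell Client ships the placement spec Base64-encoded via the -placement_spec option. A minimal stand-alone sketch of that round trip (the encoding side is not part of this hunk and is shown only as an assumption):

  import java.nio.charset.StandardCharsets;
  import java.util.Base64;

  public class PlacementSpecCodecSketch {
    public static void main(String[] args) {
      String rawSpec = "zxy=1,rm.yarn.io/bar=true";

      // Client side (assumed): encode before passing via -placement_spec.
      String encoded = Base64.getEncoder()
          .encodeToString(rawSpec.getBytes(StandardCharsets.UTF_8));

      // AM side: mirrors getDecodedPlacementSpec() above.
      byte[] decodedBytes = Base64.getDecoder()
          .decode(encoded.getBytes(StandardCharsets.UTF_8));
      String decodedSpec = new String(decodedBytes, StandardCharsets.UTF_8);

      System.out.println(rawSpec.equals(decodedSpec)); // prints true
    }
  }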
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index c8a71b320c0..446b6088b0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -103,7 +103,7 @@
* the provided shell command on a set of containers.
*
*
 * This client is meant to act as an example on how to write yarn-based applications.
- *
+ *
*
 * To submit an application, a client first needs to connect to the ResourceManager
* aka ApplicationsManager or ASM via the {@link ApplicationClientProtocol}. The {@link ApplicationClientProtocol}
* provides a way for the client to get access to cluster information and to request for a
@@ -192,6 +192,8 @@
// Placement specification
private String placementSpec = "";
+ // Node Attribute specification
+ private String nodeAttributeSpec = "";
// log4j.properties file
// if available, add to local resources and set into classpath
private String log4jPropFile = "";
@@ -448,6 +450,7 @@ public boolean init(String[] args) throws ParseException {
// Check if it is parsable
PlacementSpec.parse(this.placementSpec);
}
+
appName = cliParser.getOptionValue("appname", "DistributedShell");
amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
amQueue = cliParser.getOptionValue("queue", "default");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
index 290925980a5..ceaa37d5879 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
@@ -37,8 +37,8 @@
LoggerFactory.getLogger(PlacementSpec.class);
public final String sourceTag;
- public final int numContainers;
public final PlacementConstraint constraint;
+ private int numContainers;
public PlacementSpec(String sourceTag, int numContainers,
PlacementConstraint constraint) {
@@ -47,6 +47,22 @@ public PlacementSpec(String sourceTag, int numContainers,
this.constraint = constraint;
}
+ /**
+ * Get the number of containers for this spec.
+ * @return container count
+ */
+ public int getNumContainers() {
+ return numContainers;
+ }
+
+ /**
+ * Set number of containers for this spec.
+ * @param numContainers number of containers.
+ */
+ public void setNumContainers(int numContainers) {
+ this.numContainers = numContainers;
+ }
+
// Placement specification should be of the form:
// PlacementSpec => ""|KeyVal;PlacementSpec
// KeyVal => SourceTag=Constraint
@@ -71,6 +87,7 @@ public PlacementSpec(String sourceTag, int numContainers,
public static Map<String, PlacementSpec> parse(String specs)
throws IllegalArgumentException {
LOG.info("Parsing Placement Specs: [{}]", specs);
+
 Map<String, PlacementSpec> pSpecs = new HashMap<>();
 Map<SourceTags, PlacementConstraint> parsed;
try {
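A minimal sketch of how an application could consume the map returned by PlacementSpec.parse, using a spec string in the same format exercised by the parser test earlier in this patch (tag names and attribute values are illustrative only):

  import java.util.Map;

  import org.apache.hadoop.yarn.applications.distributedshell.PlacementSpec;

  public class PlacementSpecParseSketch {
    public static void main(String[] args) {
      // Two source tags separated by ':', each with a container count and
      // a node-attribute expression, as in the parser test shown above.
      Map<String, PlacementSpec> specs = PlacementSpec
          .parse("xyz=1,rm.yarn.io/foo!=abc:zxy=1,rm.yarn.io/bar=true");

      for (PlacementSpec spec : specs.values()) {
        System.out.println(spec.sourceTag + " -> "
            + spec.getNumContainers() + " container(s), constraint="
            + spec.constraint);
      }
    }
  }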
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 26c99e31aa9..f51b2f9b168 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -33,7 +33,6 @@
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
@@ -52,10 +51,14 @@
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -900,4 +903,58 @@ public abstract Resource getResourceProfile(String profile)
@Unstable
public abstract List<ResourceTypeInfo> getResourceTypeInfo()
throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by client to get node attributes in the cluster.
+ *
+ *
+ * @return cluster node attributes collection
+ * @throws YarnException when there is a failure in
+ * {@link ApplicationClientProtocol}
+ * @throws IOException when there is a failure in
+ * {@link ApplicationClientProtocol}
+ */
+ @Public
+ @Unstable
+ public abstract Set<NodeAttributeInfo> getClusterAttributes()
+ throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by client to get mapping of AttributeKey to associated
+ * NodeToAttributeValue list for specified node attributeKeys in the cluster.
+ *
+ *
+ * @param attributes AttributeKeys for which the associated
+ * NodeToAttributeValue mappings have to be retrieved. If empty or
+ * null, mappings for all attribute keys in the cluster are returned.
+ * @return mapping of AttributeKey to List of associated
+ * NodeToAttributeValue's.
+ * @throws YarnException when there is a failure in
+ * {@link ApplicationClientProtocol}
+ * @throws IOException when there is a failure in
+ * {@link ApplicationClientProtocol}
+ */
+ @Public
+ @Unstable
+ public abstract Map<NodeAttributeKey, List<NodeToAttributeValue>>
+ getAttributesToNodes(Set<NodeAttributeKey> attributes)
+ throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by client to get all node to attribute mappings in
+ * the existing cluster.
+ *
+ *
+ * @param hostNames HostNames for which the host to attributes mapping has
+ * to be retrieved. If empty or null, mappings for all nodes in the
+ * cluster are returned.
+ * @return Node to attribute mappings
+ * @throws YarnException when there is a failure in
+ * {@link ApplicationClientProtocol}
+ * @throws IOException when there is a failure in
+ * {@link ApplicationClientProtocol}
+ */
+ @Public
+ @Unstable
+ public abstract Map<String, Set<NodeAttribute>> getNodeToAttributes(
+ Set<String> hostNames) throws YarnException, IOException;
+
}
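A minimal sketch of a client exercising the three new YarnClient calls. The null arguments rely on the "empty or null returns everything" behaviour documented above; the configuration and lifecycle handling is the usual YarnClient boilerplate, assumed rather than taken from this patch:

  import java.util.List;
  import java.util.Map;
  import java.util.Set;

  import org.apache.hadoop.yarn.api.records.NodeAttribute;
  import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
  import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
  import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
  import org.apache.hadoop.yarn.client.api.YarnClient;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class NodeAttributeQuerySketch {
    public static void main(String[] args) throws Exception {
      YarnClient client = YarnClient.createYarnClient();
      client.init(new YarnConfiguration());
      client.start();
      try {
        // All attributes currently registered in the cluster.
        Set<NodeAttributeInfo> attributes = client.getClusterAttributes();

        // Nodes and values per attribute key; null asks for all keys.
        Map<NodeAttributeKey, List<NodeToAttributeValue>> attributesToNodes =
            client.getAttributesToNodes(null);

        // Attributes per host; null asks for all hosts.
        Map<String, Set<NodeAttribute>> nodesToAttributes =
            client.getNodeToAttributes(null);

        System.out.println(attributes.size() + " attributes, "
            + attributesToNodes.size() + " attribute keys mapped, "
            + nodesToAttributes.size() + " hosts reported");
      } finally {
        client.stop();
      }
    }
  }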
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 1ceb46209b1..acfc3ff70be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -22,7 +22,6 @@
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -52,8 +51,10 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
@@ -68,6 +69,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
@@ -96,15 +98,18 @@
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.api.records.Token;
@@ -977,4 +982,28 @@ public Resource getResourceProfile(String profile)
GetAllResourceTypeInfoRequest.newInstance();
return rmClient.getResourceTypeInfo(request).getResourceTypeInfo();
}
+
+ @Override
+ public Set<NodeAttributeInfo> getClusterAttributes()
+ throws YarnException, IOException {
+ GetClusterNodeAttributesRequest request =
+ GetClusterNodeAttributesRequest.newInstance();
+ return rmClient.getClusterNodeAttributes(request).getNodeAttributes();
+ }
+
+ @Override
+ public Map<NodeAttributeKey, List<NodeToAttributeValue>> getAttributesToNodes(
+ Set<NodeAttributeKey> attributes) throws YarnException, IOException {
+ GetAttributesToNodesRequest request =
+ GetAttributesToNodesRequest.newInstance(attributes);
+ return rmClient.getAttributesToNodes(request).getAttributesToNodes();
+ }
+
+ @Override
+ public Map<String, Set<NodeAttribute>> getNodeToAttributes(
+ Set<String> hostNames) throws YarnException, IOException {
+ GetNodesToAttributesRequest request =
+ GetNodesToAttributesRequest.newInstance(hostNames);
+ return rmClient.getNodesToAttributes(request).getNodeToAttributes();
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
index a29b0db7362..4d939498453 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -52,6 +53,7 @@
public static final String LIST_LABELS_CMD = "list-node-labels";
public static final String DIRECTLY_ACCESS_NODE_LABEL_STORE =
"directly-access-node-label-store";
+ public static final String LIST_CLUSTER_ATTRIBUTES = "list-node-attributes";
public static final String CMD = "cluster";
private boolean accessLocal = false;
static CommonNodeLabelsManager localNodeLabelsManager = null;
@@ -71,6 +73,8 @@ public int run(String[] args) throws Exception {
opts.addOption("lnl", LIST_LABELS_CMD, false,
"List cluster node-label collection");
+ opts.addOption("lna", LIST_CLUSTER_ATTRIBUTES, false,
+ "List cluster node-attribute collection");
opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
opts.addOption("dnl", DIRECTLY_ACCESS_NODE_LABEL_STORE, false,
"This is DEPRECATED, will be removed in future releases. Directly access node label store, "
@@ -102,6 +106,8 @@ public int run(String[] args) throws Exception {
if (parsedCli.hasOption(LIST_LABELS_CMD)) {
printClusterNodeLabels();
+ } else if (parsedCli.hasOption(LIST_CLUSTER_ATTRIBUTES)) {
+ printClusterNodeAttributes();
} else if (parsedCli.hasOption(HELP_CMD)) {
printUsage(opts);
return 0;
@@ -112,6 +118,17 @@ public int run(String[] args) throws Exception {
return 0;
}
+ private void printClusterNodeAttributes() throws IOException, YarnException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintWriter pw = new PrintWriter(
+ new OutputStreamWriter(baos, Charset.forName("UTF-8")));
+ for (NodeAttributeInfo attribute : client.getClusterAttributes()) {
+ pw.println(attribute.toString());
+ }
+ pw.close();
+ sysout.println(baos.toString("UTF-8"));
+ }
+
void printClusterNodeLabels() throws YarnException, IOException {
 List<NodeLabel> nodeLabels = null;
if (accessLocal) {
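With the new option wired into ClusterCLI, listing the cluster's node attributes from the command line would presumably look like yarn cluster -lna (long form --list-node-attributes), mirroring the existing -lnl/--list-node-labels option; printClusterNodeAttributes() above prints one NodeAttributeInfo per line.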
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
new file mode 100644
index 00000000000..13d5e24c1c5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
@@ -0,0 +1,715 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.cli;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.MissingArgumentException;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionGroup;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.UnrecognizedOptionException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * CLI to map attributes to Nodes.
+ */
+public class NodeAttributesCLI extends Configured implements Tool {
+
+ protected static final String INVALID_MAPPING_ERR_MSG =
+ "Invalid Node to attribute mapping : ";
+
+ protected static final String USAGE_YARN_NODE_ATTRIBUTES =
+ "Usage: yarn nodeattributes ";
+
+ protected static final String MISSING_ARGUMENT =
+ "Missing argument for command";
+
+ protected static final String NO_MAPPING_ERR_MSG =
+ "No node-to-attributes mappings are specified";
+
+ private static final String DEFAULT_SEPARATOR = System.lineSeparator();
+ public static final String INVALID_COMMAND_USAGE = "Invalid Command Usage : ";
+ /**
+ * Output stream for errors, for use in tests.
+ */
+ private PrintStream errOut = System.err;
+
+ public NodeAttributesCLI() {
+ super();
+ }
+
+ protected void setErrOut(PrintStream errOut) {
+ this.errOut = errOut;
+ }
+
+ protected AdminCommandHandler getAdminCommandHandler() {
+ return new AdminCommandHandler();
+ }
+
+ protected ClientCommandHandler getClientCommandHandler() {
+ return new ClientCommandHandler();
+ }
+
+ void printUsage(String cmd, boolean desc, CommandHandler... handlers)
+ throws UnsupportedEncodingException {
+ StringBuilder usageBuilder = new StringBuilder();
+ usageBuilder.append(USAGE_YARN_NODE_ATTRIBUTES);
+ boolean satisfied = false;
+ for (CommandHandler cmdHandlers : handlers) {
+ satisfied |= cmdHandlers.getHelp(cmd, usageBuilder, desc);
+ }
+ if (!satisfied) {
+ printUsage(desc, handlers);
+ } else {
+ print(usageBuilder);
+ }
+ }
+
+ private void printUsage(boolean desc, CommandHandler... handlers)
+ throws UnsupportedEncodingException {
+ StringBuilder usageBuilder = new StringBuilder();
+ usageBuilder.append(USAGE_YARN_NODE_ATTRIBUTES);
+ for (CommandHandler cmdHandlers : handlers) {
+ cmdHandlers.getHelp(usageBuilder, desc);
+ }
+
+ // append help with usage
+ usageBuilder.append(DEFAULT_SEPARATOR)
+ .append(" -help [cmd] List help of commands");
+ print(usageBuilder);
+ }
+
+ private void print(StringBuilder usageBuilder)
+ throws UnsupportedEncodingException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintWriter pw =
+ new PrintWriter(new OutputStreamWriter(baos, Charset.forName("UTF-8")));
+ pw.write(usageBuilder.toString());
+ pw.close();
+ errOut.println(baos.toString("UTF-8"));
+ }
+
+ private Options buildOptions(CommandHandler... handlers) {
+ Options opts = new Options();
+ for (CommandHandler handler : handlers) {
+ Options handlerOpts = handler.getOptions();
+ handlerOpts.getOptions().iterator()
+ .forEachRemaining(option -> opts.addOption((Option) option));
+ }
+ return opts;
+ }
+
+ public int run(String[] args) throws Exception {
+
+ int exitCode = -1;
+
+ AdminCommandHandler adminCmdHandler = getAdminCommandHandler();
+ ClientCommandHandler clientCmdHandler = getClientCommandHandler();
+
+ // Build options
+ Options opts = buildOptions(adminCmdHandler, clientCmdHandler);
+
+ if (args.length < 1) {
+ printUsage(false, adminCmdHandler, clientCmdHandler);
+ return -1;
+ }
+
+ // Handle the -help command separately
+ if (handleHelpCommand(args, adminCmdHandler, clientCmdHandler)) {
+ return 0;
+ }
+
+ CommandLine cliParser;
+ CommandHandler handler = null;
+ try {
+ cliParser = new GnuParser().parse(opts, args);
+ handler = adminCmdHandler.canHandleCommand(cliParser) ?
+ adminCmdHandler :
+ clientCmdHandler.canHandleCommand(cliParser) ?
+ clientCmdHandler :
+ null;
+ if (handler == null) {
+ errOut.println(INVALID_COMMAND_USAGE);
+ printUsage(false, adminCmdHandler, clientCmdHandler);
+ return exitCode;
+ } else {
+ return handler.handleCommand(cliParser);
+ }
+ } catch (UnrecognizedOptionException e) {
+ errOut.println(INVALID_COMMAND_USAGE);
+ printUsage(false, adminCmdHandler, clientCmdHandler);
+ return exitCode;
+ } catch (MissingArgumentException ex) {
+ errOut.println(MISSING_ARGUMENT);
+ printUsage(true, adminCmdHandler, clientCmdHandler);
+ return exitCode;
+ } catch (IllegalArgumentException arge) {
+ errOut.println(arge.getLocalizedMessage());
+ // print detailed usage for the resolved command handler
+ printUsage(true, handler);
+ return exitCode;
+ } catch (Exception e) {
+ errOut.println(e.toString());
+ printUsage(true, handler);
+ return exitCode;
+ }
+ }
+
+ private boolean handleHelpCommand(String[] args, CommandHandler... handlers)
+ throws UnsupportedEncodingException {
+ if (args[0].equals("-help")) {
+ if (args.length == 2) {
+ printUsage(args[1], true, handlers);
+ } else {
+ printUsage(true, handlers);
+ }
+ return true;
+ }
+ return false;
+ }
+
+ public static void main(String[] args) throws Exception {
+ int result = ToolRunner.run(new NodeAttributesCLI(), args);
+ System.exit(result);
+ }
+
+ /**
+ * Abstract class for command handler.
+ */
+ public static abstract class CommandHandler extends Configured {
+
+ private Options options;
+
+ private LinkedList<String> order = new LinkedList<>();
+ private String header;
+
+ protected CommandHandler(String header) {
+ this(new YarnConfiguration());
+ this.header = header;
+ }
+
+ protected CommandHandler(Configuration conf) {
+ super(conf);
+ options = buildOptions();
+ }
+
+ public boolean canHandleCommand(CommandLine parse) {
+ ArrayList