diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 9b7d7ba5d1a..e955979acde 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -574,8 +574,11 @@ private int help(String[] argv) {
}
return 0;
}
-
- protected static class UsageInfo {
+
+ /**
+ * UsageInfo class holds args and help details.
+ */
+ public static class UsageInfo {
public final String args;
public final String help;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index ac4b73b2417..2cb37166c9b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -58,10 +58,14 @@
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -538,4 +542,22 @@ public Resource getResourceProfile(String profile)
throws YarnException, IOException {
return client.getResourceTypeInfo();
}
+
+ @Override
+ public Set<NodeAttributeInfo> getClusterAttributes()
+ throws YarnException, IOException {
+ return client.getClusterAttributes();
+ }
+
+ @Override
+ public Map<NodeAttributeKey, List<NodeToAttributeValue>> getAttributesToNodes(
+ Set<NodeAttributeKey> attributes) throws YarnException, IOException {
+ return client.getAttributesToNodes(attributes);
+ }
+
+ @Override
+ public Map<String, Set<NodeAttribute>> getNodeToAttributes(
+ Set<String> hostNames) throws YarnException, IOException {
+ return client.getNodeToAttributes(hostNames);
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index f97d0a48f72..5972f65e961 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -82,8 +82,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
@@ -100,6 +104,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
@@ -521,6 +527,25 @@ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
throws YarnException, IOException {
return null;
}
+
+ @Override
+ public GetAttributesToNodesResponse getAttributesToNodes(
+ GetAttributesToNodesRequest request) throws YarnException, IOException {
+ return null;
+ }
+
+ @Override
+ public GetClusterNodeAttributesResponse getClusterNodeAttributes(
+ GetClusterNodeAttributesRequest request)
+ throws YarnException, IOException {
+ return null;
+ }
+
+ @Override
+ public GetNodesToAttributesResponse getNodesToAttributes(
+ GetNodesToAttributesRequest request) throws YarnException, IOException {
+ return null;
+ }
}
class HistoryService extends AMService implements HSClientProtocol {
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 40df7c5e854..b40dbfcfef2 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -142,6 +142,7 @@
+      <item name="NodeAttributes" href="hadoop-yarn/hadoop-yarn-site/NodeAttributes.html"/>
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 69946c88bef..2eee3517170 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.sls.nodemanager;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -32,6 +33,7 @@
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -219,6 +221,11 @@ public Integer getDecommissioningTimeout() {
return null;
}
+ @Override
+ public Set<NodeAttribute> getAllNodeAttributes() {
+ return Collections.emptySet();
+ }
+
@Override
public RMContext getRMContext() {
return null;
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index a96b7901bfc..c73fb15be00 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -207,6 +208,11 @@ public Integer getDecommissioningTimeout() {
return node.getAllocationTagsWithCount();
}
+ @Override
+ public Set<NodeAttribute> getAllNodeAttributes() {
+ return node.getAllNodeAttributes();
+ }
+
@Override
public RMContext getRMContext() {
return node.getRMContext();
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 69afe6f88a7..8290fcda8d9 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -55,6 +55,7 @@ function hadoop_usage
hadoop_add_subcommand "timelinereader" client "run the timeline reader server"
hadoop_add_subcommand "timelineserver" daemon "run the timeline server"
hadoop_add_subcommand "top" client "view cluster information"
+ hadoop_add_subcommand "nodeattributes" client "node attributes cli client"
hadoop_add_subcommand "version" client "print the version"
hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
}
@@ -186,6 +187,10 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*"
HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer'
;;
+ nodeattributes)
+ HADOOP_SUBCMD_SUPPORTDAEMONIZATION="false"
+ HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.NodeAttributesCLI'
+ ;;
timelineserver)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
index 3c4e4d01002..941a688134f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
@@ -27,8 +27,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
@@ -39,6 +43,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
@@ -642,4 +648,53 @@ GetResourceProfileResponse getResourceProfile(
@Unstable
GetAllResourceTypeInfoResponse getResourceTypeInfo(
GetAllResourceTypeInfoRequest request) throws YarnException, IOException;
+
+ /**
+ * <p>
+ * The interface used by client to get attributes to nodes mappings
+ * available in ResourceManager.
+ * </p>
+ *
+ * @param request request to get details of attributes to nodes mapping.
+ * @return Response containing the details of attributes to nodes mappings.
+ * @throws YarnException if any error happens inside YARN
+ * @throws IOException incase of other errors
+ */
+ @Public
+ @Unstable
+ GetAttributesToNodesResponse getAttributesToNodes(
+ GetAttributesToNodesRequest request) throws YarnException, IOException;
+
+ /**
+ * <p>
+ * The interface used by client to get node attributes available in
+ * ResourceManager.
+ * </p>
+ *
+ * @param request request to get node attributes collection of this cluster.
+ * @return Response containing node attributes collection.
+ * @throws YarnException if any error happens inside YARN.
+ * @throws IOException incase of other errors.
+ */
+ @Public
+ @Unstable
+ GetClusterNodeAttributesResponse getClusterNodeAttributes(
+ GetClusterNodeAttributesRequest request)
+ throws YarnException, IOException;
+
+ /**
+ * <p>
+ * The interface used by client to get node to attributes mappings
+ * in existing cluster.
+ * </p>
+ *
+ * @param request request to get nodes to attributes mapping.
+ * @return nodes to attributes mappings.
+ * @throws YarnException if any error happens inside YARN.
+ * @throws IOException incase of other errors.
+ */
+ @Public
+ @Unstable
+ GetNodesToAttributesResponse getNodesToAttributes(
+ GetNodesToAttributesRequest request) throws YarnException, IOException;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesRequest.java
new file mode 100644
index 00000000000..94814e9053e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesRequest.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * <p>
+ * The request from clients to get node to attribute value mapping for all or
+ * given set of Node AttributeKey's in the cluster from the
+ * ResourceManager.
+ * </p>
+ *
+ * @see ApplicationClientProtocol#getAttributesToNodes
+ * (GetAttributesToNodesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetAttributesToNodesRequest {
+
+ public static GetAttributesToNodesRequest newInstance() {
+ return Records.newRecord(GetAttributesToNodesRequest.class);
+ }
+
+ public static GetAttributesToNodesRequest newInstance(
+ Set<NodeAttributeKey> attributes) {
+ GetAttributesToNodesRequest request =
+ Records.newRecord(GetAttributesToNodesRequest.class);
+ request.setNodeAttributes(attributes);
+ return request;
+ }
+
+ /**
+ * Set node attributeKeys for which the mapping of hostname to attribute value
+ * is required.
+ *
+ * @param attributes Set<NodeAttributeKey> provided.
+ */
+ @Public
+ @Unstable
+ public abstract void setNodeAttributes(Set<NodeAttributeKey> attributes);
+
+ /**
+ * Get node attributeKeys for which mapping of hostname to attribute value is
+ * required.
+ *
+ * @return Set<NodeAttributeKey>
+ */
+ @Public
+ @Unstable
+ public abstract Set<NodeAttributeKey> getNodeAttributes();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesResponse.java
new file mode 100644
index 00000000000..c83785fc6ae
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAttributesToNodesResponse.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * <p>
+ * The response sent by the ResourceManager to a client requesting
+ * node to attribute value mapping for all or given set of Node AttributeKey's.
+ * </p>
+ *
+ * @see ApplicationClientProtocol#getAttributesToNodes
+ * (GetAttributesToNodesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetAttributesToNodesResponse {
+ public static GetAttributesToNodesResponse newInstance(
+ Map<NodeAttributeKey, List<NodeToAttributeValue>> map) {
+ GetAttributesToNodesResponse response =
+ Records.newRecord(GetAttributesToNodesResponse.class);
+ response.setAttributeToNodes(map);
+ return response;
+ }
+
+ @Public
+ @Evolving
+ public abstract void setAttributeToNodes(
+ Map<NodeAttributeKey, List<NodeToAttributeValue>> map);
+
+ /**
+ * Get mapping of NodeAttributeKey to its associated mapping of list of
+ * NodeToAttributeValue associated with attribute.
+ *
+ * @return Map<NodeAttributeKey, List<NodeToAttributeValue>> node attributes
+ * to list of NodeToAttributeValue.
+ */
+ @Public
+ @Evolving
+ public abstract Map<NodeAttributeKey,
+ List<NodeToAttributeValue>> getAttributesToNodes();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesRequest.java
new file mode 100644
index 00000000000..ca81f9a0841
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesRequest.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import static org.apache.hadoop.classification.InterfaceAudience.*;
+import static org.apache.hadoop.classification.InterfaceStability.*;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * <p>
+ * The request from clients to get node attributes in the cluster from the
+ * ResourceManager.
+ * </p>
+ *
+ * @see ApplicationClientProtocol#getClusterNodeAttributes
+ * (GetClusterNodeAttributesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetClusterNodeAttributesRequest {
+
+ /**
+ * Create new instance of GetClusterNodeAttributesRequest.
+ *
+ * @return GetClusterNodeAttributesRequest is returned.
+ */
+ public static GetClusterNodeAttributesRequest newInstance() {
+ return Records.newRecord(GetClusterNodeAttributesRequest.class);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesResponse.java
new file mode 100644
index 00000000000..b0ccd906a32
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterNodeAttributesResponse.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * <p>
+ * The response sent by the ResourceManager to a client requesting
+ * node attributes in cluster.
+ * </p>
+ *
+ * @see ApplicationClientProtocol#getClusterNodeAttributes
+ * (GetClusterNodeAttributesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetClusterNodeAttributesResponse {
+
+ /**
+ * Create instance of GetClusterNodeAttributesResponse.
+ *
+ * @param attributes Set of NodeAttributeInfo.
+ * @return GetClusterNodeAttributesResponse.
+ */
+ public static GetClusterNodeAttributesResponse newInstance(
+ Set<NodeAttributeInfo> attributes) {
+ GetClusterNodeAttributesResponse response =
+ Records.newRecord(GetClusterNodeAttributesResponse.class);
+ response.setNodeAttributes(attributes);
+ return response;
+ }
+
+ /**
+ * Set node attributes to the response.
+ *
+ * @param attributes Map of Node attributeKey to Type.
+ */
+ @Public
+ @Unstable
+ public abstract void setNodeAttributes(Set<NodeAttributeInfo> attributes);
+
+ /**
+ * Get node attributes from the response.
+ *
+ * @return Node attributes.
+ */
+ @Public
+ @Unstable
+ public abstract Set<NodeAttributeInfo> getNodeAttributes();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesRequest.java
new file mode 100644
index 00000000000..8e91bcafed0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesRequest.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.util.Set;
+
+/**
+ * <p>
+ * The request from clients to get nodes to attributes mapping
+ * in the cluster from the ResourceManager.
+ * </p>
+ *
+ * @see ApplicationClientProtocol#getNodesToAttributes
+ * (GetNodesToAttributesRequest)
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class GetNodesToAttributesRequest {
+
+ public static GetNodesToAttributesRequest newInstance(
+ Set<String> hostNames) {
+ GetNodesToAttributesRequest request =
+ Records.newRecord(GetNodesToAttributesRequest.class);
+ request.setHostNames(hostNames);
+ return request;
+ }
+
+ /**
+ * Set hostnames for which mapping is required.
+ *
+ * @param hostnames list of hostnames for which mapping is required.
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public abstract void setHostNames(Set<String> hostnames);
+
+ /**
+ * Get hostnames for which mapping is required.
+ *
+ * @return Set of hostnames.
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public abstract Set<String> getHostNames();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesResponse.java
new file mode 100644
index 00000000000..acc07bb1847
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetNodesToAttributesResponse.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * <p>
+ * The response sent by the ResourceManager to a client requesting
+ * nodes to attributes mapping.
+ * </p>
+ *
+ * @see ApplicationClientProtocol#getNodesToAttributes
+ * (GetNodesToAttributesRequest)
+ */
+@Public
+@Evolving
+public abstract class GetNodesToAttributesResponse {
+
+ public static GetNodesToAttributesResponse newInstance(
+ Map<String, Set<NodeAttribute>> map) {
+ GetNodesToAttributesResponse response =
+ Records.newRecord(GetNodesToAttributesResponse.class);
+ response.setNodeToAttributes(map);
+ return response;
+ }
+
+ @Public
+ @Evolving
+ public abstract void setNodeToAttributes(
+ Map<String, Set<NodeAttribute>> map);
+
+ /**
+ * Get hostnames to NodeAttributes mapping.
+ *
+ * @return Map<String, Set<NodeAttribute>> host to attributes.
+ */
+ @Public
+ @Evolving
+ public abstract Map<String, Set<NodeAttribute>> getNodeToAttributes();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
new file mode 100644
index 00000000000..70649390821
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttribute.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * <p>
+ * Node Attribute is a kind of a label which represents one of the
+ * attribute/feature of a Node. Its different from node partition label as
+ * resource guarantees across the queues will not be maintained for these type
+ * of labels.
+ * </p>
+ * <p>
+ * A given Node can be mapped with any kind of attribute, few examples are
+ * HAS_SSD=true, JAVA_VERSION=JDK1.8, OS_TYPE=WINDOWS.
+ * </p>
+ * <p>
+ * Its not compulsory for all the attributes to have value, empty string is the
+ * default value of the NodeAttributeType.STRING
+ * </p>
+ * <p>
+ * Node Attribute Prefix is used as namespace to segregate the attributes.
+ * </p>
+ */
+@Public
+@Unstable
+public abstract class NodeAttribute {
+
+ public static final String PREFIX_DISTRIBUTED = "nm.yarn.io";
+ public static final String PREFIX_CENTRALIZED = "rm.yarn.io";
+
+ public static NodeAttribute newInstance(String attributeName,
+ NodeAttributeType attributeType, String attributeValue) {
+ return newInstance(PREFIX_CENTRALIZED, attributeName, attributeType,
+ attributeValue);
+ }
+
+ public static NodeAttribute newInstance(String attributePrefix,
+ String attributeName, NodeAttributeType attributeType,
+ String attributeValue) {
+ NodeAttribute nodeAttribute = Records.newRecord(NodeAttribute.class);
+ NodeAttributeKey nodeAttributeKey =
+ NodeAttributeKey.newInstance(attributePrefix, attributeName);
+ nodeAttribute.setAttributeKey(nodeAttributeKey);
+ nodeAttribute.setAttributeType(attributeType);
+ nodeAttribute.setAttributeValue(attributeValue);
+ return nodeAttribute;
+ }
+
+ @Public
+ @Unstable
+ public abstract NodeAttributeKey getAttributeKey();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeKey(NodeAttributeKey attributeKey);
+
+ @Public
+ @Unstable
+ public abstract String getAttributeValue();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeValue(String attributeValue);
+
+ @Public
+ @Unstable
+ public abstract NodeAttributeType getAttributeType();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeType(NodeAttributeType attributeType);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeInfo.java
new file mode 100644
index 00000000000..d294333ed1e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeInfo.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * Node Attribute Info describes a NodeAttribute.
+ *
+ */
+@Public
+@Unstable
+public abstract class NodeAttributeInfo {
+
+ public static NodeAttributeInfo newInstance(NodeAttribute nodeAttribute) {
+ return newInstance(nodeAttribute.getAttributeKey(),
+ nodeAttribute.getAttributeType());
+ }
+
+ public static NodeAttributeInfo newInstance(NodeAttributeKey nodeAttributeKey,
+ NodeAttributeType attributeType) {
+ NodeAttributeInfo nodeAttribute =
+ Records.newRecord(NodeAttributeInfo.class);
+ nodeAttribute.setAttributeKey(nodeAttributeKey);
+ nodeAttribute.setAttributeType(attributeType);
+ return nodeAttribute;
+ }
+
+ @Public
+ @Unstable
+ public abstract NodeAttributeKey getAttributeKey();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeKey(NodeAttributeKey attributeKey);
+
+ @Public
+ @Unstable
+ public abstract NodeAttributeType getAttributeType();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeType(NodeAttributeType attributeType);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeKey.java
new file mode 100644
index 00000000000..35ff26f07f1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeKey.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * Node AttributeKey uniquely identifies a given Node Attribute. Node Attribute
+ * is identified based on attribute prefix and name.
+ *
+ *
+ * Node Attribute Prefix is used as namespace to segregate the attributes.
+ *
+ */
+@Public
+@Unstable
+public abstract class NodeAttributeKey {
+
+ public static NodeAttributeKey newInstance(String attributeName) {
+ return newInstance(NodeAttribute.PREFIX_CENTRALIZED, attributeName);
+ }
+
+ public static NodeAttributeKey newInstance(String attributePrefix,
+ String attributeName) {
+ NodeAttributeKey nodeAttributeKey =
+ Records.newRecord(NodeAttributeKey.class);
+ nodeAttributeKey.setAttributePrefix(attributePrefix);
+ nodeAttributeKey.setAttributeName(attributeName);
+ return nodeAttributeKey;
+ }
+
+ @Public
+ @Unstable
+ public abstract String getAttributePrefix();
+
+ @Public
+ @Unstable
+ public abstract void setAttributePrefix(String attributePrefix);
+
+ @Public
+ @Unstable
+ public abstract String getAttributeName();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeName(String attributeName);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeOpCode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeOpCode.java
new file mode 100644
index 00000000000..76db063eed5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeOpCode.java
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+/**
+ * Enumeration of various node attribute op codes.
+ */
+@Public
+@Evolving
+public enum NodeAttributeOpCode {
+ /**
+ * Default as No OP.
+ */
+ NO_OP,
+ /**
+ * EQUALS op code for Attribute.
+ */
+ EQ,
+
+ /**
+ * NOT EQUALS op code for Attribute.
+ */
+ NE
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeType.java
new file mode 100644
index 00000000000..3f281c81b19
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeAttributeType.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ *
+ * Type of a node Attribute.
+ *
+ * Based on this attribute expressions and values will be evaluated.
+ */
+@Public
+@Unstable
+public enum NodeAttributeType {
+ /** string type node attribute. */
+ STRING
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
index 3a80641bb6d..625ad234081 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
@@ -258,4 +258,17 @@ public NodeUpdateType getNodeUpdateType() {
* Set the node update type (null indicates absent node update type).
* */
public void setNodeUpdateType(NodeUpdateType nodeUpdateType) {}
+
+ /**
+ * Set the node attributes of node.
+ *
+ * @param nodeAttributes set of node attributes.
+ */
+ public abstract void setNodeAttributes(Set<NodeAttribute> nodeAttributes);
+
+ /**
+ * Get node attributes of node.
+ * @return the set of node attributes.
+ */
+ public abstract Set<NodeAttribute> getNodeAttributes();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToAttributeValue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToAttributeValue.java
new file mode 100644
index 00000000000..0bcb8b68b41
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeToAttributeValue.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ *
+ * Mapping of Attribute Value to a Node.
+ *
+ */
+@Public
+@Unstable
+public abstract class NodeToAttributeValue {
+ public static NodeToAttributeValue newInstance(String hostname,
+ String attributeValue) {
+ NodeToAttributeValue nodeToAttributeValue =
+ Records.newRecord(NodeToAttributeValue.class);
+ nodeToAttributeValue.setAttributeValue(attributeValue);
+ nodeToAttributeValue.setHostname(hostname);
+ return nodeToAttributeValue;
+ }
+
+ @Public
+ @Unstable
+ public abstract String getAttributeValue();
+
+ @Public
+ @Unstable
+ public abstract void setAttributeValue(String attributeValue);
+
+ @Public
+ @Unstable
+ public abstract String getHostname();
+
+ @Public
+ @Unstable
+ public abstract void setHostname(String hostname);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index 0fe8273e6d7..79196fbf851 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
/**
* {@code PlacementConstraint} represents a placement constraint for a resource
@@ -155,13 +156,22 @@ public String toString() {
private int minCardinality;
private int maxCardinality;
+ private Set<TargetExpression> targetExpressions;
+ private NodeAttributeOpCode attributeOpCode;
public SingleConstraint(String scope, int minCardinality,
- int maxCardinality, Set<TargetExpression> targetExpressions) {
+ int maxCardinality, NodeAttributeOpCode opCode,
+ Set<TargetExpression> targetExpressions) {
this.scope = scope;
this.minCardinality = minCardinality;
this.maxCardinality = maxCardinality;
this.targetExpressions = targetExpressions;
+ this.attributeOpCode = opCode;
+ }
+
+ public SingleConstraint(String scope, int minCardinality,
+ int maxCardinality, Set<TargetExpression> targetExpressions) {
+ this(scope, minCardinality, maxCardinality, NodeAttributeOpCode.NO_OP,
+ targetExpressions);
}
public SingleConstraint(String scope, int minC, int maxC,
@@ -169,6 +179,13 @@ public SingleConstraint(String scope, int minC, int maxC,
this(scope, minC, maxC, new HashSet<>(Arrays.asList(targetExpressions)));
}
+ public SingleConstraint(String scope, int minC, int maxC,
+ NodeAttributeOpCode opCode,
+ TargetExpression... targetExpressions) {
+ this(scope, minC, maxC, opCode,
+ new HashSet<>(Arrays.asList(targetExpressions)));
+ }
+
/**
* Get the scope of the constraint.
*
@@ -205,6 +222,15 @@ public int getMaxCardinality() {
return targetExpressions;
}
+ /**
+ * Get the NodeAttributeOpCode of the constraint.
+ *
+ * @return nodeAttribute Op Code
+ */
+ public NodeAttributeOpCode getNodeAttributeOpCode() {
+ return attributeOpCode;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) {
@@ -225,6 +251,10 @@ public boolean equals(Object o) {
if (!getScope().equals(that.getScope())) {
return false;
}
+ if (getNodeAttributeOpCode() != null && !getNodeAttributeOpCode()
+ .equals(that.getNodeAttributeOpCode())) {
+ return false;
+ }
return getTargetExpressions().equals(that.getTargetExpressions());
}
@@ -233,6 +263,7 @@ public int hashCode() {
int result = getScope().hashCode();
result = 31 * result + getMinCardinality();
result = 31 * result + getMaxCardinality();
+ result = 31 * result + getNodeAttributeOpCode().hashCode();
result = 31 * result + getTargetExpressions().hashCode();
return result;
}
@@ -259,6 +290,13 @@ public String toString() {
.append(getScope()).append(",")
.append(targetExpr)
.toString());
+ } else if (min == -1 && max == -1) {
+ // node attribute
+ targetConstraints.add(new StringBuilder()
+ .append(getScope()).append(",")
+ .append(getNodeAttributeOpCode()).append(",")
+ .append(targetExpr)
+ .toString());
} else {
// cardinality
targetConstraints.add(new StringBuilder()
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
index d22a6bd90c0..73fa328833f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType;
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr;
@@ -85,6 +86,24 @@ public static AbstractConstraint targetNotIn(String scope,
return new SingleConstraint(scope, 0, 0, targetExpressions);
}
+ /**
+ * Creates a constraint that requires allocations to be placed on nodes that
+ * belong to a scope (e.g., node or rack) that satisfy any of the
+ * target expressions based on node attribute op code.
+ *
+ * @param scope the scope within which the target expressions should be
+ * satisfied
+ * @param opCode Node Attribute code which could be equals, not equals.
+ * @param targetExpressions the expressions that need to be satisfied within
+ * the scope
+ * @return the resulting placement constraint
+ */
+ public static AbstractConstraint targetNodeAttribute(String scope,
+ NodeAttributeOpCode opCode,
+ TargetExpression... targetExpressions) {
+ return new SingleConstraint(scope, -1, -1, opCode, targetExpressions);
+ }
+
/**
* Creates a constraint that restricts the number of allocations within a
* given scope (e.g., node or rack).
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 148edb9f26c..dd804a30937 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3463,6 +3463,22 @@ public static boolean isAclEnabled(Configuration conf) {
public static final String FS_NODE_LABELS_STORE_ROOT_DIR = NODE_LABELS_PREFIX
+ "fs-store.root-dir";
+ /**
+ * Node-attribute configurations.
+ */
+ public static final String NODE_ATTRIBUTE_PREFIX =
+ YARN_PREFIX + "node-attribute.";
+ /**
+ * Node attribute store implementation class.
+ */
+ public static final String FS_NODE_ATTRIBUTE_STORE_IMPL_CLASS =
+ NODE_ATTRIBUTE_PREFIX + "fs-store.impl.class";
+ /**
+ * File system node attribute store directory.
+ */
+ public static final String FS_NODE_ATTRIBUTE_STORE_ROOT_DIR =
+ NODE_ATTRIBUTE_PREFIX + "fs-store.root-dir";
+
/**
* Flag to indicate if the node labels feature enabled, by default it's
* disabled
@@ -3525,16 +3541,25 @@ public static boolean areNodeLabelsEnabled(
private static final String NM_NODE_LABELS_PREFIX = NM_PREFIX
+ "node-labels.";
+ private static final String NM_NODE_ATTRIBUTES_PREFIX = NM_PREFIX
+ + "node-attributes.";
+
public static final String NM_NODE_LABELS_PROVIDER_CONFIG =
NM_NODE_LABELS_PREFIX + "provider";
+ public static final String NM_NODE_ATTRIBUTES_PROVIDER_CONFIG =
+ NM_NODE_ATTRIBUTES_PREFIX + "provider";
+
// whitelist names for the yarn.nodemanager.node-labels.provider
- public static final String CONFIG_NODE_LABELS_PROVIDER = "config";
- public static final String SCRIPT_NODE_LABELS_PROVIDER = "script";
+ public static final String CONFIG_NODE_DESCRIPTOR_PROVIDER = "config";
+ public static final String SCRIPT_NODE_DESCRIPTOR_PROVIDER = "script";
private static final String NM_NODE_LABELS_PROVIDER_PREFIX =
NM_NODE_LABELS_PREFIX + "provider.";
+ private static final String NM_NODE_ATTRIBUTES_PROVIDER_PREFIX =
+ NM_NODE_ATTRIBUTES_PREFIX + "provider.";
+
public static final String NM_NODE_LABELS_RESYNC_INTERVAL =
NM_NODE_LABELS_PREFIX + "resync-interval-ms";
@@ -3559,6 +3584,9 @@ public static boolean areNodeLabelsEnabled(
public static final String NM_PROVIDER_CONFIGURED_NODE_PARTITION =
NM_NODE_LABELS_PROVIDER_PREFIX + "configured-node-partition";
+ public static final String NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES =
+ NM_NODE_ATTRIBUTES_PROVIDER_PREFIX + "configured-node-attributes";
+
private static final String RM_NODE_LABELS_PREFIX = RM_PREFIX
+ "node-labels.";
@@ -3606,6 +3634,33 @@ public static boolean areNodeLabelsEnabled(
NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PREFIX + "opts";
/**
+ * Node attribute provider fetch attributes interval and timeout.
+ */
+ public static final String NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS =
+ NM_NODE_ATTRIBUTES_PROVIDER_PREFIX + "fetch-interval-ms";
+
+ public static final long
+ DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS = 10 * 60 * 1000;
+
+ public static final String NM_NODE_ATTRIBUTES_PROVIDER_FETCH_TIMEOUT_MS =
+ NM_NODE_ATTRIBUTES_PROVIDER_PREFIX + "fetch-timeout-ms";
+
+ public static final long DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_TIMEOUT_MS
+ = DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS * 2;
+
+ /**
+ * Script to collect node attributes.
+ */
+ private static final String NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PREFIX =
+ NM_NODE_ATTRIBUTES_PROVIDER_PREFIX + "script.";
+
+ public static final String NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PATH =
+ NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PREFIX + "path";
+
+ public static final String NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_OPTS =
+ NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PREFIX + "opts";
+
+ /*
* Support to view apps for given user in secure cluster.
* @deprecated This field is deprecated for {@link #FILTER_ENTITY_LIST_BY_USER}
*/
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
index 852334245ce..4777cf8b62a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
@@ -30,6 +30,8 @@
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
@@ -37,6 +39,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
@@ -52,8 +56,6 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
-import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
-import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
@Private
public interface ResourceManagerAdministrationProtocol extends GetUserMappingsProtocol {
@@ -144,4 +146,11 @@ public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
RefreshClusterMaxPriorityRequest request) throws YarnException,
IOException;
+
+
+ @Private
+ @Idempotent
+ NodesToAttributesMappingResponse mapAttributesToNodes(
+ NodesToAttributesMappingRequest request) throws YarnException,
+ IOException;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AttributeMappingOperationType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AttributeMappingOperationType.java
new file mode 100644
index 00000000000..5de15040503
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AttributeMappingOperationType.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ *
+ * Type of node to attribute mapping operation.
+ *
+ *
+ */
+@Public
+@Unstable
+public enum AttributeMappingOperationType {
+ /** Replaces the existing node to attribute mapping with new mapping.*/
+ REPLACE,
+
+ /** Add attribute(s) to a node and if it already exists will update the
+ * value.*/
+ ADD,
+
+ /** Removes attribute(s) mapped to a node. */
+ REMOVE
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeToAttributes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeToAttributes.java
new file mode 100644
index 00000000000..b2e38b4490f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeToAttributes.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Represents a mapping of Node id to list of attributes.
+ */
+@Public
+@Unstable
+public abstract class NodeToAttributes {
+
+ public static NodeToAttributes newInstance(String node,
+ List<NodeAttribute> attributes) {
+ NodeToAttributes nodeIdToAttributes =
+ Records.newRecord(NodeToAttributes.class);
+ nodeIdToAttributes.setNode(node);
+ nodeIdToAttributes.setNodeAttributes(attributes);
+ return nodeIdToAttributes;
+ }
+
+ @Public
+ @Unstable
+ public abstract String getNode();
+
+ @Public
+ @Unstable
+ public abstract void setNode(String node);
+
+ @Public
+ @Unstable
+ public abstract List<NodeAttribute> getNodeAttributes();
+
+ @Public
+ @Unstable
+ public abstract void setNodeAttributes(List<NodeAttribute> attributes);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodesToAttributesMappingRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodesToAttributesMappingRequest.java
new file mode 100644
index 00000000000..71421ed6665
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodesToAttributesMappingRequest.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * List of node-attribute mapping request info.
+ */
+@Public
+@Unstable
+public abstract class NodesToAttributesMappingRequest {
+
+ public static NodesToAttributesMappingRequest newInstance(
+ AttributeMappingOperationType operation,
+ List<NodeToAttributes> nodesToAttributes, boolean failOnUnknownNodes) {
+ NodesToAttributesMappingRequest request =
+ Records.newRecord(NodesToAttributesMappingRequest.class);
+ request.setNodesToAttributes(nodesToAttributes);
+ request.setFailOnUnknownNodes(failOnUnknownNodes);
+ request.setOperation(operation);
+ return request;
+ }
+
+ @Public
+ @Unstable
+ public abstract void setNodesToAttributes(
+ List<NodeToAttributes> nodesToAttributes);
+
+ @Public
+ @Unstable
+ public abstract List<NodeToAttributes> getNodesToAttributes();
+
+ @Public
+ @Unstable
+ public abstract void setFailOnUnknownNodes(boolean failOnUnknownNodes);
+
+ @Public
+ @Unstable
+ public abstract boolean getFailOnUnknownNodes();
+
+ @Public
+ @Unstable
+ public abstract void setOperation(AttributeMappingOperationType operation);
+
+ @Public
+ @Unstable
+ public abstract AttributeMappingOperationType getOperation();
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodesToAttributesMappingResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodesToAttributesMappingResponse.java
new file mode 100644
index 00000000000..10081e1f928
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodesToAttributesMappingResponse.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * NodesToAttributesMappingResponse holds the response for a
+ * nodes-to-attributes mapping request.
+ */
+public class NodesToAttributesMappingResponse {
+ public static NodesToAttributesMappingResponse newInstance() {
+ return Records.newRecord(NodesToAttributesMappingResponse.class);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
index 2926c9d1de8..93fd706b0c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/constraint/PlacementConstraintParser.java
@@ -19,6 +19,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
@@ -44,11 +45,12 @@
@InterfaceStability.Unstable
public final class PlacementConstraintParser {
+ public static final char EXPRESSION_VAL_DELIM = ',';
private static final char EXPRESSION_DELIM = ':';
private static final char KV_SPLIT_DELIM = '=';
- private static final char EXPRESSION_VAL_DELIM = ',';
private static final char BRACKET_START = '(';
private static final char BRACKET_END = ')';
+ private static final String KV_NE_DELIM = "!=";
private static final String IN = "in";
private static final String NOT_IN = "notin";
private static final String AND = "and";
@@ -349,6 +351,91 @@ public String nextElement() {
}
}
+ /**
+ * Constraint parser used to parse node-attribute expressions (e.g. k=v, k!=v).
+ */
+ public static class NodeConstraintParser extends ConstraintParser {
+
+ public NodeConstraintParser(String expression) {
+ super(new BaseStringTokenizer(expression,
+ String.valueOf(EXPRESSION_VAL_DELIM)));
+ }
+
+ @Override
+ public AbstractConstraint parse()
+ throws PlacementConstraintParseException {
+ PlacementConstraint.AbstractConstraint placementConstraints = null;
+ String attributeName = "";
+ NodeAttributeOpCode opCode = NodeAttributeOpCode.EQ;
+ String scope = SCOPE_NODE;
+
+ Set constraintEntities = new TreeSet<>();
+ while (hasMoreTokens()) {
+ String currentTag = nextToken();
+ StringTokenizer attributeKV = getAttributeOpCodeTokenizer(currentTag);
+
+ // Usually there is only one k=v pair. However, when multiple values
+ // are present for the same attribute, they also arrive as subsequent
+ // tokens. For example, java=1.8,1.9 or python!=2.
+ if (attributeKV.countTokens() > 1) {
+ opCode = getAttributeOpCode(currentTag);
+ attributeName = attributeKV.nextToken();
+ currentTag = attributeKV.nextToken();
+ }
+ constraintEntities.add(currentTag);
+ }
+
+ if(attributeName.isEmpty()) {
+ throw new PlacementConstraintParseException(
+ "expecting valid expression like k=v or k!=v, but get "
+ + constraintEntities);
+ }
+
+ PlacementConstraint.TargetExpression target = null;
+ if (!constraintEntities.isEmpty()) {
+ target = PlacementConstraints.PlacementTargets
+ .nodeAttribute(attributeName,
+ constraintEntities
+ .toArray(new String[constraintEntities.size()]));
+ }
+
+ placementConstraints = PlacementConstraints
+ .targetNodeAttribute(scope, opCode, target);
+ return placementConstraints;
+ }
+
+ private StringTokenizer getAttributeOpCodeTokenizer(String currentTag) {
+ StringTokenizer attributeKV = new StringTokenizer(currentTag,
+ KV_NE_DELIM);
+
+ // Try with '!=' delim as well.
+ if (attributeKV.countTokens() < 2) {
+ attributeKV = new StringTokenizer(currentTag,
+ String.valueOf(KV_SPLIT_DELIM));
+ }
+ return attributeKV;
+ }
+
+ /**
+ * Determines the op code from the delimiter used in the tag:
+ * java=8 : OpCode = EQ
+ * java!=8 : OpCode = NE
+ * @param currentTag the attribute expression tag to inspect
+ * @return the node attribute op code (EQ or NE).
+ */
+ private NodeAttributeOpCode getAttributeOpCode(String currentTag)
+ throws PlacementConstraintParseException {
+ if (currentTag.contains(KV_NE_DELIM)) {
+ return NodeAttributeOpCode.NE;
+ } else if (currentTag.contains(String.valueOf(KV_SPLIT_DELIM))) {
+ return NodeAttributeOpCode.EQ;
+ }
+ throw new PlacementConstraintParseException(
+ "expecting valid expression like k=v or k!=v, but get "
+ + currentTag);
+ }
+ }
+
/**
* Constraint parser used to parse a given target expression, such as
* "NOTIN, NODE, foo, bar".
@@ -363,20 +450,23 @@ public TargetConstraintParser(String expression) {
@Override
public AbstractConstraint parse()
throws PlacementConstraintParseException {
- PlacementConstraint.AbstractConstraint placementConstraints;
+ PlacementConstraint.AbstractConstraint placementConstraints = null;
String op = nextToken();
if (op.equalsIgnoreCase(IN) || op.equalsIgnoreCase(NOT_IN)) {
String scope = nextToken();
scope = parseScope(scope);
- Set allocationTags = new TreeSet<>();
+ Set constraintEntities = new TreeSet<>();
while(hasMoreTokens()) {
String tag = nextToken();
- allocationTags.add(tag);
+ constraintEntities.add(tag);
+ }
+ PlacementConstraint.TargetExpression target = null;
+ if(!constraintEntities.isEmpty()) {
+ target = PlacementConstraints.PlacementTargets.allocationTag(
+ constraintEntities
+ .toArray(new String[constraintEntities.size()]));
}
- PlacementConstraint.TargetExpression target =
- PlacementConstraints.PlacementTargets.allocationTag(
- allocationTags.toArray(new String[allocationTags.size()]));
if (op.equalsIgnoreCase(IN)) {
placementConstraints = PlacementConstraints
.targetIn(scope, target);
@@ -550,6 +640,11 @@ public static AbstractConstraint parseExpression(String constraintStr)
new ConjunctionConstraintParser(constraintStr);
constraintOptional = Optional.ofNullable(jp.tryParse());
}
+ if (!constraintOptional.isPresent()) {
+ NodeConstraintParser np =
+ new NodeConstraintParser(constraintStr);
+ constraintOptional = Optional.ofNullable(np.tryParse());
+ }
if (!constraintOptional.isPresent()) {
throw new PlacementConstraintParseException(
"Invalid constraint expression " + constraintStr);
@@ -584,12 +679,13 @@ public static AbstractConstraint parseExpression(String constraintStr)
*/
public static Map parsePlacementSpec(
String expression) throws PlacementConstraintParseException {
+ // Continue handling for application tag based constraint otherwise.
// Respect insertion order.
Map result = new LinkedHashMap<>();
PlacementConstraintParser.ConstraintTokenizer tokenizer =
new PlacementConstraintParser.MultipleConstraintsTokenizer(expression);
tokenizer.validate();
- while(tokenizer.hasMoreElements()) {
+ while (tokenizer.hasMoreElements()) {
String specStr = tokenizer.nextElement();
// each spec starts with sourceAllocationTag=numOfContainers and
// followed by a constraint expression.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
index 81adef19335..fdd4bc5aca8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
@@ -64,4 +64,7 @@ service ApplicationClientProtocolService {
rpc getResourceProfiles(GetAllResourceProfilesRequestProto) returns (GetAllResourceProfilesResponseProto);
rpc getResourceProfile(GetResourceProfileRequestProto) returns (GetResourceProfileResponseProto);
rpc getResourceTypeInfo(GetAllResourceTypeInfoRequestProto) returns (GetAllResourceTypeInfoResponseProto);
+ rpc getClusterNodeAttributes (GetClusterNodeAttributesRequestProto) returns (GetClusterNodeAttributesResponseProto);
+ rpc getAttributesToNodes (GetAttributesToNodesRequestProto) returns (GetAttributesToNodesResponseProto);
+ rpc getNodesToAttributes (GetNodesToAttributesRequestProto) returns (GetNodesToAttributesResponseProto);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
index 113462305cd..032aa8e67b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
@@ -45,4 +45,5 @@ service ResourceManagerAdministrationProtocolService {
rpc replaceLabelsOnNodes(ReplaceLabelsOnNodeRequestProto) returns (ReplaceLabelsOnNodeResponseProto);
rpc checkForDecommissioningNodes(CheckForDecommissioningNodesRequestProto) returns (CheckForDecommissioningNodesResponseProto);
rpc refreshClusterMaxPriority(RefreshClusterMaxPriorityRequestProto) returns (RefreshClusterMaxPriorityResponseProto);
+ rpc mapAttributesToNodes(NodesToAttributesMappingRequestProto) returns (NodesToAttributesMappingResponseProto);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
index e8c92d962f3..d37e36a1878 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
@@ -130,6 +130,22 @@ enum DecommissionTypeProto {
GRACEFUL = 2;
FORCEFUL = 3;
}
+
+
+enum AttributeMappingOperationTypeProto {
+ REPLACE = 1;
+ ADD = 2;
+ REMOVE = 3;
+}
+
+message NodesToAttributesMappingRequestProto {
+ optional AttributeMappingOperationTypeProto operation = 1 [default = REPLACE];
+ repeated NodeToAttributesProto nodeToAttributes = 2;
+ optional bool failOnUnknownNodes = 3;
+}
+
+message NodesToAttributesMappingResponseProto {
+}
//////////////////////////////////////////////////////////////////
///////////// RM Failover related records ////////////////////////
//////////////////////////////////////////////////////////////////
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index d6138e865ff..5fe2cc94550 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -355,6 +355,7 @@ message NodeReportProto {
optional ResourceUtilizationProto node_utilization = 12;
optional uint32 decommissioning_timeout = 13;
optional NodeUpdateTypeProto node_update_type = 14;
+ repeated NodeAttributeProto node_attributes = 15;
}
message NodeIdToLabelsProto {
@@ -372,6 +373,42 @@ message NodeLabelProto {
optional bool isExclusive = 2 [default = true];
}
+enum NodeAttributeTypeProto {
+ STRING = 1;
+}
+
+message NodeAttributeKeyProto {
+ optional string attributePrefix = 1 [default="rm.yarn.io"];
+ required string attributeName = 2;
+}
+
+message NodeAttributeProto {
+ required NodeAttributeKeyProto attributeKey = 1;
+ optional NodeAttributeTypeProto attributeType = 2 [default = STRING];
+ optional string attributeValue = 3 [default=""];
+}
+
+
+message NodeAttributeInfoProto {
+ required NodeAttributeKeyProto attributeKey = 1;
+ required NodeAttributeTypeProto attributeType = 2;
+}
+
+message NodeToAttributeValueProto {
+ required string hostname = 1;
+ required string attributeValue = 2;
+}
+
+message AttributeToNodesProto {
+ required NodeAttributeKeyProto nodeAttribute = 1;
+ repeated NodeToAttributeValueProto nodeValueMap = 2;
+}
+
+message NodeToAttributesProto {
+ optional string node = 1;
+ repeated NodeAttributeProto nodeAttributes = 2;
+}
+
enum ContainerTypeProto {
APPLICATION_MASTER = 1;
TASK = 2;
@@ -609,11 +646,18 @@ message PlacementConstraintProto {
optional CompositePlacementConstraintProto compositeConstraint = 2;
}
+enum NodeAttributeOpCodeProto {
+ NO_OP = 1;
+ EQ = 2;
+ NE = 3;
+}
+
message SimplePlacementConstraintProto {
required string scope = 1;
repeated PlacementConstraintTargetProto targetExpressions = 2;
optional int32 minCardinality = 3;
optional int32 maxCardinality = 4;
+ optional NodeAttributeOpCodeProto attributeOpCode = 5;
}
message PlacementConstraintTargetProto {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index acd452dc79f..248f775bdeb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -260,6 +260,29 @@ message GetClusterNodeLabelsResponseProto {
repeated NodeLabelProto nodeLabels = 2;
}
+message GetClusterNodeAttributesRequestProto {
+}
+
+message GetClusterNodeAttributesResponseProto {
+ repeated NodeAttributeInfoProto nodeAttributes = 1;
+}
+
+message GetAttributesToNodesRequestProto {
+ repeated NodeAttributeKeyProto nodeAttributes = 1;
+}
+
+message GetAttributesToNodesResponseProto {
+ repeated AttributeToNodesProto attributesToNodes = 1;
+}
+
+message GetNodesToAttributesRequestProto {
+ repeated string hostnames = 1;
+}
+
+message GetNodesToAttributesResponseProto {
+ repeated NodeToAttributesProto nodesToAttributes = 1;
+}
+
message UpdateApplicationPriorityRequestProto {
required ApplicationIdProto applicationId = 1;
required PriorityProto applicationPriority = 2;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
index a69571c5c80..9806ba4ac96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/api/resource/TestPlacementConstraintParser.java
@@ -22,6 +22,8 @@
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.And;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or;
@@ -38,8 +40,14 @@
import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.SourceTagsTokenizer;
import org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser.ConstraintTokenizer;
-import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.*;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.and;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.or;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetIn;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNodeAttribute;
+import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import org.junit.Assert;
import org.junit.Test;
@@ -443,4 +451,55 @@ private void verifyConstraintToString(String inputExpr,
+ constrainExpr + ", caused by: " + e.getMessage());
}
}
+
+ @Test
+ public void testParseNodeAttributeSpec()
+ throws PlacementConstraintParseException {
+ Map result;
+ PlacementConstraint.AbstractConstraint expectedPc1, expectedPc2;
+ PlacementConstraint actualPc1, actualPc2;
+
+ // A single node attribute constraint
+ result = PlacementConstraintParser
+ .parsePlacementSpec("xyz=4,rm.yarn.io/foo=true");
+ Assert.assertEquals(1, result.size());
+ TargetExpression target = PlacementTargets
+ .nodeAttribute("rm.yarn.io/foo", "true");
+ expectedPc1 = targetNodeAttribute("node", NodeAttributeOpCode.EQ, target);
+
+ actualPc1 = result.values().iterator().next();
+ Assert.assertEquals(expectedPc1, actualPc1.getConstraintExpr());
+
+ // A single node attribute constraint
+ result = PlacementConstraintParser
+ .parsePlacementSpec("xyz=3,rm.yarn.io/foo!=abc");
+ Assert.assertEquals(1, result.size());
+ target = PlacementTargets
+ .nodeAttribute("rm.yarn.io/foo", "abc");
+ expectedPc1 = targetNodeAttribute("node", NodeAttributeOpCode.NE, target);
+
+ actualPc1 = result.values().iterator().next();
+ Assert.assertEquals(expectedPc1, actualPc1.getConstraintExpr());
+
+ actualPc1 = result.values().iterator().next();
+ Assert.assertEquals(expectedPc1, actualPc1.getConstraintExpr());
+
+ // Two node attribute constraints, one per source tag, separated by ':'
+ result = PlacementConstraintParser
+ .parsePlacementSpec(
+ "xyz=1,rm.yarn.io/foo!=abc:zxy=1,rm.yarn.io/bar=true");
+ Assert.assertEquals(2, result.size());
+ target = PlacementTargets
+ .nodeAttribute("rm.yarn.io/foo", "abc");
+ expectedPc1 = targetNodeAttribute("node", NodeAttributeOpCode.NE, target);
+ target = PlacementTargets
+ .nodeAttribute("rm.yarn.io/bar", "true");
+ expectedPc2 = targetNodeAttribute("node", NodeAttributeOpCode.EQ, target);
+
+ Iterator valueIt = result.values().iterator();
+ actualPc1 = valueIt.next();
+ actualPc2 = valueIt.next();
+ Assert.assertEquals(expectedPc1, actualPc1.getConstraintExpr());
+ Assert.assertEquals(expectedPc2, actualPc2.getConstraintExpr());
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 76fa38f922a..f3693097515 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -523,9 +523,13 @@ public boolean init(String[] args) throws ParseException, IOException {
if (cliParser.hasOption("placement_spec")) {
String placementSpec = cliParser.getOptionValue("placement_spec");
- LOG.info("Placement Spec received [{}]", placementSpec);
- parsePlacementSpecs(placementSpec);
+ String decodedSpec = getDecodedPlacementSpec(placementSpec);
+ LOG.info("Placement Spec received [{}]", decodedSpec);
+
+ this.numTotalContainers = 0;
+ parsePlacementSpecs(decodedSpec);
LOG.info("Total num containers requested [{}]", numTotalContainers);
+
if (numTotalContainers == 0) {
throw new IllegalArgumentException(
"Cannot run distributed shell with no containers");
@@ -694,23 +698,25 @@ public boolean init(String[] args) throws ParseException, IOException {
return true;
}
- private void parsePlacementSpecs(String placementSpecifications) {
- // Client sends placement spec in encoded format
- Base64.Decoder decoder = Base64.getDecoder();
- byte[] decodedBytes = decoder.decode(
- placementSpecifications.getBytes(StandardCharsets.UTF_8));
- String decodedSpec = new String(decodedBytes, StandardCharsets.UTF_8);
- LOG.info("Decode placement spec: " + decodedSpec);
+ private void parsePlacementSpecs(String decodedSpec) {
Map pSpecs =
PlacementSpec.parse(decodedSpec);
this.placementSpecs = new HashMap<>();
- this.numTotalContainers = 0;
for (PlacementSpec pSpec : pSpecs.values()) {
- this.numTotalContainers += pSpec.numContainers;
+ this.numTotalContainers += pSpec.getNumContainers();
this.placementSpecs.put(pSpec.sourceTag, pSpec);
}
}
+ private String getDecodedPlacementSpec(String placementSpecifications) {
+ Base64.Decoder decoder = Base64.getDecoder();
+ byte[] decodedBytes = decoder.decode(
+ placementSpecifications.getBytes(StandardCharsets.UTF_8));
+ String decodedSpec = new String(decodedBytes, StandardCharsets.UTF_8);
+ LOG.info("Decode placement spec: " + decodedSpec);
+ return decodedSpec;
+ }
+
/**
* Helper function to print usage
*
@@ -798,6 +804,7 @@ public void run() throws YarnException, IOException, InterruptedException {
}
}
}
+
RegisterApplicationMasterResponse response = amRMClient
.registerApplicationMaster(appMasterHostname, appMasterRpcPort,
appMasterTrackingUrl, placementConstraintMap);
@@ -845,14 +852,18 @@ public void run() throws YarnException, IOException, InterruptedException {
// Keep looping until all the containers are launched and shell script
// executed on them ( regardless of success/failure).
if (this.placementSpecs == null) {
+ LOG.info("placementSpecs null");
for (int i = 0; i < numTotalContainersToRequest; ++i) {
ContainerRequest containerAsk = setupContainerAskForRM();
amRMClient.addContainerRequest(containerAsk);
}
} else {
+ LOG.info("placementSpecs to create req:" + placementSpecs);
List schedReqs = new ArrayList<>();
for (PlacementSpec pSpec : this.placementSpecs.values()) {
- for (int i = 0; i < pSpec.numContainers; i++) {
+ LOG.info("placementSpec :" + pSpec + ", container:" + pSpec
+ .getNumContainers());
+ for (int i = 0; i < pSpec.getNumContainers(); i++) {
SchedulingRequest sr = setupSchedulingRequest(pSpec);
schedReqs.add(sr);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index c8a71b320c0..446b6088b0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -103,7 +103,7 @@
* the provided shell command on a set of containers.
*
* This client is meant to act as an example on how to write yarn-based applications.
- *
+ *
* To submit an application, a client first needs to connect to the ResourceManager
* aka ApplicationsManager or ASM via the {@link ApplicationClientProtocol}. The {@link ApplicationClientProtocol}
* provides a way for the client to get access to cluster information and to request for a
@@ -192,6 +192,8 @@
// Placement specification
private String placementSpec = "";
+ // Node Attribute specification
+ private String nodeAttributeSpec = "";
// log4j.properties file
// if available, add to local resources and set into classpath
private String log4jPropFile = "";
@@ -448,6 +450,7 @@ public boolean init(String[] args) throws ParseException {
// Check if it is parsable
PlacementSpec.parse(this.placementSpec);
}
+
appName = cliParser.getOptionValue("appname", "DistributedShell");
amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
amQueue = cliParser.getOptionValue("queue", "default");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
index 290925980a5..ceaa37d5879 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/PlacementSpec.java
@@ -37,8 +37,8 @@
LoggerFactory.getLogger(PlacementSpec.class);
public final String sourceTag;
- public final int numContainers;
public final PlacementConstraint constraint;
+ private int numContainers;
public PlacementSpec(String sourceTag, int numContainers,
PlacementConstraint constraint) {
@@ -47,6 +47,22 @@ public PlacementSpec(String sourceTag, int numContainers,
this.constraint = constraint;
}
+ /**
+ * Get the number of containers for this spec.
+ * @return container count
+ */
+ public int getNumContainers() {
+ return numContainers;
+ }
+
+ /**
+ * Set number of containers for this spec.
+ * @param numContainers number of containers.
+ */
+ public void setNumContainers(int numContainers) {
+ this.numContainers = numContainers;
+ }
+
// Placement specification should be of the form:
// PlacementSpec => ""|KeyVal;PlacementSpec
// KeyVal => SourceTag=Constraint
@@ -71,6 +87,7 @@ public PlacementSpec(String sourceTag, int numContainers,
public static Map parse(String specs)
throws IllegalArgumentException {
LOG.info("Parsing Placement Specs: [{}]", specs);
+
Map pSpecs = new HashMap<>();
Map parsed;
try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 26c99e31aa9..59fa6a8f2f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -33,7 +33,6 @@
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
@@ -52,10 +51,14 @@
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -900,4 +903,59 @@ public abstract Resource getResourceProfile(String profile)
@Unstable
public abstract List getResourceTypeInfo()
throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by client to get node attributes in the cluster.
+ *
+ *
+ * @return cluster node attributes collection
+ * @throws YarnException when there is a failure in
+ * {@link ApplicationClientProtocol}
+ * @throws IOException when there is a failure in
+ * {@link ApplicationClientProtocol}
+ */
+ @Public
+ @Unstable
+ public abstract Set getClusterAttributes()
+ throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by client to get mapping of AttributeKey to associated
+ * NodeToAttributeValue list for specified node attributeKeys in the cluster.
+ *
+ *
+ * @param attributes attribute keys for which the associated
+ * NodeToAttributeValue mapping has to be retrieved. If empty or
+ * null, the mapping for all attribute keys in the cluster is returned.
+ * @return mapping of AttributeKey to List of associated
+ * NodeToAttributeValue's.
+ * @throws YarnException when there is a failure in the RPC layer.
+ * @throws IOException when there is an I/O failure in the RPC layer.
+ */
+ @Public
+ @Unstable
+ public abstract Map> getAttributesToNodes(
+ Set attributes) throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by client to get all node to attribute mapping in
+ * existing cluster.
+ *
+ *
+ * @param hostNames host names for which the host-to-attributes mapping has
+ * to be retrieved. If empty or null, the mapping for all
+ * nodes in the cluster is returned.
+ * @return Node to attribute mappings
+ * @throws YarnException when there is a failure in the RPC layer.
+ * @throws IOException when there is an I/O failure in the RPC layer.
+ */
+ @Public
+ @Unstable
+ public abstract Map> getNodeToAttributes(
+ Set hostNames) throws YarnException, IOException;
+
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 1ceb46209b1..acfc3ff70be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -22,7 +22,6 @@
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.EnumSet;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@@ -52,8 +51,10 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
@@ -68,6 +69,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
@@ -96,15 +98,18 @@
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.api.records.Token;
@@ -977,4 +982,28 @@ public Resource getResourceProfile(String profile)
GetAllResourceTypeInfoRequest.newInstance();
return rmClient.getResourceTypeInfo(request).getResourceTypeInfo();
}
+
+ @Override
+ public Set getClusterAttributes()
+ throws YarnException, IOException {
+ GetClusterNodeAttributesRequest request =
+ GetClusterNodeAttributesRequest.newInstance();
+ return rmClient.getClusterNodeAttributes(request).getNodeAttributes();
+ }
+
+ @Override
+ public Map> getAttributesToNodes(
+ Set attributes) throws YarnException, IOException {
+ GetAttributesToNodesRequest request =
+ GetAttributesToNodesRequest.newInstance(attributes);
+ return rmClient.getAttributesToNodes(request).getAttributesToNodes();
+ }
+
+ @Override
+ public Map> getNodeToAttributes(
+ Set hostNames) throws YarnException, IOException {
+ GetNodesToAttributesRequest request =
+ GetNodesToAttributesRequest.newInstance(hostNames);
+ return rmClient.getNodesToAttributes(request).getNodeToAttributes();
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
index a29b0db7362..4d939498453 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -52,6 +53,7 @@
public static final String LIST_LABELS_CMD = "list-node-labels";
public static final String DIRECTLY_ACCESS_NODE_LABEL_STORE =
"directly-access-node-label-store";
+ public static final String LIST_CLUSTER_ATTRIBUTES="list-node-attributes";
public static final String CMD = "cluster";
private boolean accessLocal = false;
static CommonNodeLabelsManager localNodeLabelsManager = null;
@@ -71,6 +73,8 @@ public int run(String[] args) throws Exception {
opts.addOption("lnl", LIST_LABELS_CMD, false,
"List cluster node-label collection");
+ opts.addOption("lna", LIST_CLUSTER_ATTRIBUTES, false,
+ "List cluster node-attribute collection");
opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
opts.addOption("dnl", DIRECTLY_ACCESS_NODE_LABEL_STORE, false,
"This is DEPRECATED, will be removed in future releases. Directly access node label store, "
@@ -102,6 +106,8 @@ public int run(String[] args) throws Exception {
if (parsedCli.hasOption(LIST_LABELS_CMD)) {
printClusterNodeLabels();
+ } else if(parsedCli.hasOption(LIST_CLUSTER_ATTRIBUTES)){
+ printClusterNodeAttributes();
} else if (parsedCli.hasOption(HELP_CMD)) {
printUsage(opts);
return 0;
@@ -112,6 +118,17 @@ public int run(String[] args) throws Exception {
return 0;
}
+ private void printClusterNodeAttributes() throws IOException, YarnException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintWriter pw = new PrintWriter(
+ new OutputStreamWriter(baos, Charset.forName("UTF-8")));
+ for (NodeAttributeInfo attribute : client.getClusterAttributes()) {
+ pw.println(attribute.toString());
+ }
+ pw.close();
+ sysout.println(baos.toString("UTF-8"));
+ }
+
void printClusterNodeLabels() throws YarnException, IOException {
List nodeLabels = null;
if (accessLocal) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
new file mode 100644
index 00000000000..13d5e24c1c5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java
@@ -0,0 +1,715 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.cli;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.MissingArgumentException;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionGroup;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.UnrecognizedOptionException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * CLI to map attributes to Nodes.
+ */
+public class NodeAttributesCLI extends Configured implements Tool {
+
+ protected static final String INVALID_MAPPING_ERR_MSG =
+ "Invalid Node to attribute mapping : ";
+
+ protected static final String USAGE_YARN_NODE_ATTRIBUTES =
+ "Usage: yarn nodeattributes ";
+
+ protected static final String MISSING_ARGUMENT =
+ "Missing argument for command";
+
+ protected static final String NO_MAPPING_ERR_MSG =
+ "No node-to-attributes mappings are specified";
+
+ private static final String DEFAULT_SEPARATOR = System.lineSeparator();
+ public static final String INVALID_COMMAND_USAGE = "Invalid Command Usage : ";
+ /**
+ * Output stream for errors, for use in tests.
+ */
+ private PrintStream errOut = System.err;
+
+ public NodeAttributesCLI() {
+ super();
+ }
+
+ protected void setErrOut(PrintStream errOut) {
+ this.errOut = errOut;
+ }
+
+ protected AdminCommandHandler getAdminCommandHandler() {
+ return new AdminCommandHandler();
+ }
+
+ protected ClientCommandHandler getClientCommandHandler() {
+ return new ClientCommandHandler();
+ }
+
+ void printUsage(String cmd, boolean desc, CommandHandler... handlers)
+ throws UnsupportedEncodingException {
+ StringBuilder usageBuilder = new StringBuilder();
+ usageBuilder.append(USAGE_YARN_NODE_ATTRIBUTES);
+ boolean satisfied = false;
+ for (CommandHandler cmdHandlers : handlers) {
+ satisfied |= cmdHandlers.getHelp(cmd, usageBuilder, desc);
+ }
+ if (!satisfied) {
+ printUsage(desc, handlers);
+ } else {
+ print(usageBuilder);
+ }
+ }
+
+ private void printUsage(boolean desc, CommandHandler... handlers)
+ throws UnsupportedEncodingException {
+ StringBuilder usageBuilder = new StringBuilder();
+ usageBuilder.append(USAGE_YARN_NODE_ATTRIBUTES);
+ for (CommandHandler cmdHandlers : handlers) {
+ cmdHandlers.getHelp(usageBuilder, desc);
+ }
+
+ // append help with usage
+ usageBuilder.append(DEFAULT_SEPARATOR)
+ .append(" -help [cmd] List help of commands");
+ print(usageBuilder);
+ }
+
+ private void print(StringBuilder usageBuilder)
+ throws UnsupportedEncodingException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintWriter pw =
+ new PrintWriter(new OutputStreamWriter(baos, Charset.forName("UTF-8")));
+ pw.write(usageBuilder.toString());
+ pw.close();
+ errOut.println(baos.toString("UTF-8"));
+ }
+
+ private Options buildOptions(CommandHandler... handlers) {
+ Options opts = new Options();
+ for (CommandHandler handler : handlers) {
+ Options handlerOpts = handler.getOptions();
+ handlerOpts.getOptions().iterator()
+ .forEachRemaining(option -> opts.addOption((Option) option));
+ }
+ return opts;
+ }
+
+ public int run(String[] args) throws Exception {
+
+ int exitCode = -1;
+
+ AdminCommandHandler adminCmdHandler = getAdminCommandHandler();
+ ClientCommandHandler clientCmdHandler = getClientCommandHandler();
+
+ // Build options
+ Options opts = buildOptions(adminCmdHandler, clientCmdHandler);
+
+ if (args.length < 1) {
+ printUsage(false, adminCmdHandler, clientCmdHandler);
+ return -1;
+ }
+
+ // Handle command separate
+ if (handleHelpCommand(args, adminCmdHandler, clientCmdHandler)) {
+ return 0;
+ }
+
+ CommandLine cliParser;
+ CommandHandler handler = null;
+ try {
+ cliParser = new GnuParser().parse(opts, args);
+ handler = adminCmdHandler.canHandleCommand(cliParser) ?
+ adminCmdHandler :
+ clientCmdHandler.canHandleCommand(cliParser) ?
+ clientCmdHandler :
+ null;
+ if (handler == null) {
+ errOut.println(INVALID_COMMAND_USAGE);
+ printUsage(false, adminCmdHandler, clientCmdHandler);
+ return exitCode;
+ } else {
+ return handler.handleCommand(cliParser);
+ }
+ } catch (UnrecognizedOptionException e) {
+ errOut.println(INVALID_COMMAND_USAGE);
+ printUsage(false, adminCmdHandler, clientCmdHandler);
+ return exitCode;
+ } catch (MissingArgumentException ex) {
+ errOut.println(MISSING_ARGUMENT);
+ printUsage(true, adminCmdHandler, clientCmdHandler);
+ return exitCode;
+ } catch (IllegalArgumentException arge) {
+ errOut.println(arge.getLocalizedMessage());
+ // print admin command detail
+ printUsage(true, handler);
+ return exitCode;
+ } catch (Exception e) {
+ errOut.println(e.toString());
+ printUsage(true, handler);
+ return exitCode;
+ }
+ }
+
+ private boolean handleHelpCommand(String[] args, CommandHandler... handlers)
+ throws UnsupportedEncodingException {
+ if (args[0].equals("-help")) {
+ if (args.length == 2) {
+ printUsage(args[1], true, handlers);
+ } else {
+ printUsage(true, handlers);
+ }
+ return true;
+ }
+ return false;
+ }
+
+ public static void main(String[] args) throws Exception {
+ int result = ToolRunner.run(new NodeAttributesCLI(), args);
+ System.exit(result);
+ }
+
+ /**
+ * Abstract class for command handler.
+ */
+ public static abstract class CommandHandler extends Configured {
+
+ private Options options;
+
+ private LinkedList order = new LinkedList<>();
+ private String header;
+
+ protected CommandHandler(String header) {
+ this(new YarnConfiguration());
+ this.header = header;
+ }
+
+ protected CommandHandler(Configuration conf) {
+ super(conf);
+ options = buildOptions();
+ }
+
+ public boolean canHandleCommand(CommandLine parse) {
+ ArrayList arrayList = new ArrayList (options.getOptions());
+ return arrayList.stream().anyMatch(opt -> parse.hasOption(opt.getOpt()));
+ }
+
+ public abstract int handleCommand(CommandLine parse)
+ throws IOException, YarnException;
+
+ public abstract Options buildOptions();
+
+ public Options getOptions() {
+ return options;
+ }
+
+ public boolean getHelp(String cmd, StringBuilder strcnd, boolean addDesc) {
+ Option opt = options.getOption(cmd);
+ if (opt != null) {
+ strcnd.append(DEFAULT_SEPARATOR).append(" -").append(opt.getOpt());
+ if (opt.hasArg()) {
+ strcnd.append(" <").append(opt.getArgName()).append(">");
+ }
+ if (addDesc) {
+ strcnd.append(DEFAULT_SEPARATOR).append("\t")
+ .append(opt.getDescription());
+ }
+ }
+ return opt == null;
+ }
+
+ public void getHelp(StringBuilder builder, boolean description) {
+ builder.append(DEFAULT_SEPARATOR).append(DEFAULT_SEPARATOR)
+ .append(header);
+ for (String option : order) {
+ getHelp(option, builder, description);
+ }
+ }
+
+ protected void addOrder(String key){
+ order.add(key);
+ }
+ }
+
+ /**
+ * Client commands handler.
+ */
+ public static class ClientCommandHandler extends CommandHandler {
+
+ private static final String LIST_ALL_ATTRS = "list";
+
+ private static final String NODESTOATTR = "nodestoattributes";
+ private static final String NODES = "nodes";
+
+ private static final String ATTRTONODES = "attributestonodes";
+ private static final String ATTRIBUTES = "attributes";
+
+ public static final String SPLITPATTERN = "/";
+
+ private static final String NODEATTRIBUTE =
+ "%40s\t%10s\t%20s" + DEFAULT_SEPARATOR;
+ private static final String NODEATTRIBUTEINFO =
+ "%40s\t%15s" + DEFAULT_SEPARATOR;
+ private static final String HOSTNAMEVAL = "%40s\t%15s" + DEFAULT_SEPARATOR;
+
+ private PrintStream sysOut = System.out;
+
+ public ClientCommandHandler() {
+ super("Client Commands:");
+
+ }
+
+ public void setSysOut(PrintStream out) {
+ this.sysOut = out;
+ }
+
+ @Override
+ public int handleCommand(CommandLine parse)
+ throws IOException, YarnException {
+ if (parse.hasOption(LIST_ALL_ATTRS)) {
+ return printClusterAttributes();
+ } else if (parse.hasOption(NODESTOATTR)) {
+ String[] nodes = new String[0];
+ if (parse.hasOption(NODES)) {
+ nodes = parse.getOptionValues(NODES);
+ }
+ return printAttributesByNode(nodes);
+ } else if (parse.hasOption(ATTRTONODES)) {
+ String[] attrKeys = {};
+ if (parse.hasOption(ATTRIBUTES)) {
+ attrKeys = parse.getOptionValues(ATTRIBUTES);
+ }
+ return printNodesByAttributes(attrKeys);
+ }
+ return 0;
+ }
+
+ protected ApplicationClientProtocol createApplicationProtocol()
+ throws IOException {
+ // Get the current configuration
+ final YarnConfiguration conf = new YarnConfiguration(getConf());
+ return ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
+ }
+
+ public int printNodesByAttributes(String[] attrs)
+ throws YarnException, IOException {
+ ApplicationClientProtocol protocol = createApplicationProtocol();
+ HashSet set = new HashSet<>();
+
+ for (String attr : attrs) {
+ String[] attrFields = attr.split(SPLITPATTERN);
+ if (attrFields.length == 1) {
+ set.add(NodeAttributeKey.newInstance(attrFields[0]));
+ } else if (attrFields.length == 2) {
+ set.add(NodeAttributeKey.newInstance(attrFields[0], attrFields[1]));
+ } else {
+ throw new IllegalArgumentException(
+ " Attribute format not correct. Should be <[prefix]/[name]> :"
+ + attr);
+ }
+ }
+
+ GetAttributesToNodesRequest request =
+ GetAttributesToNodesRequest.newInstance(set);
+ GetAttributesToNodesResponse response =
+ protocol.getAttributesToNodes(request);
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintWriter writer = new PrintWriter(
+ new OutputStreamWriter(baos, Charset.forName("UTF-8")));
+ writer.format(HOSTNAMEVAL, "Hostname", "Attribute-value");
+ response.getAttributesToNodes().forEach((attributeKey, v) -> {
+ writer.println(getKeyString(attributeKey) + " :");
+ v.iterator().forEachRemaining(attrVal -> writer
+ .format(HOSTNAMEVAL, attrVal.getHostname(),
+ attrVal.getAttributeValue()));
+ });
+ writer.close();
+ sysOut.println(baos.toString("UTF-8"));
+ return 0;
+ }
+
+ private int printAttributesByNode(String[] nodeArray)
+ throws YarnException, IOException {
+ ApplicationClientProtocol protocol = createApplicationProtocol();
+ HashSet nodes = new HashSet<>(Arrays.asList(nodeArray));
+ GetNodesToAttributesRequest request =
+ GetNodesToAttributesRequest.newInstance(nodes);
+ GetNodesToAttributesResponse response =
+ protocol.getNodesToAttributes(request);
+ Map> nodeToAttrs =
+ response.getNodeToAttributes();
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintWriter writer = new PrintWriter(
+ new OutputStreamWriter(baos, Charset.forName("UTF-8")));
+ writer.printf(NODEATTRIBUTE, "Attribute", "Type", "Value");
+ nodeToAttrs.forEach((node, v) -> {
+ // print node header
+ writer.println(node + ":");
+ v.iterator().forEachRemaining(attr -> writer
+ .format(NODEATTRIBUTE, getKeyString(attr.getAttributeKey()),
+ attr.getAttributeType().name(), attr.getAttributeValue()));
+ });
+ writer.close();
+ sysOut.println(baos.toString("UTF-8"));
+ return 0;
+ }
+
+ private int printClusterAttributes() throws IOException, YarnException {
+ ApplicationClientProtocol protocol = createApplicationProtocol();
+ GetClusterNodeAttributesRequest request =
+ GetClusterNodeAttributesRequest.newInstance();
+ GetClusterNodeAttributesResponse response =
+ protocol.getClusterNodeAttributes(request);
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintWriter writer = new PrintWriter(
+ new OutputStreamWriter(baos, Charset.forName("UTF-8")));
+ writer.format(NODEATTRIBUTEINFO, "Attribute", "Type");
+ for (NodeAttributeInfo attr : response.getNodeAttributes()) {
+ writer.format(NODEATTRIBUTEINFO, getKeyString(attr.getAttributeKey()),
+ attr.getAttributeType().name());
+ }
+ writer.close();
+ sysOut.println(baos.toString("UTF-8"));
+ return 0;
+ }
+
+ private String getKeyString(NodeAttributeKey key) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(key.getAttributePrefix()).append("/")
+ .append(key.getAttributeName());
+ return sb.toString();
+ }
+
+    @Override
+    public Options buildOptions() {
+      Options clientOptions = new Options();
+      clientOptions.addOption(
+          new Option(LIST_ALL_ATTRS, false, "List all attributes in cluster"));
+
+      // group by command
+      OptionGroup nodeToAttr = new OptionGroup();
+      Option nodesToAttrsOpt = new Option(NODESTOATTR, false,
+          "Lists all mappings of nodes to attributes");
+      Option nodes = new Option(NODES,
+          "Works with [" + NODESTOATTR + "] to specify node hostnames "
+              + "whose mappings are required to be displayed.");
+      nodes.setValueSeparator(',');
+      nodes.setArgName("Host Names");
+      nodes.setArgs(Option.UNLIMITED_VALUES);
+      nodeToAttr.addOption(nodesToAttrsOpt);
+      nodeToAttr.addOption(nodes);
+      clientOptions.addOptionGroup(nodeToAttr);
+
+      // Defined as a group to allow extension later
+      OptionGroup attrToNodes = new OptionGroup();
+      attrToNodes.addOption(new Option(ATTRTONODES, false,
+          "Displays mapping of "
+              + "attributes to nodes and attribute values grouped by "
+              + "attributes"));
+      Option attrs = new Option(ATTRIBUTES, "Works with [" + ATTRTONODES
+          + "] to specify attributes whose mapping "
+          + "are required to be displayed.");
+      attrs.setValueSeparator(',');
+      attrs.setArgName("Attributes");
+      attrs.setArgs(Option.UNLIMITED_VALUES);
+      attrToNodes.addOption(attrs);
+      clientOptions.addOptionGroup(attrToNodes);
+
+      // DEFINE ORDER (controls listing order in help output)
+      addOrder(LIST_ALL_ATTRS);
+      addOrder(NODESTOATTR);
+      addOrder(NODES);
+      addOrder(ATTRTONODES);
+      addOrder(ATTRIBUTES);
+      return clientOptions;
+    }
+ }
+
+ /**
+ * Admin commands handler.
+ */
+ public static class AdminCommandHandler extends CommandHandler {
+
+ private static final String ADD = "add";
+ private static final String REMOVE = "remove";
+ private static final String REPLACE = "replace";
+ private static final String FAILUNKNOWNNODES = "failOnUnknownNodes";
+
+ AdminCommandHandler() {
+ super("Admin Commands:");
+ }
+
+    @Override
+    public Options buildOptions() {
+      Options adminOptions = new Options();
+      Option replace = new Option(REPLACE, true,
+          "Replace the node to attributes mapping information at the"
+              + " ResourceManager with the new mapping. Currently"
+              + " supported attribute type is string. And string is the"
+              + " default type too. Attribute value if not specified for"
+              + " string type value will be considered as empty string."
+              + " Replaced node-attributes should not violate the"
+              + " existing attribute to attribute type mapping.");
+      replace.setArgName("\"node1:attribute[(type)][=value],attribute1[=value],"
+          + "attribute2 node2:attribute2[=value],attribute3\"");
+      replace.setArgs(1);
+      adminOptions.addOption(replace);
+
+      Option add = new Option(ADD, true,
+          "Adds or updates the node to attributes mapping information"
+              + " at the ResourceManager. Currently supported attribute"
+              + " type is string. And string is the default type too."
+              + " Attribute value if not specified for string type"
+              + " value will be considered as empty string. Added or"
+              + " updated node-attributes should not violate the"
+              + " existing attribute to attribute type mapping.");
+      add.setArgName("\"node1:attribute[(type)][=value],attribute1[=value],"
+          + "attribute2 node2:attribute2[=value],attribute3\"");
+      add.setArgs(1);
+      adminOptions.addOption(add);
+
+      Option remove = new Option(REMOVE, true,
+          "Removes the specified node to attributes mapping"
+              + " information at the ResourceManager");
+      remove.setArgName("\"node1:attribute,attribute1 node2:attribute2\"");
+      remove.setArgs(1);
+      adminOptions.addOption(remove);
+
+      adminOptions.addOption(new Option(FAILUNKNOWNNODES, false,
+          "Can be used optionally along with [add,remove,replace] options. "
+              + "When set, command will fail if specified nodes are unknown."));
+
+      // DEFINE ORDER (controls listing order in help output)
+      addOrder(REPLACE);
+      addOrder(ADD);
+      addOrder(REMOVE);
+      addOrder(FAILUNKNOWNNODES);
+
+      return adminOptions;
+    }
+
+ protected ResourceManagerAdministrationProtocol createAdminProtocol()
+ throws IOException {
+ // Get the current configuration
+ final YarnConfiguration conf = new YarnConfiguration(getConf());
+ return ClientRMProxy
+ .createRMProxy(conf, ResourceManagerAdministrationProtocol.class);
+ }
+
+ public int handleCommand(CommandLine cliParser)
+ throws IOException, YarnException {
+ String operation = null;
+ if (cliParser.hasOption(ADD)) {
+ operation = ADD;
+ } else if (cliParser.hasOption(REMOVE)) {
+ operation = REMOVE;
+ } else if (cliParser.hasOption(REPLACE)) {
+ operation = REPLACE;
+ }
+ if (operation != null) {
+ List buildNodeLabelsListFromStr =
+ buildNodeLabelsListFromStr(cliParser.getOptionValue(operation),
+ !operation.equals(REPLACE), operation);
+ NodesToAttributesMappingRequest request =
+ NodesToAttributesMappingRequest.newInstance(
+ AttributeMappingOperationType.valueOf(operation.toUpperCase()),
+ buildNodeLabelsListFromStr,
+ cliParser.hasOption(FAILUNKNOWNNODES));
+ ResourceManagerAdministrationProtocol adminProtocol =
+ createAdminProtocol();
+ adminProtocol.mapAttributesToNodes(request);
+ } else {
+ // Handle case for only failOnUnknownNodes passed
+ throw new IllegalArgumentException(
+ getOptions().getOption(FAILUNKNOWNNODES).getDescription());
+ }
+ return 0;
+ }
+
+ /**
+ * args are expected to be of the format
+ * node1:java(string)=8,ssd(boolean)=false node2:ssd(boolean)=true.
+ */
+ private List buildNodeLabelsListFromStr(String args,
+ boolean validateForAttributes, String operation) {
+ Map nodeToAttributesMap = new HashMap<>();
+ for (String nodeToAttributesStr : args.split("[ \n]")) {
+ // for each node to attribute mapping
+ nodeToAttributesStr = nodeToAttributesStr.trim();
+ if (nodeToAttributesStr.isEmpty() || nodeToAttributesStr
+ .startsWith("#")) {
+ continue;
+ }
+ if (nodeToAttributesStr.indexOf(":") == -1) {
+ throw new IllegalArgumentException(
+ INVALID_MAPPING_ERR_MSG + nodeToAttributesStr);
+ }
+ String[] nodeToAttributes = nodeToAttributesStr.split(":");
+ Preconditions.checkArgument(!nodeToAttributes[0].trim().isEmpty(),
+ "Node name cannot be empty");
+ String node = nodeToAttributes[0];
+ String[] attributeNameValueType = null;
+ List attributesList = new ArrayList<>();
+ NodeAttributeType attributeType = NodeAttributeType.STRING;
+ String attributeValue;
+ String attributeName;
+ Set attributeNamesMapped = new HashSet<>();
+
+ String[] attributesStr;
+ if (nodeToAttributes.length == 2) {
+ // fetching multiple attributes for a node
+ attributesStr = nodeToAttributes[1].split(",");
+ for (String attributeStr : attributesStr) {
+ // get information about each attribute.
+ attributeNameValueType = attributeStr.split("="); // to find name
+ // value
+ Preconditions.checkArgument(
+ !(attributeNameValueType[0] == null || attributeNameValueType[0]
+ .isEmpty()), "Attribute name cannot be null or empty");
+ attributeValue = attributeNameValueType.length > 1 ?
+ attributeNameValueType[1] :
+ "";
+ int indexOfOpenBracket = attributeNameValueType[0].indexOf("(");
+ if (indexOfOpenBracket == -1) {
+ attributeName = attributeNameValueType[0];
+ } else if (indexOfOpenBracket == 0) {
+ throw new IllegalArgumentException("Attribute for node " + node
+ + " is not properly configured : " + attributeStr);
+ } else {
+ // attribute type has been explicitly configured
+ int indexOfCloseBracket = attributeNameValueType[0].indexOf(")");
+ if (indexOfCloseBracket == -1
+ || indexOfCloseBracket < indexOfOpenBracket) {
+ throw new IllegalArgumentException("Attribute for node " + node
+ + " is not properly Configured : " + attributeStr);
+ }
+ String attributeTypeStr;
+ attributeName =
+ attributeNameValueType[0].substring(0, indexOfOpenBracket);
+ attributeTypeStr = attributeNameValueType[0]
+ .substring(indexOfOpenBracket + 1, indexOfCloseBracket);
+ try {
+ attributeType = NodeAttributeType
+ .valueOf(attributeTypeStr.trim().toUpperCase());
+ } catch (IllegalArgumentException e) {
+ throw new IllegalArgumentException(
+ "Invalid Attribute type configuration : " + attributeTypeStr
+ + " in " + attributeStr);
+ }
+ }
+ if (attributeNamesMapped.contains(attributeName)) {
+ throw new IllegalArgumentException("Attribute " + attributeName
+ + " has been mapped more than once in : "
+ + nodeToAttributesStr);
+ }
+ // TODO when we support different type of attribute type we need to
+ // cross verify whether input attributes itself is not violating
+ // attribute Name to Type mapping.
+ attributesList.add(NodeAttribute
+ .newInstance(NodeAttribute.PREFIX_CENTRALIZED,
+ attributeName.trim(), attributeType,
+ attributeValue.trim()));
+ }
+ }
+ if (validateForAttributes) {
+ Preconditions.checkArgument((attributesList.size() > 0),
+ "Attributes cannot be null or empty for Operation [" + operation
+ + "] on the node " + node);
+ }
+ nodeToAttributesMap
+ .put(node, NodeToAttributes.newInstance(node, attributesList));
+ }
+
+ if (nodeToAttributesMap.isEmpty()) {
+ throw new IllegalArgumentException(NO_MAPPING_ERR_MSG);
+ }
+ return Lists.newArrayList(nodeToAttributesMap.values());
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ if (conf != null) {
+ conf = addSecurityConfiguration(conf);
+ }
+ super.setConf(conf);
+ }
+
+ /**
+ * Add the requisite security principal settings to the given Configuration,
+ * returning a copy.
+ *
+ * @param conf the original config
+ * @return a copy with the security settings added
+ */
+ private Configuration addSecurityConfiguration(Configuration conf) {
+ // Make a copy so we don't mutate it. Also use an YarnConfiguration to
+ // force loading of yarn-site.xml.
+ conf = new YarnConfiguration(conf);
+ conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+ conf.get(YarnConfiguration.RM_PRINCIPAL, ""));
+ return conf;
+ }
+
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
index e9253eb6909..44e98705844 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
@@ -44,7 +44,6 @@
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.util.ConverterUtils;
@Private
@Unstable
@@ -307,6 +306,18 @@ private void printNodeStatus(String nodeIdStr) throws YarnException,
Collections.sort(nodeLabelsList);
nodeReportStr.println(StringUtils.join(nodeLabelsList.iterator(), ','));
+ if (nodeReport.getNodeAttributes().size() > 0) {
+ ArrayList nodeAtrs = new ArrayList<>(nodeReport.getNodeAttributes());
+ nodeReportStr.print("\tNode Attributes : ");
+ nodeReportStr.println(nodeAtrs.get(0).toString());
+ for (int index = 1; index < nodeAtrs.size(); index++) {
+ nodeReportStr.println(
+ String.format("\t%18s%s", "", nodeAtrs.get(index).toString()));
+ }
+ } else {
+ nodeReportStr.println("\tNode Attributes : ");
+ }
+
nodeReportStr.print("\tResource Utilization by Node : ");
if (nodeReport.getNodeUtilization() != null) {
nodeReportStr.print("PMem:"
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java
index 5a0f049504f..26afe6f6ea9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java
@@ -18,6 +18,9 @@
package org.apache.hadoop.yarn.client.cli;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
@@ -74,7 +77,32 @@ public void testGetClusterNodeLabels() throws Exception {
pw.close();
verify(sysOut).println(baos.toString("UTF-8"));
}
-
+
+ @Test
+ public void testGetClusterNodeAttributes() throws Exception {
+ YarnClient client = mock(YarnClient.class);
+ when(client.getClusterAttributes()).thenReturn(ImmutableSet
+ .of(NodeAttributeInfo.newInstance(NodeAttributeKey.newInstance("GPU"),
+ NodeAttributeType.STRING), NodeAttributeInfo
+ .newInstance(NodeAttributeKey.newInstance("CPU"),
+ NodeAttributeType.STRING)));
+ ClusterCLI cli = new ClusterCLI();
+ cli.setClient(client);
+ cli.setSysOutPrintStream(sysOut);
+ cli.setSysErrPrintStream(sysErr);
+
+ int rc = cli.run(new String[] {ClusterCLI.CMD,
+ "-" + ClusterCLI.LIST_CLUSTER_ATTRIBUTES});
+ assertEquals(0, rc);
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintWriter pw = new PrintWriter(baos);
+ pw.println("rm.yarn.io/GPU(STRING)");
+ pw.println("rm.yarn.io/CPU(STRING)");
+ pw.close();
+ verify(sysOut).println(baos.toString("UTF-8"));
+ }
+
@Test
public void testGetClusterNodeLabelsWithLocalAccess() throws Exception {
YarnClient client = mock(YarnClient.class);
@@ -157,6 +185,8 @@ public void testHelp() throws Exception {
pw.println(" option is UNSTABLE, could be");
pw.println(" removed in future releases.");
pw.println(" -h,--help Displays help for all commands.");
+ pw.println(" -lna,--list-node-attributes List cluster node-attribute");
+ pw.println(" collection");
pw.println(" -lnl,--list-node-labels List cluster node-label");
pw.println(" collection");
pw.close();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java
new file mode 100644
index 00000000000..7f48493d097
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java
@@ -0,0 +1,537 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client.cli;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Joiner;
+
+/**
+ * Test class for TestNodeAttributesCLI.
+ */
+public class TestNodeAttributesCLI {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestNodeAttributesCLI.class);
+ private ResourceManagerAdministrationProtocol admin;
+ private ApplicationClientProtocol client;
+ private NodesToAttributesMappingRequest nodeToAttrRequest;
+ private NodeAttributesCLI nodeAttributesCLI;
+ private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+ private ByteArrayOutputStream sysOutBytes = new ByteArrayOutputStream();
+ private String errOutput;
+ private String sysOutput;
+
+ @Before
+ public void configure() throws IOException, YarnException {
+
+ admin = mock(ResourceManagerAdministrationProtocol.class);
+ client = mock(ApplicationClientProtocol.class);
+
+ when(admin.mapAttributesToNodes(any(NodesToAttributesMappingRequest.class)))
+ .thenAnswer(new Answer<NodesToAttributesMappingResponse>() {
+ @Override
+ public NodesToAttributesMappingResponse answer(
+ InvocationOnMock invocation) throws Throwable {
+ nodeToAttrRequest =
+ (NodesToAttributesMappingRequest) invocation.getArguments()[0];
+ return NodesToAttributesMappingResponse.newInstance();
+ }
+ });
+
+ nodeAttributesCLI = new NodeAttributesCLI() {
+ @Override
+ protected AdminCommandHandler getAdminCommandHandler() {
+ return new AdminCommandHandler() {
+ @Override
+ protected ResourceManagerAdministrationProtocol createAdminProtocol()
+ throws IOException {
+ return admin;
+ }
+ };
+ }
+
+ @Override
+ protected ClientCommandHandler getClientCommandHandler() {
+ ClientCommandHandler handler = new ClientCommandHandler() {
+ @Override
+ protected ApplicationClientProtocol createApplicationProtocol()
+ throws IOException {
+ return client;
+ }
+ };
+ handler.setSysOut(new PrintStream(sysOutBytes));
+ return handler;
+ }
+ };
+ nodeAttributesCLI.setErrOut(new PrintStream(errOutBytes));
+ }
+
+ @Test
+ public void testHelp() throws Exception {
+ String[] args = new String[] {"-help", "-replace"};
+ assertTrue("It should have succeeded help for replace", 0 == runTool(args));
+ assertErrorContains("-replace <\"node1:attribute[(type)][=value],attribute1"
+ + "[=value],attribute2 node2:attribute2[=value],attribute3\">");
+ assertErrorContains("Replace the node to attributes mapping information at"
+ + " the ResourceManager with the new mapping. Currently supported"
+ + " attribute type. And string is the default type too. Attribute value"
+ + " if not specified for string type value will be considered as empty"
+ + " string. Replaced node-attributes should not violate the existing"
+ + " attribute to attribute type mapping.");
+
+ args = new String[] {"-help", "-remove"};
+ assertTrue("It should have succeeded help for replace", 0 == runTool(args));
+ assertErrorContains(
+ "-remove <\"node1:attribute,attribute1" + " node2:attribute2\">");
+ assertErrorContains("Removes the specified node to attributes mapping"
+ + " information at the ResourceManager");
+
+ args = new String[] {"-help", "-add"};
+ assertTrue("It should have succeeded help for replace", 0 == runTool(args));
+ assertErrorContains("-add <\"node1:attribute[(type)][=value],"
+ + "attribute1[=value],attribute2 node2:attribute2[=value],"
+ + "attribute3\">");
+ assertErrorContains("Adds or updates the node to attributes mapping"
+ + " information at the ResourceManager. Currently supported attribute"
+ + " type is string. And string is the default type too. Attribute value"
+ + " if not specified for string type value will be considered as empty"
+ + " string. Added or updated node-attributes should not violate the"
+ + " existing attribute to attribute type mapping.");
+
+ args = new String[] {"-help", "-failOnUnknownNodes"};
+ assertTrue("It should have succeeded help for replace", 0 == runTool(args));
+ assertErrorContains("-failOnUnknownNodes");
+ assertErrorContains("Can be used optionally along with [add,remove,"
+ + "replace] options. When set, command will fail if specified nodes "
+ + "are unknown.");
+
+ args = new String[] {"-help", "-list"};
+ assertTrue("It should have succeeded help for replace", 0 == runTool(args));
+ assertErrorContains("-list");
+ assertErrorContains("List all attributes in cluster");
+
+ args = new String[] {"-help", "-nodes"};
+ assertTrue("It should have succeeded help for replace", 0 == runTool(args));
+ assertErrorContains("-nodes");
+ assertErrorContains(
+ "Works with [list] to specify node hostnames whose mappings "
+ + "are required to be displayed.");
+
+ args = new String[] {"-help", "-attributes"};
+ assertTrue("It should have succeeded help for replace", 0 == runTool(args));
+ assertErrorContains("-attributes");
+ assertErrorContains(
+ "Works with [attributestonodes] to specify attributes whose mapping "
+ + "are required to be displayed.");
+
+ args = new String[] {"-help", "-attributestonodes"};
+ assertTrue("It should have succeeded help for replace", 0 == runTool(args));
+ assertErrorContains("-attributestonodes");
+ assertErrorContains("Displays mapping of attributes to nodes and attribute "
+ + "values grouped by attributes");
+ }
+
+ @Test
+ public void testReplace() throws Exception {
+ // --------------------------------
+ // failure scenarios
+ // --------------------------------
+ // parenthesis not match
+ String[] args = new String[] {"-replace", "x("};
+ assertTrue("It should have failed as no node is specified",
+ 0 != runTool(args));
+ assertFailureMessageContains(NodeAttributesCLI.INVALID_MAPPING_ERR_MSG);
+
+ // parenthesis not match
+ args = new String[] {"-replace", "x:(=abc"};
+ assertTrue(
+ "It should have failed as no closing parenthesis is not specified",
+ 0 != runTool(args));
+ assertFailureMessageContains(
+ "Attribute for node x is not properly configured : (=abc");
+
+ args = new String[] {"-replace", "x:()=abc"};
+ assertTrue("It should have failed as no type specified inside parenthesis",
+ 0 != runTool(args));
+ assertFailureMessageContains(
+ "Attribute for node x is not properly configured : ()=abc");
+
+ args = new String[] {"-replace", ":x(string)"};
+ assertTrue("It should have failed as no node is specified",
+ 0 != runTool(args));
+ assertFailureMessageContains("Node name cannot be empty");
+
+ // Not expected key=value specifying inner parenthesis
+ args = new String[] {"-replace", "x:(key=value)"};
+ assertTrue(0 != runTool(args));
+ assertFailureMessageContains(
+ "Attribute for node x is not properly configured : (key=value)");
+
+ // Should fail as no attributes specified
+ args = new String[] {"-replace"};
+ assertTrue("Should fail as no attribute mappings specified",
+ 0 != runTool(args));
+ assertFailureMessageContains(NodeAttributesCLI.MISSING_ARGUMENT);
+
+ // no labels, should fail
+ args = new String[] {"-replace", "-failOnUnknownNodes",
+ "x:key(string)=value,key2=val2"};
+ assertTrue("Should fail as no attribute mappings specified for replace",
+ 0 != runTool(args));
+ assertFailureMessageContains(NodeAttributesCLI.MISSING_ARGUMENT);
+
+ // no labels, should fail
+ args = new String[] {"-replace", " "};
+ assertTrue(0 != runTool(args));
+ assertFailureMessageContains(NodeAttributesCLI.NO_MAPPING_ERR_MSG);
+
+ args = new String[] {"-replace", ", "};
+ assertTrue(0 != runTool(args));
+ assertFailureMessageContains(NodeAttributesCLI.INVALID_MAPPING_ERR_MSG);
+ // --------------------------------
+ // success scenarios
+ // --------------------------------
+ args = new String[] {"-replace",
+ "x:key(string)=value,key2=val2 y:key2=val23,key3 z:key4"};
+ assertTrue("Should not fail as attribute has been properly mapped",
+ 0 == runTool(args));
+ List<NodeToAttributes> nodeAttributesList = new ArrayList<>();
+ List<NodeAttribute> attributes = new ArrayList<>();
+ attributes.add(
+ NodeAttribute.newInstance("key", NodeAttributeType.STRING, "value"));
+ attributes.add(
+ NodeAttribute.newInstance("key2", NodeAttributeType.STRING, "val2"));
+ nodeAttributesList.add(NodeToAttributes.newInstance("x", attributes));
+
+ // for node y
+ attributes = new ArrayList<>();
+ attributes.add(
+ NodeAttribute.newInstance("key2", NodeAttributeType.STRING, "val23"));
+ attributes
+ .add(NodeAttribute.newInstance("key3", NodeAttributeType.STRING, ""));
+ nodeAttributesList.add(NodeToAttributes.newInstance("y", attributes));
+
+ // for node y
+ attributes = new ArrayList<>();
+ attributes.add(
+ NodeAttribute.newInstance("key2", NodeAttributeType.STRING, "val23"));
+ attributes
+ .add(NodeAttribute.newInstance("key3", NodeAttributeType.STRING, ""));
+ nodeAttributesList.add(NodeToAttributes.newInstance("y", attributes));
+
+ // for node z
+ attributes = new ArrayList<>();
+ attributes
+ .add(NodeAttribute.newInstance("key4", NodeAttributeType.STRING, ""));
+ nodeAttributesList.add(NodeToAttributes.newInstance("z", attributes));
+
+ NodesToAttributesMappingRequest expected = NodesToAttributesMappingRequest
+ .newInstance(AttributeMappingOperationType.REPLACE, nodeAttributesList,
+ false);
+ assertTrue(nodeToAttrRequest.equals(expected));
+ }
+
+ @Test
+ public void testRemove() throws Exception {
+ // --------------------------------
+ // failure scenarios
+ // --------------------------------
+ // parenthesis not match
+ String[] args = new String[] {"-remove", "x:"};
+ assertTrue("It should have failed as no node is specified",
+ 0 != runTool(args));
+ assertFailureMessageContains(
+ "Attributes cannot be null or empty for Operation [remove] on the "
+ + "node x");
+ // --------------------------------
+ // success scenarios
+ // --------------------------------
+ args =
+ new String[] {"-remove", "x:key2,key3 z:key4", "-failOnUnknownNodes"};
+ assertTrue("Should not fail as attribute has been properly mapped",
+ 0 == runTool(args));
+ List<NodeToAttributes> nodeAttributesList = new ArrayList<>();
+ List<NodeAttribute> attributes = new ArrayList<>();
+ attributes
+ .add(NodeAttribute.newInstance("key2", NodeAttributeType.STRING, ""));
+ attributes
+ .add(NodeAttribute.newInstance("key3", NodeAttributeType.STRING, ""));
+ nodeAttributesList.add(NodeToAttributes.newInstance("x", attributes));
+
+ // for node z
+ attributes = new ArrayList<>();
+ attributes
+ .add(NodeAttribute.newInstance("key4", NodeAttributeType.STRING, ""));
+ nodeAttributesList.add(NodeToAttributes.newInstance("z", attributes));
+
+ NodesToAttributesMappingRequest expected = NodesToAttributesMappingRequest
+ .newInstance(AttributeMappingOperationType.REMOVE, nodeAttributesList,
+ true);
+ assertTrue(nodeToAttrRequest.equals(expected));
+ }
+
+ @Test
+ public void testAdd() throws Exception {
+ // --------------------------------
+ // failure scenarios
+ // --------------------------------
+ // parenthesis not match
+ String[] args = new String[] {"-add", "x:"};
+ assertTrue("It should have failed as no node is specified",
+ 0 != runTool(args));
+ assertFailureMessageContains(
+ "Attributes cannot be null or empty for Operation [add] on the node x");
+ // --------------------------------
+ // success scenarios
+ // --------------------------------
+ args = new String[] {"-add", "x:key2=123,key3=abc z:key4(string)",
+ "-failOnUnknownNodes"};
+ assertTrue("Should not fail as attribute has been properly mapped",
+ 0 == runTool(args));
+ List<NodeToAttributes> nodeAttributesList = new ArrayList<>();
+ List<NodeAttribute> attributes = new ArrayList<>();
+ attributes.add(
+ NodeAttribute.newInstance("key2", NodeAttributeType.STRING, "123"));
+ attributes.add(
+ NodeAttribute.newInstance("key3", NodeAttributeType.STRING, "abc"));
+ nodeAttributesList.add(NodeToAttributes.newInstance("x", attributes));
+
+ // for node z
+ attributes = new ArrayList<>();
+ attributes
+ .add(NodeAttribute.newInstance("key4", NodeAttributeType.STRING, ""));
+ nodeAttributesList.add(NodeToAttributes.newInstance("z", attributes));
+
+ NodesToAttributesMappingRequest expected = NodesToAttributesMappingRequest
+ .newInstance(AttributeMappingOperationType.ADD, nodeAttributesList,
+ true);
+ assertTrue(nodeToAttrRequest.equals(expected));
+
+ // --------------------------------
+ // with Duplicate mappings for a host
+ // --------------------------------
+ args = new String[] {"-add", "x:key2=123,key3=abc x:key4(string)",
+ "-failOnUnknownNodes"};
+ assertTrue("Should not fail as attribute has been properly mapped",
+ 0 == runTool(args));
+ nodeAttributesList = new ArrayList<>();
+ attributes = new ArrayList<>();
+ attributes
+ .add(NodeAttribute.newInstance("key4", NodeAttributeType.STRING, ""));
+ nodeAttributesList.add(NodeToAttributes.newInstance("x", attributes));
+
+ expected = NodesToAttributesMappingRequest
+ .newInstance(AttributeMappingOperationType.ADD, nodeAttributesList,
+ true);
+ assertTrue(nodeToAttrRequest.equals(expected));
+ }
+
+ @Test
+ public void testListAttributes() throws Exception {
+
+ // GetClusterNodeAttributesRequest
+ when(client
+ .getClusterNodeAttributes(any(GetClusterNodeAttributesRequest.class)))
+ .thenAnswer(new Answer<GetClusterNodeAttributesResponse>() {
+ @Override
+ public GetClusterNodeAttributesResponse answer(
+ InvocationOnMock invocation) throws Throwable {
+ GetClusterNodeAttributesRequest nodeAttrReq =
+ (GetClusterNodeAttributesRequest) invocation.getArguments()[0];
+ return GetClusterNodeAttributesResponse.newInstance(ImmutableSet
+ .of(NodeAttributeInfo
+ .newInstance(NodeAttributeKey.newInstance("GPU"),
+ NodeAttributeType.STRING)));
+ }
+ });
+
+ // --------------------------------
+ // Success scenarios
+ // --------------------------------
+ String[] args = new String[] {"-list"};
+ assertTrue("It should be success since it list all attributes",
+ 0 == runTool(args));
+ assertSysOutContains("Attribute\t Type",
+ "rm.yarn.io/GPU\t STRING");
+ }
+
+ @Test
+ public void testNodeToAttributes() throws Exception {
+ // GetNodesToAttributesRequest response
+ when(client.getNodesToAttributes(any(GetNodesToAttributesRequest.class)))
+ .thenAnswer(new Answer<GetNodesToAttributesResponse>() {
+ @Override
+ public GetNodesToAttributesResponse answer(
+ InvocationOnMock invocation) throws Throwable {
+ GetNodesToAttributesRequest nodeToAttributes =
+ (GetNodesToAttributesRequest) invocation.getArguments()[0];
+ return GetNodesToAttributesResponse.newInstance(
+ ImmutableMap.<String, Set<NodeAttribute>>builder()
+ .put("hostname", ImmutableSet.of(NodeAttribute
+ .newInstance("GPU", NodeAttributeType.STRING, "ARM")))
+ .build());
+ }
+ });
+ // --------------------------------
+ // Failure scenarios
+ // --------------------------------
+ String[] args = new String[] {"-nodetoattributes", "-nodes"};
+ assertTrue("It should not success since nodes are not specified",
+ 0 != runTool(args));
+ assertErrorContains(NodeAttributesCLI.INVALID_COMMAND_USAGE);
+
+ // Missing argument for nodes
+ args = new String[] {"-nodestoattributes", "-nodes"};
+ assertTrue("It should not success since nodes are not specified",
+ 0 != runTool(args));
+ assertErrorContains(NodeAttributesCLI.MISSING_ARGUMENT);
+
+ // --------------------------------
+ // Success with hostname param
+ // --------------------------------
+ args = new String[] {"-nodestoattributes", "-nodes", "hostname"};
+ assertTrue("Should return hostname to attributed list", 0 == runTool(args));
+ assertSysOutContains("hostname");
+ }
+
+ @Test
+ public void testAttributesToNodes() throws Exception {
+ // GetAttributesToNodesResponse response
+ when(client.getAttributesToNodes(any(GetAttributesToNodesRequest.class)))
+ .thenAnswer(new Answer<GetAttributesToNodesResponse>() {
+ @Override
+ public GetAttributesToNodesResponse answer(
+ InvocationOnMock invocation) throws Throwable {
+ GetAttributesToNodesRequest attrToNodes =
+ (GetAttributesToNodesRequest) invocation.getArguments()[0];
+ return GetAttributesToNodesResponse.newInstance(
+ ImmutableMap.<NodeAttributeKey, List<NodeToAttributeValue>>builder()
+ .put(NodeAttributeKey.newInstance("GPU"), ImmutableList
+ .of(NodeToAttributeValue.newInstance("host1", "ARM")))
+ .build());
+ }
+ });
+ // --------------------------------
+ // Success scenarios
+ // --------------------------------
+ String[] args = new String[] {"-attributestonodes"};
+ assertTrue("It should be success since it list all attributes",
+ 0 == runTool(args));
+ assertSysOutContains("Hostname\tAttribute-value", "rm.yarn.io/GPU :",
+ "host1\t ARM");
+
+ // --------------------------------
+ // fail scenario argument filter missing
+ // --------------------------------
+ args = new String[] {"-attributestonodes", "-attributes"};
+ assertTrue(
+ "It should not success since attributes for filter are not specified",
+ 0 != runTool(args));
+ assertErrorContains(NodeAttributesCLI.MISSING_ARGUMENT);
+
+ // --------------------------------
+ // fail scenario argument filter missing
+ // --------------------------------
+ args = new String[] {"-attributestonodes", "-attributes", "fail/da/fail"};
+ assertTrue("It should not success since attributes format is not correct",
+ 0 != runTool(args));
+ assertErrorContains(
+ "Attribute format not correct. Should be <[prefix]/[name]> "
+ + ":fail/da/fail");
+ }
+
+ private void assertFailureMessageContains(String... messages) {
+ assertErrorContains(messages);
+ assertErrorContains(NodeAttributesCLI.USAGE_YARN_NODE_ATTRIBUTES);
+ }
+
+ private void assertErrorContains(String... messages) {
+ for (String message : messages) {
+ if (!errOutput.contains(message)) {
+ fail(
+ "Expected output to contain '" + message + "' but err_output was:\n"
+ + errOutput);
+ }
+ }
+ }
+
+ private void assertSysOutContains(String... messages) {
+ for (String message : messages) {
+ if (!sysOutput.contains(message)) {
+ fail(
+ "Expected output to contain '" + message + "' but sys_output was:\n"
+ + sysOutput);
+ }
+ }
+ }
+
+ private int runTool(String... args) throws Exception {
+ errOutBytes.reset();
+ sysOutBytes.reset();
+ LOG.info("Running: NodeAttributesCLI " + Joiner.on(" ").join(args));
+ int ret = nodeAttributesCLI.run(args);
+ errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
+ sysOutput = new String(sysOutBytes.toByteArray(), Charsets.UTF_8);
+ LOG.info("Err_output:\n" + errOutput);
+ LOG.info("Sys_output:\n" + sysOutput);
+ return ret;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 20c96032919..a600895eefd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.yarn.client.cli;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
@@ -1544,8 +1546,8 @@ public void testListClusterNodes() throws Exception {
public void testNodeStatus() throws Exception {
NodeId nodeId = NodeId.newInstance("host0", 0);
NodeCLI cli = new NodeCLI();
- when(client.getNodeReports()).thenReturn(
- getNodeReports(3, NodeState.RUNNING, false));
+ when(client.getNodeReports())
+ .thenReturn(getNodeReports(3, NodeState.RUNNING, false, false, false));
cli.setClient(client);
cli.setSysOutPrintStream(sysOut);
cli.setSysErrPrintStream(sysErr);
@@ -1568,6 +1570,8 @@ public void testNodeStatus() throws Exception {
pw.println("\tCPU-Used : 0 vcores");
pw.println("\tCPU-Capacity : 0 vcores");
pw.println("\tNode-Labels : a,b,c,x,y,z");
+ pw.println("\tNode Attributes : rm.yarn.io/GPU(STRING)=ARM");
+ pw.println("\t rm.yarn.io/CPU(STRING)=ARM");
pw.println("\tResource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0");
pw.println("\tResource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0");
pw.close();
@@ -1604,6 +1608,7 @@ public void testNodeStatusWithEmptyNodeLabels() throws Exception {
pw.println("\tCPU-Used : 0 vcores");
pw.println("\tCPU-Capacity : 0 vcores");
pw.println("\tNode-Labels : ");
+ pw.println("\tNode Attributes : ");
pw.println("\tResource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0");
pw.println("\tResource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0");
pw.close();
@@ -1616,8 +1621,8 @@ public void testNodeStatusWithEmptyNodeLabels() throws Exception {
public void testNodeStatusWithEmptyResourceUtilization() throws Exception {
NodeId nodeId = NodeId.newInstance("host0", 0);
NodeCLI cli = new NodeCLI();
- when(client.getNodeReports()).thenReturn(
- getNodeReports(3, NodeState.RUNNING, false, true));
+ when(client.getNodeReports())
+ .thenReturn(getNodeReports(3, NodeState.RUNNING, false, true, true));
cli.setClient(client);
cli.setSysOutPrintStream(sysOut);
cli.setSysErrPrintStream(sysErr);
@@ -1640,6 +1645,7 @@ public void testNodeStatusWithEmptyResourceUtilization() throws Exception {
pw.println("\tCPU-Used : 0 vcores");
pw.println("\tCPU-Capacity : 0 vcores");
pw.println("\tNode-Labels : a,b,c,x,y,z");
+ pw.println("\tNode Attributes : ");
pw.println("\tResource Utilization by Node : ");
pw.println("\tResource Utilization by Containers : ");
pw.close();
@@ -2049,18 +2055,20 @@ private void verifyUsageInfo(YarnCLI cli) throws Exception {
cli.run(new String[] { "application" });
verify(sysErr).println("Invalid Command Usage : ");
}
-
+
 private List<NodeReport> getNodeReports(int noOfNodes, NodeState state) {
- return getNodeReports(noOfNodes, state, true, false);
+ return getNodeReports(noOfNodes, state, true, false, true);
}
private List getNodeReports(int noOfNodes, NodeState state,
- boolean emptyNodeLabel) {
- return getNodeReports(noOfNodes, state, emptyNodeLabel, false);
+ boolean emptyNodeLabel, boolean emptyAttributes) {
+ return getNodeReports(noOfNodes, state, emptyNodeLabel, false,
+ emptyAttributes);
}
 private List<NodeReport> getNodeReports(int noOfNodes, NodeState state,
- boolean emptyNodeLabel, boolean emptyResourceUtilization) {
+ boolean emptyNodeLabel, boolean emptyResourceUtilization,
+ boolean emptyAttributes) {
 List<NodeReport> nodeReports = new ArrayList<NodeReport>();
for (int i = 0; i < noOfNodes; i++) {
@@ -2082,6 +2090,11 @@ private void verifyUsageInfo(YarnCLI cli) throws Exception {
nodeReport.setAggregatedContainersUtilization(containersUtilization);
nodeReport.setNodeUtilization(nodeUtilization);
}
+ if (!emptyAttributes) {
+ nodeReport.setNodeAttributes(ImmutableSet.of(NodeAttribute
+ .newInstance("GPU", NodeAttributeType.STRING, "ARM"),
+ NodeAttribute.newInstance("CPU", NodeAttributeType.STRING, "ARM")));
+ }
nodeReports.add(nodeReport);
}
return nodeReports;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java
index fd5096a7b37..1bebbe28f1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java
@@ -43,8 +43,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
@@ -61,6 +65,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
@@ -107,8 +113,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAttributesToNodesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAttributesToNodesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeAttributesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeAttributesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
@@ -125,6 +135,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewReservationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewReservationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToAttributesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToAttributesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
@@ -673,4 +685,47 @@ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
return null;
}
}
+
+ @Override
+ public GetAttributesToNodesResponse getAttributesToNodes(
+ GetAttributesToNodesRequest request) throws YarnException, IOException {
+ YarnServiceProtos.GetAttributesToNodesRequestProto requestProto =
+ ((GetAttributesToNodesRequestPBImpl) request).getProto();
+ try {
+ return new GetAttributesToNodesResponsePBImpl(
+ proxy.getAttributesToNodes(null, requestProto));
+ } catch (ServiceException e) {
+ RPCUtil.unwrapAndThrowException(e);
+ return null;
+ }
+ }
+
+ @Override
+ public GetClusterNodeAttributesResponse getClusterNodeAttributes(
+ GetClusterNodeAttributesRequest request)
+ throws YarnException, IOException {
+ YarnServiceProtos.GetClusterNodeAttributesRequestProto requestProto =
+ ((GetClusterNodeAttributesRequestPBImpl) request).getProto();
+ try {
+ return new GetClusterNodeAttributesResponsePBImpl(
+ proxy.getClusterNodeAttributes(null, requestProto));
+ } catch (ServiceException e) {
+ RPCUtil.unwrapAndThrowException(e);
+ return null;
+ }
+ }
+
+ @Override
+ public GetNodesToAttributesResponse getNodesToAttributes(
+ GetNodesToAttributesRequest request) throws YarnException, IOException {
+ YarnServiceProtos.GetNodesToAttributesRequestProto requestProto =
+ ((GetNodesToAttributesRequestPBImpl) request).getProto();
+ try {
+ return new GetNodesToAttributesResponsePBImpl(
+ proxy.getNodesToAttributes(null, requestProto));
+ } catch (ServiceException e) {
+ RPCUtil.unwrapAndThrowException(e);
+ return null;
+ }
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java
index 423287e9105..2c296cd866b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java
@@ -35,7 +35,10 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
@@ -44,6 +47,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
@@ -73,8 +77,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAttributesToNodesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAttributesToNodesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeAttributesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeAttributesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
@@ -91,6 +99,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewReservationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewReservationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToAttributesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToAttributesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
@@ -184,6 +194,9 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeAttributesResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAttributesToNodesResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToAttributesResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -694,4 +707,56 @@ public GetAllResourceTypeInfoResponseProto getResourceTypeInfo(
throw new ServiceException(ie);
}
}
+
+ @Override
+ public GetClusterNodeAttributesResponseProto getClusterNodeAttributes(
+ RpcController controller,
+ YarnServiceProtos.GetClusterNodeAttributesRequestProto proto)
+ throws ServiceException {
+ GetClusterNodeAttributesRequest req =
+ new GetClusterNodeAttributesRequestPBImpl(proto);
+ try {
+ GetClusterNodeAttributesResponse resp =
+ real.getClusterNodeAttributes(req);
+ return ((GetClusterNodeAttributesResponsePBImpl) resp).getProto();
+ } catch (YarnException ye) {
+ throw new ServiceException(ye);
+ } catch (IOException ie) {
+ throw new ServiceException(ie);
+ }
+ }
+
+ @Override
+ public GetAttributesToNodesResponseProto getAttributesToNodes(
+ RpcController controller,
+ YarnServiceProtos.GetAttributesToNodesRequestProto proto)
+ throws ServiceException {
+ GetAttributesToNodesRequestPBImpl req =
+ new GetAttributesToNodesRequestPBImpl(proto);
+ try {
+ GetAttributesToNodesResponse resp = real.getAttributesToNodes(req);
+ return ((GetAttributesToNodesResponsePBImpl) resp).getProto();
+ } catch (YarnException ye) {
+ throw new ServiceException(ye);
+ } catch (IOException ie) {
+ throw new ServiceException(ie);
+ }
+ }
+
+ @Override
+ public GetNodesToAttributesResponseProto getNodesToAttributes(
+ RpcController controller,
+ YarnServiceProtos.GetNodesToAttributesRequestProto proto)
+ throws ServiceException {
+ GetNodesToAttributesRequestPBImpl req =
+ new GetNodesToAttributesRequestPBImpl(proto);
+ try {
+ GetNodesToAttributesResponse resp = real.getNodesToAttributes(req);
+ return ((GetNodesToAttributesResponsePBImpl) resp).getProto();
+ } catch (YarnException ye) {
+ throw new ServiceException(ye);
+ } catch (IOException ie) {
+ throw new ServiceException(ie);
+ }
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
index 926b6fa2793..447905e2b3b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintFromProtoConverter.java
@@ -26,6 +26,7 @@
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
@@ -37,6 +38,7 @@
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeOpCodeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
@@ -73,7 +75,8 @@ private SingleConstraint convert(SimplePlacementConstraintProto proto) {
}
return new SingleConstraint(proto.getScope(), proto.getMinCardinality(),
- proto.getMaxCardinality(), targets);
+ proto.getMaxCardinality(),
+ convertFromProtoFormat(proto.getAttributeOpCode()), targets);
}
private TargetExpression convert(PlacementConstraintTargetProto proto) {
@@ -113,4 +116,9 @@ private TimedPlacementConstraint convert(
return new TimedPlacementConstraint(pConstraint, proto.getSchedulingDelay(),
ProtoUtils.convertFromProtoFormat(proto.getDelayUnit()));
}
+
+ private static NodeAttributeOpCode convertFromProtoFormat(
+ NodeAttributeOpCodeProto p) {
+ return NodeAttributeOpCode.valueOf(p.name());
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
index 7816e181dd2..30f774136dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/pb/PlacementConstraintToProtoConverter.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.api.pb;
import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.NodeAttributeOpCode;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
@@ -34,6 +35,7 @@
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.CompositePlacementConstraintProto.CompositeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeOpCodeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PlacementConstraintTargetProto;
import org.apache.hadoop.yarn.proto.YarnProtos.SimplePlacementConstraintProto;
@@ -72,6 +74,10 @@ public GeneratedMessage visit(SingleConstraint constraint) {
}
sb.setMinCardinality(constraint.getMinCardinality());
sb.setMaxCardinality(constraint.getMaxCardinality());
+ if (constraint.getNodeAttributeOpCode() != null) {
+ sb.setAttributeOpCode(
+ convertToProtoFormat(constraint.getNodeAttributeOpCode()));
+ }
if (constraint.getTargetExpressions() != null) {
for (TargetExpression target : constraint.getTargetExpressions()) {
sb.addTargetExpressions(
@@ -171,4 +177,9 @@ public GeneratedMessage visit(TimedPlacementConstraint constraint) {
return tb.build();
}
+
+ private static NodeAttributeOpCodeProto convertToProtoFormat(
+ NodeAttributeOpCode p) {
+ return NodeAttributeOpCodeProto.valueOf(p.name());
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAttributesToNodesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAttributesToNodesRequestPBImpl.java
new file mode 100644
index 00000000000..15a360cf631
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAttributesToNodesRequestPBImpl.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributeKeyPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeKeyProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAttributesToNodesRequestProto;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Attributes to nodes mapping request.
+ */
+@Private
+@Unstable
+public class GetAttributesToNodesRequestPBImpl
+ extends GetAttributesToNodesRequest {
+
+ private Set<NodeAttributeKey> nodeAttributes = null;
+
+ private GetAttributesToNodesRequestProto proto =
+ GetAttributesToNodesRequestProto.getDefaultInstance();
+ private GetAttributesToNodesRequestProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ public GetAttributesToNodesRequestPBImpl() {
+ builder = GetAttributesToNodesRequestProto.newBuilder();
+ }
+
+ public GetAttributesToNodesRequestPBImpl(
+ GetAttributesToNodesRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetAttributesToNodesRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto) {
+ maybeInitBuilder();
+ }
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.nodeAttributes != null) {
+ addLocalAttributesToProto();
+ }
+ }
+
+ private void addLocalAttributesToProto() {
+ maybeInitBuilder();
+ builder.clearNodeAttributes();
+ if (nodeAttributes == null) {
+ return;
+ }
+ Iterable<NodeAttributeKeyProto> iterable =
+ () -> new Iterator<NodeAttributeKeyProto>() {
+ private Iterator<NodeAttributeKey> iter = nodeAttributes.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public NodeAttributeKeyProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ builder.addAllNodeAttributes(iterable);
+ }
+
+ private NodeAttributeKeyPBImpl convertFromProtoFormat(
+ NodeAttributeKeyProto p) {
+ return new NodeAttributeKeyPBImpl(p);
+ }
+
+ private NodeAttributeKeyProto convertToProtoFormat(NodeAttributeKey t) {
+ return ((NodeAttributeKeyPBImpl) t).getProto();
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetAttributesToNodesRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ private void initNodeAttributes() {
+ if (this.nodeAttributes != null) {
+ return;
+ }
+ YarnServiceProtos.GetAttributesToNodesRequestProtoOrBuilder p =
+ viaProto ? proto : builder;
+ List<NodeAttributeKeyProto> nodeAttributesList = p.getNodeAttributesList();
+ this.nodeAttributes = new HashSet<>();
+ nodeAttributesList
+ .forEach((v) -> nodeAttributes.add(convertFromProtoFormat(v)));
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return TextFormat.shortDebugString(getProto());
+ }
+
+ @Override
+ public void setNodeAttributes(Set<NodeAttributeKey> attributes) {
+ maybeInitBuilder();
+ if (nodeAttributes == null) {
+ builder.clearNodeAttributes();
+ }
+ this.nodeAttributes = attributes;
+ }
+
+ @Override
+ public Set<NodeAttributeKey> getNodeAttributes() {
+ initNodeAttributes();
+ return this.nodeAttributes;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAttributesToNodesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAttributesToNodesResponsePBImpl.java
new file mode 100644
index 00000000000..0d4c6e0d812
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAttributesToNodesResponsePBImpl.java
@@ -0,0 +1,207 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributeKeyPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeToAttributeValuePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.AttributeToNodesProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeKeyProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeToAttributeValueProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAttributesToNodesResponseProto;
+
+/**
+ * Attributes to nodes response.
+ */
+@Private
+@Unstable
+public class GetAttributesToNodesResponsePBImpl
+ extends GetAttributesToNodesResponse {
+
+ private GetAttributesToNodesResponseProto proto =
+ GetAttributesToNodesResponseProto.getDefaultInstance();
+ private GetAttributesToNodesResponseProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ private Map<NodeAttributeKey, List<NodeToAttributeValue>> attributesToNodes;
+
+ public GetAttributesToNodesResponsePBImpl() {
+ this.builder = GetAttributesToNodesResponseProto.newBuilder();
+ }
+
+ public GetAttributesToNodesResponsePBImpl(
+ GetAttributesToNodesResponseProto proto) {
+ this.proto = proto;
+ this.viaProto = true;
+ }
+
+ private void initAttributesToNodes() {
+ if (this.attributesToNodes != null) {
+ return;
+ }
+ YarnServiceProtos.GetAttributesToNodesResponseProtoOrBuilder p =
+ viaProto ? proto : builder;
+ List<AttributeToNodesProto> list = p.getAttributesToNodesList();
+ this.attributesToNodes = new HashMap<>();
+
+ for (AttributeToNodesProto c : list) {
+ List<NodeToAttributeValueProto> nodeValueMapList =
+ c.getNodeValueMapList();
+ List<NodeToAttributeValue> nodeToAttributeValue = new ArrayList<>();
+ for (NodeToAttributeValueProto valueProto : nodeValueMapList) {
+ nodeToAttributeValue.add(convertFromProtoFormat(valueProto));
+ }
+ if (!nodeToAttributeValue.isEmpty()) {
+ this.attributesToNodes.put(convertFromProtoFormat(c.getNodeAttribute()),
+ nodeToAttributeValue);
+ }
+ }
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetAttributesToNodesResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ private void addAttributesToNodesToProto() {
+ maybeInitBuilder();
+ builder.clearAttributesToNodes();
+ if (attributesToNodes == null) {
+ return;
+ }
+ Iterable<AttributeToNodesProto> iterable =
+ () -> new Iterator<AttributeToNodesProto>() {
+
+ private Iterator<Map.Entry<NodeAttributeKey, List<NodeToAttributeValue>>> iter = attributesToNodes.entrySet()
+ .iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public AttributeToNodesProto next() {
+ Map.Entry<NodeAttributeKey, List<NodeToAttributeValue>> attrToNodes
+ = iter.next();
+
+ AttributeToNodesProto.Builder attrToNodesBuilder =
+ AttributeToNodesProto.newBuilder().setNodeAttribute(
+ convertToProtoFormat(attrToNodes.getKey()));
+ for (NodeToAttributeValue hostToAttrVal : attrToNodes.getValue()) {
+ attrToNodesBuilder
+ .addNodeValueMap(convertToProtoFormat(hostToAttrVal));
+ }
+
+ return attrToNodesBuilder.build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+ };
+ builder.addAllAttributesToNodes(iterable);
+ }
+
+ private NodeAttributeKey convertFromProtoFormat(NodeAttributeKeyProto p) {
+ return new NodeAttributeKeyPBImpl(p);
+ }
+
+ private NodeAttributeKeyProto convertToProtoFormat(NodeAttributeKey t) {
+ return ((NodeAttributeKeyPBImpl) t).getProto();
+ }
+
+ private NodeToAttributeValue convertFromProtoFormat(
+ NodeToAttributeValueProto p) {
+ return new NodeToAttributeValuePBImpl(p);
+ }
+
+ private NodeToAttributeValueProto convertToProtoFormat(
+ NodeToAttributeValue t) {
+ return ((NodeToAttributeValuePBImpl) t).getProto();
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.attributesToNodes != null) {
+ addAttributesToNodesToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto) {
+ maybeInitBuilder();
+ }
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ public GetAttributesToNodesResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ @Override
+ public int hashCode() {
+ assert false : "hashCode not designed";
+ return 0;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public void setAttributeToNodes(
+ Map<NodeAttributeKey, List<NodeToAttributeValue>> map) {
+ initAttributesToNodes();
+ attributesToNodes.clear();
+ attributesToNodes.putAll(map);
+ }
+
+ @Override
+ public Map<NodeAttributeKey, List<NodeToAttributeValue>> getAttributesToNodes() {
+ initAttributesToNodes();
+ return this.attributesToNodes;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeAttributesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeAttributesRequestPBImpl.java
new file mode 100644
index 00000000000..bf5ab4084e4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeAttributesRequestPBImpl.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import com.google.protobuf.TextFormat;
+import static org.apache.hadoop.classification.InterfaceAudience.*;
+import static org.apache.hadoop.classification.InterfaceStability.*;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeAttributesRequestProto;
+
+/**
+ * Request to get cluster node attributes.
+ */
+@Private
+@Unstable
+public class GetClusterNodeAttributesRequestPBImpl
+ extends GetClusterNodeAttributesRequest {
+
+ private GetClusterNodeAttributesRequestProto proto =
+ GetClusterNodeAttributesRequestProto.getDefaultInstance();
+ private GetClusterNodeAttributesRequestProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ public GetClusterNodeAttributesRequestPBImpl() {
+ builder = GetClusterNodeAttributesRequestProto.newBuilder();
+ }
+
+ public GetClusterNodeAttributesRequestPBImpl(
+ GetClusterNodeAttributesRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetClusterNodeAttributesRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return TextFormat.shortDebugString(getProto());
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeAttributesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeAttributesResponsePBImpl.java
new file mode 100644
index 00000000000..41cd808d6aa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodeAttributesResponsePBImpl.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributeInfoPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeInfoProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodeAttributesResponseProto;
+
+/**
+ * Cluster node attributes response.
+ */
+@Private
+@Unstable
+public class GetClusterNodeAttributesResponsePBImpl
+ extends GetClusterNodeAttributesResponse {
+
+ private GetClusterNodeAttributesResponseProto proto =
+ GetClusterNodeAttributesResponseProto.getDefaultInstance();
+ private GetClusterNodeAttributesResponseProto.Builder builder = null;
+ private Set<NodeAttributeInfo> clusterNodeAttributes;
+ private boolean viaProto = false;
+
+ public GetClusterNodeAttributesResponsePBImpl() {
+ builder = GetClusterNodeAttributesResponseProto.newBuilder();
+ }
+
+ public GetClusterNodeAttributesResponsePBImpl(
+ GetClusterNodeAttributesResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public synchronized GetClusterNodeAttributesResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto) {
+ maybeInitBuilder();
+ }
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.clusterNodeAttributes != null) {
+ addNodeAttributesToProto();
+ }
+ }
+
+ private void addNodeAttributesToProto() {
+ maybeInitBuilder();
+ builder.clearNodeAttributes();
+ if (clusterNodeAttributes == null || clusterNodeAttributes.isEmpty()) {
+ return;
+ }
+
+ builder.addAllNodeAttributes(clusterNodeAttributes.stream()
+ .map(s -> convertToProtoFormat(s)).collect(Collectors.toSet()));
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ assert false : "hashCode not designed";
+ return 0;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetClusterNodeAttributesResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public synchronized void setNodeAttributes(
+ Set<NodeAttributeInfo> attributes) {
+ maybeInitBuilder();
+ this.clusterNodeAttributes = new HashSet<>();
+ if (attributes == null) {
+ builder.clearNodeAttributes();
+ return;
+ }
+ this.clusterNodeAttributes.addAll(attributes);
+ }
+
+ @Override
+ public synchronized Set<NodeAttributeInfo> getNodeAttributes() {
+ if (this.clusterNodeAttributes != null) {
+ return this.clusterNodeAttributes;
+ }
+ initLocalNodeAttributes();
+ return this.clusterNodeAttributes;
+ }
+
+ private void initLocalNodeAttributes() {
+ YarnServiceProtos.GetClusterNodeAttributesResponseProtoOrBuilder p =
+ viaProto ? proto : builder;
+ List<NodeAttributeInfoProto> attributesProtoList =
+ p.getNodeAttributesList();
+ this.clusterNodeAttributes = new HashSet<>();
+ clusterNodeAttributes.addAll(attributesProtoList.stream()
+ .map(attr -> convertFromProtoFormat(attr)).collect(Collectors.toSet()));
+ }
+
+ private NodeAttributeInfoProto convertToProtoFormat(
+ NodeAttributeInfo attributeInfo) {
+ return ((NodeAttributeInfoPBImpl)attributeInfo).getProto();
+ }
+
+ private NodeAttributeInfo convertFromProtoFormat(
+ NodeAttributeInfoProto nodeAttributeInfoProto) {
+ return new NodeAttributeInfoPBImpl(nodeAttributeInfoProto);
+ }
+
+ @Override
+ public String toString() {
+ return getProto().toString();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToAttributesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToAttributesRequestPBImpl.java
new file mode 100644
index 00000000000..0d9b722b6bc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToAttributesRequestPBImpl.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import com.google.protobuf.TextFormat;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNodesToAttributesRequestProto;
+
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Request to get hostname to attributes mapping.
+ */
+public class GetNodesToAttributesRequestPBImpl
+ extends GetNodesToAttributesRequest {
+
+ private GetNodesToAttributesRequestProto proto =
+ GetNodesToAttributesRequestProto.getDefaultInstance();
+ private GetNodesToAttributesRequestProto.Builder builder = null;
+
+ private Set<String> hostNames = null;
+ private boolean viaProto = false;
+
+ public GetNodesToAttributesRequestPBImpl() {
+ builder = GetNodesToAttributesRequestProto.newBuilder();
+ }
+
+ public GetNodesToAttributesRequestPBImpl(
+ GetNodesToAttributesRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetNodesToAttributesRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto) {
+ maybeInitBuilder();
+ }
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (hostNames != null && !hostNames.isEmpty()) {
+ builder.clearHostnames();
+ builder.addAllHostnames(hostNames);
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return TextFormat.shortDebugString(getProto());
+ }
+
+ @Override
+ public void setHostNames(Set<String> hostnames) {
+ maybeInitBuilder();
+ if (hostnames == null) {
+ builder.clearHostnames();
+ }
+ this.hostNames = hostnames;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder =
+ YarnServiceProtos.GetNodesToAttributesRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public Set<String> getHostNames() {
+ initNodeToAttributes();
+ return this.hostNames;
+ }
+
+ private void initNodeToAttributes() {
+ if (this.hostNames != null) {
+ return;
+ }
+ YarnServiceProtos.GetNodesToAttributesRequestProtoOrBuilder p =
+ viaProto ? proto : builder;
+ List<String> hostNamesList = p.getHostnamesList();
+ this.hostNames = new HashSet<>();
+ this.hostNames.addAll(hostNamesList);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToAttributesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToAttributesResponsePBImpl.java
new file mode 100644
index 00000000000..1114d142062
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNodesToAttributesResponsePBImpl.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Nodes to attributes request response.
+ */
+public class GetNodesToAttributesResponsePBImpl
+ extends GetNodesToAttributesResponse {
+
+ private YarnServiceProtos.GetNodesToAttributesResponseProto proto =
+ YarnServiceProtos.GetNodesToAttributesResponseProto.getDefaultInstance();
+ private YarnServiceProtos.GetNodesToAttributesResponseProto.Builder builder =
+ null;
+ private boolean viaProto = false;
+
+ private Map<String, Set<NodeAttribute>> nodesToAttributes;
+
+ public GetNodesToAttributesResponsePBImpl() {
+ this.builder =
+ YarnServiceProtos.GetNodesToAttributesResponseProto.newBuilder();
+ }
+
+ public GetNodesToAttributesResponsePBImpl(
+ YarnServiceProtos.GetNodesToAttributesResponseProto proto) {
+ this.proto = proto;
+ this.viaProto = true;
+ }
+
+ private void initNodesToAttributes() {
+ if (this.nodesToAttributes != null) {
+ return;
+ }
+ YarnServiceProtos.GetNodesToAttributesResponseProtoOrBuilder p =
+ viaProto ? proto : builder;
+ List<YarnProtos.NodeToAttributesProto> list = p.getNodesToAttributesList();
+ this.nodesToAttributes = new HashMap<>();
+ for (YarnProtos.NodeToAttributesProto c : list) {
+ HashSet<NodeAttribute> attributes = new HashSet<>();
+ for (YarnProtos.NodeAttributeProto nodeAttrProto : c
+ .getNodeAttributesList()) {
+ attributes.add(new NodeAttributePBImpl(nodeAttrProto));
+ }
+ nodesToAttributes.put(c.getNode(), attributes);
+ }
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder =
+ YarnServiceProtos.GetNodesToAttributesResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ private void addNodesToAttributesToProto() {
+ maybeInitBuilder();
+ builder.clearNodesToAttributes();
+ if (nodesToAttributes == null) {
+ return;
+ }
+ Iterable<YarnProtos.NodeToAttributesProto> iterable =
+ () -> new Iterator<YarnProtos.NodeToAttributesProto>() {
+
+ private Iterator<Map.Entry<String, Set<NodeAttribute>>> iter =
+ nodesToAttributes.entrySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public YarnProtos.NodeToAttributesProto next() {
+ Map.Entry<String, Set<NodeAttribute>> now = iter.next();
+ Set<YarnProtos.NodeAttributeProto> protoSet = new HashSet<>();
+ for (NodeAttribute nodeAttribute : now.getValue()) {
+ protoSet.add(convertToProtoFormat(nodeAttribute));
+ }
+ return YarnProtos.NodeToAttributesProto.newBuilder()
+ .setNode(now.getKey()).addAllNodeAttributes(protoSet).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+ };
+ builder.addAllNodesToAttributes(iterable);
+ }
+
+ private NodeAttributePBImpl convertFromProtoFormat(
+ YarnProtos.NodeAttributeProto p) {
+ return new NodeAttributePBImpl(p);
+ }
+
+ private YarnProtos.NodeAttributeProto convertToProtoFormat(NodeAttribute t) {
+ return ((NodeAttributePBImpl) t).getProto();
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.nodesToAttributes != null) {
+ addNodesToAttributesToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto) {
+ maybeInitBuilder();
+ }
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ public YarnServiceProtos.GetNodesToAttributesResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ @Override
+ public int hashCode() {
+ assert false : "hashCode not designed";
+ return 0;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public void setNodeToAttributes(Map<String, Set<NodeAttribute>> map) {
+ initNodesToAttributes();
+ nodesToAttributes.clear();
+ nodesToAttributes.putAll(map);
+ }
+
+ @Override
+ public Map<String, Set<NodeAttribute>> getNodeToAttributes() {
+ initNodesToAttributes();
+ return nodesToAttributes;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributeInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributeInfoPBImpl.java
new file mode 100644
index 00000000000..e2db5686614
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributeInfoPBImpl.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeKeyProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeInfoProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeInfoProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeTypeProto;
+
+/**
+ * Implementation for NodeAttributeInfo.
+ *
+ */
+public class NodeAttributeInfoPBImpl extends NodeAttributeInfo {
+ private NodeAttributeInfoProto proto =
+ NodeAttributeInfoProto.getDefaultInstance();
+ private NodeAttributeInfoProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ public NodeAttributeInfoPBImpl() {
+ builder = NodeAttributeInfoProto.newBuilder();
+ }
+
+ public NodeAttributeInfoPBImpl(NodeAttributeInfoProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public NodeAttributeInfoProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeAttributeInfoProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public NodeAttributeKey getAttributeKey() {
+ NodeAttributeInfoProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasAttributeKey()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getAttributeKey());
+ }
+
+ @Override
+ public void setAttributeKey(NodeAttributeKey attributeKey) {
+ maybeInitBuilder();
+ if (attributeKey == null) {
+ builder.clearAttributeKey();
+ return;
+ }
+ builder.setAttributeKey(convertToProtoFormat(attributeKey));
+ }
+
+ @Override
+ public NodeAttributeType getAttributeType() {
+ NodeAttributeInfoProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasAttributeType()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getAttributeType());
+ }
+
+ @Override
+ public void setAttributeType(NodeAttributeType attributeType) {
+ maybeInitBuilder();
+ if (attributeType == null) {
+ builder.clearAttributeType();
+ return;
+ }
+ builder.setAttributeType(convertToProtoFormat(attributeType));
+ }
+
+ private NodeAttributeTypeProto convertToProtoFormat(
+ NodeAttributeType attributeType) {
+ return NodeAttributeTypeProto.valueOf(attributeType.name());
+ }
+
+ private NodeAttributeType convertFromProtoFormat(
+ NodeAttributeTypeProto containerState) {
+ return NodeAttributeType.valueOf(containerState.name());
+ }
+
+ private NodeAttributeKeyPBImpl convertFromProtoFormat(
+ NodeAttributeKeyProto attributeKeyProto) {
+ return new NodeAttributeKeyPBImpl(attributeKeyProto);
+ }
+
+ private NodeAttributeKeyProto convertToProtoFormat(
+ NodeAttributeKey attributeKey) {
+ return ((NodeAttributeKeyPBImpl) attributeKey).getProto();
+ }
+
+ @Override
+ public int hashCode() {
+ return getAttributeKey().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (obj instanceof NodeAttributeInfo) {
+ NodeAttributeInfo other = (NodeAttributeInfo) obj;
+ return getAttributeKey().equals(other.getAttributeKey());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder strBuilder = new StringBuilder();
+ NodeAttributeKey key = this.getAttributeKey();
+ strBuilder.append(key.getAttributePrefix()).append("/")
+ .append(key.getAttributeName()).append("(")
+ .append(this.getAttributeType()).append(")");
+ return strBuilder.toString();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributeKeyPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributeKeyPBImpl.java
new file mode 100644
index 00000000000..921e767c006
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributeKeyPBImpl.java
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeKeyProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeKeyProtoOrBuilder;
+
+/**
+ * Implementation for NodeAttributeKey.
+ *
+ */
+@Private
+@Unstable
+public class NodeAttributeKeyPBImpl extends NodeAttributeKey {
+ private NodeAttributeKeyProto proto =
+ NodeAttributeKeyProto.getDefaultInstance();
+ private NodeAttributeKeyProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ public NodeAttributeKeyPBImpl() {
+ builder = NodeAttributeKeyProto.newBuilder();
+ }
+
+ public NodeAttributeKeyPBImpl(NodeAttributeKeyProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public NodeAttributeKeyProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeAttributeKeyProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public String getAttributePrefix() {
+ NodeAttributeKeyProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getAttributePrefix();
+ }
+
+ @Override
+ public void setAttributePrefix(String attributePrefix) {
+ maybeInitBuilder();
+ if (attributePrefix == null) {
+ builder.clearAttributePrefix();
+ return;
+ }
+ builder.setAttributePrefix(attributePrefix);
+ }
+
+ @Override
+ public String getAttributeName() {
+ NodeAttributeKeyProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasAttributeName()) {
+ return null;
+ }
+ return p.getAttributeName();
+ }
+
+ @Override
+ public void setAttributeName(String attributeName) {
+ maybeInitBuilder();
+ if (attributeName == null) {
+ builder.clearAttributeName();
+ return;
+ }
+ builder.setAttributeName(attributeName);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((getAttributePrefix() == null) ? 0
+ : getAttributePrefix().hashCode());
+ result = prime * result
+ + ((getAttributeName() == null) ? 0 : getAttributeName().hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (obj instanceof NodeAttributeKey) {
+ NodeAttributeKey other = (NodeAttributeKey) obj;
+ if (!compare(getAttributePrefix(), other.getAttributePrefix())) {
+ return false;
+ }
+ if (!compare(getAttributeName(), other.getAttributeName())) {
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ private static boolean compare(Object left, Object right) {
+ if (left == null) {
+ return right == null;
+ } else {
+ return left.equals(right);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "Prefix-" + getAttributePrefix() + " :Name-" + getAttributeName();
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributePBImpl.java
new file mode 100644
index 00000000000..689e2e6b1d2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeAttributePBImpl.java
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeKeyProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeTypeProto;
+
+/**
+ * Implementation for NodeAttribute.
+ */
+@Private
+@Unstable
+public class NodeAttributePBImpl extends NodeAttribute {
+ private NodeAttributeProto proto = NodeAttributeProto.getDefaultInstance();
+ private NodeAttributeProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ public NodeAttributePBImpl() {
+ builder = NodeAttributeProto.newBuilder();
+ }
+
+ public NodeAttributePBImpl(NodeAttributeProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public NodeAttributeProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeAttributeProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public NodeAttributeKey getAttributeKey() {
+ NodeAttributeProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasAttributeKey()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getAttributeKey());
+ }
+
+ @Override
+ public void setAttributeKey(NodeAttributeKey attributeKey) {
+ maybeInitBuilder();
+ if(attributeKey == null) {
+ builder.clearAttributeKey();
+ return;
+ }
+ builder.setAttributeKey(convertToProtoFormat(attributeKey));
+ }
+
+ @Override
+ public String getAttributeValue() {
+ NodeAttributeProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasAttributeValue()) {
+ return null;
+ }
+ return p.getAttributeValue();
+ }
+
+ @Override
+ public void setAttributeValue(String attributeValue) {
+ maybeInitBuilder();
+ if(attributeValue == null) {
+ builder.clearAttributeValue();
+ return;
+ }
+ builder.setAttributeValue(attributeValue);
+ }
+
+ @Override
+ public NodeAttributeType getAttributeType() {
+ NodeAttributeProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasAttributeType()) {
+ return null;
+ }
+ return convertFromProtoFormat(p.getAttributeType());
+ }
+
+ @Override
+ public void setAttributeType(NodeAttributeType attributeType) {
+ maybeInitBuilder();
+ if (attributeType == null) {
+ builder.clearAttributeType();
+ return;
+ }
+ builder.setAttributeType(convertToProtoFormat(attributeType));
+ }
+
+ private NodeAttributeTypeProto convertToProtoFormat(
+ NodeAttributeType attributeType) {
+ return NodeAttributeTypeProto.valueOf(attributeType.name());
+ }
+
+ private NodeAttributeType convertFromProtoFormat(
+ NodeAttributeTypeProto containerState) {
+ return NodeAttributeType.valueOf(containerState.name());
+ }
+
+ private NodeAttributeKeyPBImpl convertFromProtoFormat(
+ NodeAttributeKeyProto attributeKeyProto) {
+ return new NodeAttributeKeyPBImpl(attributeKeyProto);
+ }
+
+ private NodeAttributeKeyProto convertToProtoFormat(
+ NodeAttributeKey attributeKey) {
+ return ((NodeAttributeKeyPBImpl)attributeKey).getProto();
+ }
+
+ @Override
+ public int hashCode() {
+ return getAttributeKey().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (obj instanceof NodeAttribute) {
+ NodeAttribute other = (NodeAttribute) obj;
+ return getAttributeKey().equals(other.getAttributeKey());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder strBuilder = new StringBuilder();
+ NodeAttributeKey key = this.getAttributeKey();
+ strBuilder.append(key.getAttributePrefix()).append("/")
+ .append(key.getAttributeName()).append("(")
+ .append(this.getAttributeType()).append(")=")
+ .append(this.getAttributeValue());
+ return strBuilder.toString();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java
index ced588d30e0..7d5a06a6b4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java
@@ -18,17 +18,21 @@
package org.apache.hadoop.yarn.api.records.impl.pb;
+import java.util.ArrayList;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.NodeUpdateType;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProtoOrBuilder;
@@ -50,6 +54,7 @@
private ResourceUtilization containersUtilization = null;
private ResourceUtilization nodeUtilization = null;
Set<String> labels;
+ private Set<NodeAttribute> nodeAttributes;
public NodeReportPBImpl() {
builder = NodeReportProto.newBuilder();
@@ -268,6 +273,14 @@ private void mergeLocalToBuilder() {
builder.clearNodeLabels();
builder.addAllNodeLabels(this.labels);
}
+ if (this.nodeAttributes != null) {
+ builder.clearNodeAttributes();
+ List<NodeAttributeProto> attrList = new ArrayList<>();
+ for (NodeAttribute attr : this.nodeAttributes) {
+ attrList.add(convertToProtoFormat(attr));
+ }
+ builder.addAllNodeAttributes(attrList);
+ }
if (this.nodeUtilization != null
&& !((ResourceUtilizationPBImpl) this.nodeUtilization).getProto()
.equals(builder.getNodeUtilization())) {
@@ -306,7 +319,16 @@ private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
private NodeIdProto convertToProtoFormat(NodeId nodeId) {
return ((NodeIdPBImpl) nodeId).getProto();
}
-
+
+ private NodeAttributeProto convertToProtoFormat(NodeAttribute nodeAttr) {
+ return ((NodeAttributePBImpl) nodeAttr).getProto();
+ }
+
+ private NodeAttributePBImpl convertFromProtoFormat(
+ NodeAttributeProto nodeAttr) {
+ return new NodeAttributePBImpl(nodeAttr);
+ }
+
private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
return new ResourcePBImpl(p);
}
@@ -427,4 +449,24 @@ public void setNodeUpdateType(NodeUpdateType nodeUpdateType) {
}
builder.setNodeUpdateType(ProtoUtils.convertToProtoFormat(nodeUpdateType));
}
+
+ @Override
+ public void setNodeAttributes(Set<NodeAttribute> nodeAttrs) {
+ maybeInitBuilder();
+ builder.clearNodeAttributes();
+ this.nodeAttributes = nodeAttrs;
+ }
+
+ @Override
+ public Set<NodeAttribute> getNodeAttributes() {
+ if (nodeAttributes != null) {
+ return nodeAttributes;
+ }
+ NodeReportProtoOrBuilder p = viaProto ? proto : builder;
+ this.nodeAttributes = new HashSet<>();
+ for (NodeAttributeProto nattrProto : p.getNodeAttributesList()) {
+ nodeAttributes.add(convertFromProtoFormat(nattrProto));
+ }
+ return nodeAttributes;
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeToAttributeValuePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeToAttributeValuePBImpl.java
new file mode 100644
index 00000000000..7a79876f290
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeToAttributeValuePBImpl.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeToAttributeValueProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeToAttributeValueProtoOrBuilder;
+
+/**
+ * PB Implementation for NodeToAttributeValue.
+ *
+ */
+public class NodeToAttributeValuePBImpl extends NodeToAttributeValue {
+ private NodeToAttributeValueProto proto =
+ NodeToAttributeValueProto.getDefaultInstance();
+ private NodeToAttributeValueProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ public NodeToAttributeValuePBImpl() {
+ builder = NodeToAttributeValueProto.newBuilder();
+ }
+
+ public NodeToAttributeValuePBImpl(NodeToAttributeValueProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public NodeToAttributeValueProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = NodeToAttributeValueProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public String getAttributeValue() {
+ NodeToAttributeValueProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getAttributeValue();
+ }
+
+ @Override
+ public void setAttributeValue(String attributeValue) {
+ maybeInitBuilder();
+ if (attributeValue == null) {
+ builder.clearAttributeValue();
+ return;
+ }
+ builder.setAttributeValue(attributeValue);
+ }
+
+ @Override
+ public String getHostname() {
+ NodeToAttributeValueProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasHostname()) {
+ return null;
+ }
+ return p.getHostname();
+ }
+
+ @Override
+ public void setHostname(String hostname) {
+ maybeInitBuilder();
+ if (hostname == null) {
+ builder.clearHostname();
+ return;
+ }
+ builder.setHostname(hostname);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result
+ + ((getAttributeValue() == null) ? 0 : getAttributeValue().hashCode());
+ result = prime * result
+ + ((getHostname() == null) ? 0 : getHostname().hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (obj instanceof NodeToAttributeValue) {
+ NodeToAttributeValue other = (NodeToAttributeValue) obj;
+ if (!compare(getAttributeValue(), other.getAttributeValue())) {
+ return false;
+ }
+ if (!compare(getHostname(), other.getHostname())) {
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ private static boolean compare(Object left, Object right) {
+ if (left == null) {
+ return right == null;
+ } else {
+ return left.equals(right);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "Name-" + getHostname() + " : Attribute Value-"
+ + getAttributeValue();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AbstractLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AbstractLabel.java
new file mode 100644
index 00000000000..6a44574abfd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AbstractLabel.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+/**
+ * Generic class capturing the information required commonly across Partitions
+ * and Attributes.
+ */
+public abstract class AbstractLabel {
+
+ private Resource resource;
+ private int numActiveNMs;
+ private String labelName;
+
+ public AbstractLabel() {
+ super();
+ }
+
+ public AbstractLabel(String labelName) {
+ this(labelName, Resource.newInstance(0, 0), 0);
+ }
+
+ public AbstractLabel(String labelName, Resource resource, int numActiveNMs) {
+ super();
+ this.resource = resource;
+ this.numActiveNMs = numActiveNMs;
+ this.labelName = labelName;
+ }
+
+ public void addNode(Resource nodeRes) {
+ Resources.addTo(resource, nodeRes);
+ numActiveNMs++;
+ }
+
+ public void removeNode(Resource nodeRes) {
+ Resources.subtractFrom(resource, nodeRes);
+ numActiveNMs--;
+ }
+
+ public Resource getResource() {
+ return Resource.newInstance(this.resource);
+ }
+
+ public int getNumActiveNMs() {
+ return numActiveNMs;
+ }
+
+ public String getLabelName() {
+ return labelName;
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AttributeExpressionOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AttributeExpressionOperation.java
new file mode 100644
index 00000000000..8754314c51a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AttributeExpressionOperation.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+/**
+ * Operations which are allowed in Node Attributes Expression.
+ */
+public enum AttributeExpressionOperation {
+ LT, GT, IN, NOTIN
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AttributeValue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AttributeValue.java
new file mode 100644
index 00000000000..d1d75cf1e92
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/AttributeValue.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.IOException;
+
+/**
+ * Interface to capture operations on AttributeValue.
+ */
+public interface AttributeValue {
+
+ /**
+ * @return original value which was set.
+ */
+ String getValue();
+
+ /**
+ * validate the value based on the type and initialize for further compare
+ * operations.
+ *
+ * @param value
+ * @throws IOException
+ */
+ void validateAndInitializeValue(String value) throws IOException;
+
+ /**
+ * compare the value against the other based on the
+ * AttributeExpressionOperation.
+ *
+ * @param other
+ * @param op
+ * @return true if value other matches the current value for the
+ * operation op.
+ */
+ boolean compareForOperation(AttributeValue other,
+ AttributeExpressionOperation op);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 612b7010d65..19254c15b1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -35,7 +35,6 @@
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
-import java.util.regex.Pattern;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
@@ -65,15 +64,12 @@
@Private
public class CommonNodeLabelsManager extends AbstractService {
protected static final Log LOG = LogFactory.getLog(CommonNodeLabelsManager.class);
- private static final int MAX_LABEL_LENGTH = 255;
public static final Set EMPTY_STRING_SET = Collections
.unmodifiableSet(new HashSet(0));
public static final Set EMPTY_NODELABEL_SET = Collections
.unmodifiableSet(new HashSet(0));
public static final String ANY = "*";
public static final Set ACCESS_ANY_LABEL_SET = ImmutableSet.of(ANY);
- private static final Pattern LABEL_PATTERN = Pattern
- .compile("^[0-9a-zA-Z][0-9a-zA-Z-_]*");
public static final int WILDCARD_PORT = 0;
// Flag to identify startup for removelabel
private boolean initNodeLabelStoreInProgress = false;
@@ -112,7 +108,7 @@
/**
* A Host can have multiple Nodes
*/
- protected static class Host {
+ public static class Host {
public Set labels;
public Map nms;
@@ -238,7 +234,10 @@ protected boolean isInitNodeLabelStoreInProgress() {
return initNodeLabelStoreInProgress;
}
- boolean isCentralizedConfiguration() {
+ /**
+ * @return true if node label configuration type is not distributed.
+ */
+ public boolean isCentralizedConfiguration() {
return isCentralizedNodeLabelConfiguration;
}
@@ -249,8 +248,7 @@ protected void initNodeLabelStore(Configuration conf) throws Exception {
conf.getClass(YarnConfiguration.FS_NODE_LABELS_STORE_IMPL_CLASS,
FileSystemNodeLabelsStore.class, NodeLabelsStore.class),
conf);
- this.store.setNodeLabelsManager(this);
- this.store.init(conf);
+ this.store.init(conf, this);
this.store.recover();
}
@@ -317,7 +315,7 @@ public void addToCluserNodeLabels(Collection labels)
// do a check before actual adding them, will throw exception if any of them
// doesn't meet label name requirement
for (NodeLabel label : labels) {
- checkAndThrowLabelName(label.getName());
+ NodeLabelUtil.checkAndThrowLabelName(label.getName());
}
for (NodeLabel label : labels) {
@@ -969,22 +967,6 @@ public boolean isExclusiveNodeLabel(String nodeLabel) throws IOException {
}
}
- public static void checkAndThrowLabelName(String label) throws IOException {
- if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) {
- throw new IOException("label added is empty or exceeds "
- + MAX_LABEL_LENGTH + " character(s)");
- }
- label = label.trim();
-
- boolean match = LABEL_PATTERN.matcher(label).matches();
-
- if (!match) {
- throw new IOException("label name should only contains "
- + "{0-9, a-z, A-Z, -, _} and should not started with {-,_}"
- + ", now it is=" + label);
- }
- }
-
private void checkExclusivityMatch(Collection labels)
throws IOException {
ArrayList mismatchlabels = new ArrayList();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
index 0ec4ea42f73..6c459c2739d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/FileSystemNodeLabelsStore.java
@@ -18,275 +18,91 @@
package org.apache.hadoop.yarn.nodelabels;
-import java.io.EOFException;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
-import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
-import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
-import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RemoveFromClusterNodeLabelsRequestPBImpl;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
+import org.apache.hadoop.yarn.nodelabels.store.AbstractFSNodeStore;
-import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.nodelabels.store.op.AddClusterLabelOp;
+import org.apache.hadoop.yarn.nodelabels.store.FSStoreOpHandler.StoreType;
+import org.apache.hadoop.yarn.nodelabels.store.op.NodeToLabelOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.RemoveClusterLabelOp;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
-public class FileSystemNodeLabelsStore extends NodeLabelsStore {
- protected static final Log LOG = LogFactory.getLog(FileSystemNodeLabelsStore.class);
+/**
+ * FileSystemNodeLabelsStore for storing node labels.
+ */
+public class FileSystemNodeLabelsStore
+ extends AbstractFSNodeStore
+ implements NodeLabelsStore {
+ protected static final Log LOG =
+ LogFactory.getLog(FileSystemNodeLabelsStore.class);
protected static final String DEFAULT_DIR_NAME = "node-labels";
protected static final String MIRROR_FILENAME = "nodelabel.mirror";
protected static final String EDITLOG_FILENAME = "nodelabel.editlog";
-
- protected enum SerializedLogType {
- ADD_LABELS, NODE_TO_LABELS, REMOVE_LABELS
+
+ FileSystemNodeLabelsStore() {
+ super(StoreType.NODE_LABEL_STORE);
}
- Path fsWorkingPath;
- FileSystem fs;
- private FSDataOutputStream editlogOs;
- private Path editLogPath;
-
private String getDefaultFSNodeLabelsRootDir() throws IOException {
// default is in local: /tmp/hadoop-yarn-${user}/node-labels/
- return "file:///tmp/hadoop-yarn-"
- + UserGroupInformation.getCurrentUser().getShortUserName() + "/"
- + DEFAULT_DIR_NAME;
+ return "file:///tmp/hadoop-yarn-" + UserGroupInformation.getCurrentUser()
+ .getShortUserName() + "/" + DEFAULT_DIR_NAME;
}
@Override
- public void init(Configuration conf) throws Exception {
- fsWorkingPath =
- new Path(conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
- getDefaultFSNodeLabelsRootDir()));
-
- setFileSystem(conf);
-
- // mkdir of root dir path
- fs.mkdirs(fsWorkingPath);
+ public void init(Configuration conf, CommonNodeLabelsManager mgr)
+ throws Exception {
+ StoreSchema schema = new StoreSchema(EDITLOG_FILENAME, MIRROR_FILENAME);
+ initStore(conf, new Path(
+ conf.get(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
+ getDefaultFSNodeLabelsRootDir())), schema, mgr);
}
@Override
public void close() throws IOException {
- IOUtils.cleanup(LOG, fs, editlogOs);
- }
-
- void setFileSystem(Configuration conf) throws IOException {
- Configuration confCopy = new Configuration(conf);
- fs = fsWorkingPath.getFileSystem(confCopy);
-
- // if it's local file system, use RawLocalFileSystem instead of
- // LocalFileSystem, the latter one doesn't support append.
- if (fs.getScheme().equals("file")) {
- fs = ((LocalFileSystem)fs).getRaw();
- }
- }
-
- private void ensureAppendEditlogFile() throws IOException {
- editlogOs = fs.append(editLogPath);
- }
-
- private void ensureCloseEditlogFile() throws IOException {
- editlogOs.close();
+ super.closeFSStore();
}
@Override
- public void updateNodeToLabelsMappings(
- Map> nodeToLabels) throws IOException {
- try {
- ensureAppendEditlogFile();
- editlogOs.writeInt(SerializedLogType.NODE_TO_LABELS.ordinal());
- ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
- .newInstance(nodeToLabels)).getProto().writeDelimitedTo(editlogOs);
- } finally {
- ensureCloseEditlogFile();
- }
+ public void updateNodeToLabelsMappings(Map> nodeToLabels)
+ throws IOException {
+ NodeToLabelOp op = new NodeToLabelOp();
+ writeToLog(op.setNodeToLabels(nodeToLabels));
}
@Override
public void storeNewClusterNodeLabels(List labels)
throws IOException {
- try {
- ensureAppendEditlogFile();
- editlogOs.writeInt(SerializedLogType.ADD_LABELS.ordinal());
- ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest
- .newInstance(labels)).getProto().writeDelimitedTo(editlogOs);
- } finally {
- ensureCloseEditlogFile();
- }
+ AddClusterLabelOp op = new AddClusterLabelOp();
+ writeToLog(op.setLabels(labels));
}
@Override
public void removeClusterNodeLabels(Collection labels)
throws IOException {
- try {
- ensureAppendEditlogFile();
- editlogOs.writeInt(SerializedLogType.REMOVE_LABELS.ordinal());
- ((RemoveFromClusterNodeLabelsRequestPBImpl) RemoveFromClusterNodeLabelsRequest.newInstance(Sets
- .newHashSet(labels.iterator()))).getProto().writeDelimitedTo(editlogOs);
- } finally {
- ensureCloseEditlogFile();
- }
- }
-
- protected void loadFromMirror(Path newMirrorPath, Path oldMirrorPath)
- throws IOException {
- // If mirror.new exists, read from mirror.new,
- FSDataInputStream is = null;
- try {
- is = fs.open(newMirrorPath);
- } catch (FileNotFoundException e) {
- try {
- is = fs.open(oldMirrorPath);
- } catch (FileNotFoundException ignored) {
-
- }
- }
- if (null != is) {
- List labels = new AddToClusterNodeLabelsRequestPBImpl(
- AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is))
- .getNodeLabels();
- mgr.addToCluserNodeLabels(labels);
-
- if (mgr.isCentralizedConfiguration()) {
- // Only load node to labels mapping while using centralized configuration
- Map> nodeToLabels =
- new ReplaceLabelsOnNodeRequestPBImpl(
- ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is))
- .getNodeToLabels();
- mgr.replaceLabelsOnNode(nodeToLabels);
- }
- is.close();
- }
+ RemoveClusterLabelOp op = new RemoveClusterLabelOp();
+ writeToLog(op.setLabels(labels));
}
/* (non-Javadoc)
- * @see org.apache.hadoop.yarn.nodelabels.NodeLabelsStore#recover(boolean)
- */
- @Override
- public void recover() throws YarnException,
- IOException {
- /*
- * Steps of recover
- * 1) Read from last mirror (from mirror or mirror.old)
- * 2) Read from last edit log, and apply such edit log
- * 3) Write new mirror to mirror.writing
- * 4) Rename mirror to mirror.old
- * 5) Move mirror.writing to mirror
- * 6) Remove mirror.old
- * 7) Remove edit log and create a new empty edit log
+ * @see org.apache.hadoop.yarn.nodelabels.NodeLabelsStore#recover(boolean)
*/
-
- // Open mirror from serialized file
- Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
- Path oldMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".old");
-
- loadFromMirror(mirrorPath, oldMirrorPath);
-
- // Open and process editlog
- editLogPath = new Path(fsWorkingPath, EDITLOG_FILENAME);
- FSDataInputStream is;
- try {
- is = fs.open(editLogPath);
- } catch (FileNotFoundException e) {
- is = null;
- }
- if (null != is) {
-
- while (true) {
- try {
- // read edit log one by one
- SerializedLogType type = SerializedLogType.values()[is.readInt()];
-
- switch (type) {
- case ADD_LABELS: {
- List labels =
- new AddToClusterNodeLabelsRequestPBImpl(
- AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is))
- .getNodeLabels();
- mgr.addToCluserNodeLabels(labels);
- break;
- }
- case REMOVE_LABELS: {
- Collection labels =
- RemoveFromClusterNodeLabelsRequestProto.parseDelimitedFrom(is)
- .getNodeLabelsList();
- mgr.removeFromClusterNodeLabels(labels);
- break;
- }
- case NODE_TO_LABELS: {
- Map> map =
- new ReplaceLabelsOnNodeRequestPBImpl(
- ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is))
- .getNodeToLabels();
- if (mgr.isCentralizedConfiguration()) {
- /*
- * In case of Distributed NodeLabels setup,
- * ignoreNodeToLabelsMappings will be set to true and recover will
- * be invoked. As RM will collect the node labels from NM through
- * registration/HB
- */
- mgr.replaceLabelsOnNode(map);
- }
- break;
- }
- }
- } catch (EOFException e) {
- // EOF hit, break
- break;
- }
- }
- is.close();
- }
-
- // Serialize current mirror to mirror.writing
- Path writingMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".writing");
- FSDataOutputStream os = fs.create(writingMirrorPath, true);
- ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequestPBImpl
- .newInstance(mgr.getClusterNodeLabels())).getProto().writeDelimitedTo(os);
- ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
- .newInstance(mgr.getNodeLabels())).getProto().writeDelimitedTo(os);
- os.close();
-
- // Move mirror to mirror.old
- if (fs.exists(mirrorPath)) {
- fs.delete(oldMirrorPath, false);
- fs.rename(mirrorPath, oldMirrorPath);
- }
-
- // move mirror.writing to mirror
- fs.rename(writingMirrorPath, mirrorPath);
- fs.delete(writingMirrorPath, false);
-
- // remove mirror.old
- fs.delete(oldMirrorPath, false);
-
- // create a new editlog file
- editlogOs = fs.create(editLogPath, true);
- editlogOs.close();
-
- LOG.info("Finished write mirror at:" + mirrorPath.toString());
- LOG.info("Finished create editlog file at:" + editLogPath.toString());
+ @Override
+ public void recover() throws YarnException, IOException {
+ super.recoverFromStore();
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeAttributeStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeAttributeStore.java
new file mode 100644
index 00000000000..8e9f9ff9f0f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeAttributeStore.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Interface class for Node attribute store.
+ */
+public interface NodeAttributeStore extends Closeable {
+
+ /**
+ * Replace attributes on node.
+ *
+ * @param nodeToAttribute node to attribute list.
+ * @throws IOException
+ */
+ void replaceNodeAttributes(List nodeToAttribute)
+ throws IOException;
+
+ /**
+ * Add attribute to node.
+ *
+ * @param nodeToAttribute node to attribute list.
+ * @throws IOException
+ */
+ void addNodeAttributes(List nodeToAttribute)
+ throws IOException;
+
+ /**
+ * Remove attribute from node.
+ *
+ * @param nodeToAttribute node to attribute list.
+ * @throws IOException
+ */
+ void removeNodeAttributes(List nodeToAttribute)
+ throws IOException;
+
+ /**
+ * Initialize based on configuration and NodeAttributesManager.
+ *
+ * @param configuration configuration instance.
+ * @param mgr nodeattributemanager instance.
+ * @throws Exception
+ */
+ void init(Configuration configuration, NodeAttributesManager mgr)
+ throws Exception;
+
+ /**
+ * Recover store on resourcemanager startup.
+ * @throws IOException
+ * @throws YarnException
+ */
+ void recover() throws IOException, YarnException;
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeAttributesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeAttributesManager.java
new file mode 100644
index 00000000000..ca04e8da96e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeAttributesManager.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+
+/**
+ * This class captures all interactions for Attributes with RM.
+ */
+public abstract class NodeAttributesManager extends AbstractService {
+ public NodeAttributesManager(String name) {
+ super(name);
+ }
+
+ /**
+ * To completely replace the mappings for a given node with the new Set of
+ * Attributes which are under a given prefix. If the mapping contains an
+ * attribute whose type does not match a previously existing Attribute
+ * under the same prefix (name space) then exception is thrown.
+ * Key would be name of the node and value would be set of Attributes to
+ * be mapped. If the prefix is null, then all node attributes will be
+ * replaced regardless of what prefix they have.
+ *
+ * @param prefix node attribute prefix
+ * @param nodeAttributeMapping host name to a set of node attributes mapping
+ * @throws IOException if failed to replace attributes
+ */
+ public abstract void replaceNodeAttributes(String prefix,
+ Map> nodeAttributeMapping) throws IOException;
+
+ /**
+ * It adds or updates the attribute mapping for a given node without
+ * impacting other existing attribute mapping. Key would be name of the node
+ * and value would be set of Attributes to be mapped.
+ *
+ * @param nodeAttributeMapping
+ * @throws IOException
+ */
+ public abstract void addNodeAttributes(
+ Map> nodeAttributeMapping) throws IOException;
+
+ /**
+ * It removes the specified attribute mapping for a given node without
+ * impacting other existing attribute mapping. Key would be name of the node
+ * and value would be set of Attributes to be removed.
+ *
+ * @param nodeAttributeMapping
+ * @throws IOException
+ */
+ public abstract void removeNodeAttributes(
+ Map> nodeAttributeMapping) throws IOException;
+
+ /**
+ * Returns a set of node attributes whose prefix is one of the given
+ * prefixes; if the prefix set is null or empty, all attributes are returned;
+ * if prefix set is given but no mapping could be found, an empty set
+ * is returned.
+ *
+ * @param prefix set of prefix strings for which the attributes need to be
+ * returned.
+ * @return Set of node Attributes
+ */
+ public abstract Set getClusterNodeAttributes(
+ Set prefix);
+
+ /**
+ * Return a map of Nodes to attribute value for the given NodeAttributeKeys.
+ * If the attributeKeys set is null or empty, then mapping for all attributes
+ * are returned.
+ *
+ * @return a Map of attributeKeys to a map of hostnames to its attribute
+ * values.
+ */
+ public abstract Map> getAttributesToNodes(
+ Set attributes);
+
+ /**
+ * NodeAttribute to AttributeValue Map.
+ *
+ * @return Map mapping of Attribute to Value.
+ */
+ public abstract Map getAttributesForNode(
+ String hostName);
+
+ /**
+ * Get all node to Attributes list based on filter.
+ *
+ * @return List nodeToAttributes matching filter. If empty
+ * or null is passed as argument, will return all.
+ */
+ public abstract List getNodeToAttributes(
+ Set prefix);
+
+ /**
+ * Get all node to Attributes mapping.
+ *
+ * @return Map> nodesToAttributes matching
+ * filter. If empty or null is passed as argument, will return all.
+ */
+ public abstract Map> getNodesToAttributes(
+ Set hostNames);
+
+ // futuristic
+ // public set getNodesMatchingExpression(String nodeLabelExp);
+
+ /**
+ * Refresh node attributes on a given node during RM recovery.
+ * @param nodeId Node Id
+ */
+ public abstract void refreshNodeAttributesToScheduler(NodeId nodeId);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java
new file mode 100644
index 00000000000..395ff8183ce
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelUtil.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels;
+
+import com.google.common.base.Strings;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+/**
+ * Utility class for all NodeLabel and NodeAttribute operations.
+ */
+public final class NodeLabelUtil {
+ private NodeLabelUtil() {
+ }
+
+ private static final int MAX_LABEL_LENGTH = 255;
+ private static final Pattern LABEL_OR_VALUE_PATTERN =
+ Pattern.compile("^[0-9a-zA-Z][0-9a-zA-Z-_]*");
+ private static final Pattern PREFIX_PATTERN =
+ Pattern.compile("^[0-9a-zA-Z][0-9a-zA-Z-_\\.]*");
+ private static final Pattern ATTRIBUTE_VALUE_PATTERN =
+ Pattern.compile("^[0-9a-zA-Z][0-9a-zA-Z-_.]*");
+
+ public static void checkAndThrowLabelName(String label) throws IOException {
+ if (label == null || label.isEmpty() || label.length() > MAX_LABEL_LENGTH) {
+ throw new IOException("label added is empty or exceeds "
+ + MAX_LABEL_LENGTH + " character(s)");
+ }
+ label = label.trim();
+
+ boolean match = LABEL_OR_VALUE_PATTERN.matcher(label).matches();
+
+ if (!match) {
+ throw new IOException("label name should only contains "
+ + "{0-9, a-z, A-Z, -, _} and should not started with {-,_}"
+ + ", now it is= " + label);
+ }
+ }
+
+ public static void checkAndThrowAttributeValue(String value)
+ throws IOException {
+ if (value == null) {
+ return;
+ } else if (value.trim().length() > MAX_LABEL_LENGTH) {
+ throw new IOException("Attribute value added exceeds " + MAX_LABEL_LENGTH
+ + " character(s)");
+
+ }
+ value = value.trim();
+ if(value.isEmpty()) {
+ return;
+ }
+
+ boolean match = ATTRIBUTE_VALUE_PATTERN.matcher(value).matches();
+
+ if (!match) {
+ throw new IOException("attribute value should only contains "
+ + "{0-9, a-z, A-Z, -, _} and should not started with {-,_}"
+ + ", now it is= " + value);
+ }
+ }
+
+ public static void checkAndThrowAttributePrefix(String prefix)
+ throws IOException {
+ if (prefix == null) {
+ throw new IOException("Attribute prefix cannot be null.");
+ }
+ if (prefix.trim().length() > MAX_LABEL_LENGTH) {
+ throw new IOException("Attribute value added exceeds " + MAX_LABEL_LENGTH
+ + " character(s)");
+ }
+ prefix = prefix.trim();
+ if(prefix.isEmpty()) {
+ return;
+ }
+
+ boolean match = PREFIX_PATTERN.matcher(prefix).matches();
+
+ if (!match) {
+ throw new IOException("attribute value should only contains "
+ + "{0-9, a-z, A-Z, -, _,.} and should not started with {-,_}"
+ + ", now it is= " + prefix);
+ }
+ }
+
+ /**
+ * Validate if a given set of attributes are valid. Attributes could be
+ * invalid if any of following conditions is met:
+ *
+ *
+ * Missing prefix: the attribute doesn't have prefix defined
+ * Malformed attribute prefix: the prefix is not in valid format
+ *
+ * @param attributeSet
+ * @throws IOException
+ */
+ public static void validateNodeAttributes(Set attributeSet)
+ throws IOException {
+ if (attributeSet != null && !attributeSet.isEmpty()) {
+ for (NodeAttribute nodeAttribute : attributeSet) {
+ NodeAttributeKey attributeKey = nodeAttribute.getAttributeKey();
+ if (attributeKey == null) {
+ throw new IOException("AttributeKey must be set");
+ }
+ String prefix = attributeKey.getAttributePrefix();
+ if (Strings.isNullOrEmpty(prefix)) {
+ throw new IOException("Attribute prefix must be set");
+ }
+ // Verify attribute prefix format.
+ checkAndThrowAttributePrefix(prefix);
+ // Verify attribute name format.
+ checkAndThrowLabelName(attributeKey.getAttributeName());
+ }
+ }
+ }
+
+ /**
+ * Filter a set of node attributes by a given prefix. Returns a filtered
+ * set of node attributes whose prefix equals the given prefix.
+ * If the prefix is null or empty, then the original set is returned.
+ * @param attributeSet node attribute set
+ * @param prefix node attribute prefix
+ * @return a filtered set of node attributes
+ */
+ public static Set filterAttributesByPrefix(
+ Set attributeSet, String prefix) {
+ if (Strings.isNullOrEmpty(prefix)) {
+ return attributeSet;
+ }
+ return attributeSet.stream()
+ .filter(nodeAttribute -> prefix
+ .equals(nodeAttribute.getAttributeKey().getAttributePrefix()))
+ .collect(Collectors.toSet());
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
index aacb920c4a2..e4efd68f92b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
@@ -30,25 +30,27 @@
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.exceptions.YarnException;
-public abstract class NodeLabelsStore implements Closeable {
- protected CommonNodeLabelsManager mgr;
-
+/**
+ * Interface class for Node label store.
+ */
+public interface NodeLabelsStore extends Closeable {
+
/**
- * Store node {@literal ->} label
+ * Store node {@literal ->} label.
*/
- public abstract void updateNodeToLabelsMappings(
+ void updateNodeToLabelsMappings(
Map<NodeId, Set<String>> nodeToLabels) throws IOException;
/**
- * Store new labels
+ * Store new labels.
*/
- public abstract void storeNewClusterNodeLabels(List label)
+ void storeNewClusterNodeLabels(List<NodeLabel> label)
throws IOException;
/**
- * Remove labels
+ * Remove labels.
*/
- public abstract void removeClusterNodeLabels(Collection labels)
+ void removeClusterNodeLabels(Collection<String> labels)
throws IOException;
/**
@@ -56,16 +58,14 @@ public abstract void removeClusterNodeLabels(Collection labels)
* ignoreNodeToLabelsMappings is true then node to labels mappings should not
* be recovered. In case of Distributed NodeLabels setup
* ignoreNodeToLabelsMappings will be set to true and recover will be invoked
- * as RM will collect the node labels from NM through registration/HB
+ * as RM will collect the node labels from NM through registration/HB.
*
* @throws IOException
* @throws YarnException
*/
- public abstract void recover() throws IOException, YarnException;
-
- public void init(Configuration conf) throws Exception {}
+ void recover() throws IOException, YarnException;
+
+ void init(Configuration conf, CommonNodeLabelsManager mgr)
+ throws Exception;
- public void setNodeLabelsManager(CommonNodeLabelsManager mgr) {
- this.mgr = mgr;
- }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
index 989f0279403..674703763ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NonAppendableFSNodeLabelStore.java
@@ -18,13 +18,6 @@
package org.apache.hadoop.yarn.nodelabels;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -32,11 +25,19 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
-import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl;
+import org.apache.hadoop.yarn.nodelabels.store.FSStoreOpHandler;
+import org.apache.hadoop.yarn.nodelabels.store.StoreOp;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+/**
+ * Store implementation for Non Appendable File Store.
+ */
public class NonAppendableFSNodeLabelStore extends FileSystemNodeLabelsStore {
protected static final Log
LOG = LogFactory.getLog(NonAppendableFSNodeLabelStore.class);
@@ -52,7 +53,7 @@ public void recover() throws YarnException,
Path newMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new");
Path oldMirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
loadFromMirror(newMirrorPath, oldMirrorPath);
-
+
// if new mirror exists, remove old mirror and rename new mirror
if (fs.exists(newMirrorPath)) {
// remove old mirror
@@ -91,29 +92,18 @@ public void removeClusterNodeLabels(Collection labels)
}
private void writeNewMirror() throws IOException {
- ReentrantReadWriteLock.ReadLock readLock = mgr.readLock;
+ ReentrantReadWriteLock.ReadLock readLock = manager.readLock;
try {
// Acquire readlock to make sure we get cluster node labels and
// node-to-labels mapping atomically.
readLock.lock();
- List nodeLabels = mgr.getClusterNodeLabels();
- Map> nodeToLabels = mgr.getNodeLabels();
-
// Write mirror to mirror.new.tmp file
- Path newTmpPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new.tmp");
- FSDataOutputStream os = fs
- .create(newTmpPath, true);
- ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest
- .newInstance(nodeLabels)).getProto().writeDelimitedTo(os);
-
- if (mgr.isCentralizedConfiguration()) {
- // Only save node-to-labels mapping while using centralized configuration
- ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
- .newInstance(nodeToLabels)).getProto().writeDelimitedTo(os);
+ Path newTmpPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new.tmp");
+ try (FSDataOutputStream os = fs.create(newTmpPath, true)) {
+ StoreOp op = FSStoreOpHandler.getMirrorOp(getStoreType());
+ op.write(os, manager);
}
- os.close();
-
// Rename mirror.new.tmp to mirror.new (will remove .new if it's existed)
Path newPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new");
fs.delete(newPath, false);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeAttribute.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeAttribute.java
new file mode 100644
index 00000000000..ffe36c8c4ca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeAttribute.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * Reference of NodeAttribute in RM.
+ */
+public class RMNodeAttribute extends AbstractLabel {
+
+ private NodeAttribute attribute;
+ // TODO need to revisit whether we need to make this concurrent implementation
+ private Map<String, AttributeValue> nodes = new HashMap<>();
+
+ public RMNodeAttribute(NodeAttribute attribute) {
+ this(attribute.getAttributeKey().getAttributeName(),
+ Resource.newInstance(0, 0), 0, attribute);
+ }
+
+ public RMNodeAttribute(String labelName, Resource res, int activeNMs,
+ NodeAttribute attribute) {
+ super(labelName, res, activeNMs);
+ this.attribute = attribute;
+ }
+
+ public NodeAttribute getAttribute() {
+ return attribute;
+ }
+
+ public void setAttribute(NodeAttribute attribute) {
+ this.attribute = attribute;
+ }
+
+ public NodeAttributeType getAttributeType() {
+ return attribute.getAttributeType();
+ }
+
+ public void addNode(String node, AttributeValue attributeValue) {
+ nodes.put(node, attributeValue);
+ }
+
+ public void removeNode(String node) {
+ nodes.remove(node);
+ }
+
+ public Map<String, AttributeValue> getAssociatedNodeIds() {
+ return new HashMap<String, AttributeValue>(nodes);
+ }
+
+ @Override
+ public int hashCode() {
+ return attribute.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ RMNodeAttribute other = (RMNodeAttribute) obj;
+ if (attribute == null) {
+ if (other.attribute != null) {
+ return false;
+ }
+ } else if (!attribute.equals(other.attribute)) {
+ return false;
+ }
+ return true;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java
index 693a58a772c..5e755f4daeb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/RMNodeLabel.java
@@ -27,13 +27,14 @@
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;
-public class RMNodeLabel implements Comparable {
- private Resource resource;
- private int numActiveNMs;
- private String labelName;
- private Set nodeIds;
+/**
+ * Partition representation in RM.
+ */
+public class RMNodeLabel extends AbstractLabel
+ implements Comparable<RMNodeLabel> {
private boolean exclusive;
private NodeLabel nodeLabel;
+ private Set<NodeId> nodeIds;
public RMNodeLabel(NodeLabel nodeLabel) {
this(nodeLabel.getName(), Resource.newInstance(0, 0), 0,
@@ -47,48 +48,12 @@ public RMNodeLabel(String labelName) {
protected RMNodeLabel(String labelName, Resource res, int activeNMs,
boolean exclusive) {
- this.labelName = labelName;
- this.resource = res;
- this.numActiveNMs = activeNMs;
- this.nodeIds = new HashSet();
+ super(labelName, res, activeNMs);
this.exclusive = exclusive;
this.nodeLabel = NodeLabel.newInstance(labelName, exclusive);
+ nodeIds = new HashSet<NodeId>();
}
- public void addNodeId(NodeId node) {
- nodeIds.add(node);
- }
-
- public void removeNodeId(NodeId node) {
- nodeIds.remove(node);
- }
-
- public Set getAssociatedNodeIds() {
- return new HashSet(nodeIds);
- }
-
- public void addNode(Resource nodeRes) {
- Resources.addTo(resource, nodeRes);
- numActiveNMs++;
- }
-
- public void removeNode(Resource nodeRes) {
- Resources.subtractFrom(resource, nodeRes);
- numActiveNMs--;
- }
-
- public Resource getResource() {
- return this.resource;
- }
-
- public int getNumActiveNMs() {
- return numActiveNMs;
- }
-
- public String getLabelName() {
- return labelName;
- }
-
public void setIsExclusive(boolean exclusive) {
this.exclusive = exclusive;
}
@@ -97,42 +62,57 @@ public boolean getIsExclusive() {
return this.exclusive;
}
+ @Override
+ public boolean equals(Object obj) {
+ if (obj instanceof RMNodeLabel) {
+ RMNodeLabel other = (RMNodeLabel) obj;
+ return Resources.equals(getResource(), other.getResource())
+ && StringUtils.equals(getLabelName(), other.getLabelName())
+ && (other.getNumActiveNMs() == getNumActiveNMs());
+ }
+ return false;
+ }
+
+
public RMNodeLabel getCopy() {
- return new RMNodeLabel(labelName, resource, numActiveNMs, exclusive);
+ return new RMNodeLabel(getLabelName(), getResource(), getNumActiveNMs(),
+ exclusive);
}
- public NodeLabel getNodeLabel() {
- return this.nodeLabel;
+ @Override
+ public int hashCode() {
+ final int prime = 502357;
+ return (int) ((((long) getLabelName().hashCode() << 8)
+ + (getResource().hashCode() << 4) + getNumActiveNMs()) % prime);
}
+
@Override
public int compareTo(RMNodeLabel o) {
// We should always put empty label entry first after sorting
- if (labelName.isEmpty() != o.getLabelName().isEmpty()) {
- if (labelName.isEmpty()) {
+ if (getLabelName().isEmpty() != o.getLabelName().isEmpty()) {
+ if (getLabelName().isEmpty()) {
return -1;
}
return 1;
}
- return labelName.compareTo(o.getLabelName());
+ return getLabelName().compareTo(o.getLabelName());
}
-
- @Override
- public boolean equals(Object obj) {
- if (obj instanceof RMNodeLabel) {
- RMNodeLabel other = (RMNodeLabel) obj;
- return Resources.equals(resource, other.getResource())
- && StringUtils.equals(labelName, other.getLabelName())
- && (other.getNumActiveNMs() == numActiveNMs);
- }
- return false;
+
+ public NodeLabel getNodeLabel() {
+ return this.nodeLabel;
}
-
- @Override
- public int hashCode() {
- final int prime = 502357;
- return (int) ((((long) labelName.hashCode() << 8)
- + (resource.hashCode() << 4) + numActiveNMs) % prime);
+
+ public void addNodeId(NodeId node) {
+ nodeIds.add(node);
+ }
+
+ public void removeNodeId(NodeId node) {
+ nodeIds.remove(node);
+ }
+
+ public Set<NodeId> getAssociatedNodeIds() {
+ return new HashSet<NodeId>(nodeIds);
}
}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/StringAttributeValue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/StringAttributeValue.java
new file mode 100644
index 00000000000..12343a73c3d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/StringAttributeValue.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.nodelabels;
+
+import java.io.IOException;
+
+/**
+ * Attribute value for String NodeAttributeType.
+ */
+public class StringAttributeValue implements AttributeValue {
+ private String value = "";
+
+ @Override
+ public boolean compareForOperation(AttributeValue other,
+ AttributeExpressionOperation op) {
+ if (other instanceof StringAttributeValue) {
+ StringAttributeValue otherString = (StringAttributeValue) other;
+ switch (op) {
+ case IN:
+ return value.equals(otherString.value);
+ case NOTIN:
+ return !value.equals(otherString.value);
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public void validateAndInitializeValue(String valueStr) throws IOException {
+ NodeLabelUtil.checkAndThrowAttributeValue(valueStr);
+ this.value = valueStr;
+ }
+
+ @Override
+ public String getValue() {
+ return value;
+ }
+
+ public String toString() {
+ return getValue();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/AbstractFSNodeStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/AbstractFSNodeStore.java
new file mode 100644
index 00000000000..7127d112df3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/AbstractFSNodeStore.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.yarn.nodelabels.store.op.FSNodeStoreLogOp;
+import org.apache.hadoop.yarn.nodelabels.store.FSStoreOpHandler.StoreType;
+
+import java.io.EOFException;
+import java.io.IOException;
+
+/**
+ * Abstract class for File System based store.
+ *
+ * @param <M> manager used by the filesystem store. Currently node labels
+ * use CommonNodeLabelsManager.
+ */
+public abstract class AbstractFSNodeStore<M> {
+
+ protected static final Log LOG = LogFactory.getLog(AbstractFSNodeStore.class);
+
+ private StoreType storeType;
+ private FSDataOutputStream editlogOs;
+
+ private Path editLogPath;
+ private StoreSchema schema;
+
+ protected M manager;
+ protected Path fsWorkingPath;
+ protected FileSystem fs;
+
+ public AbstractFSNodeStore(StoreType storeType) {
+ this.storeType = storeType;
+ }
+
+ protected void initStore(Configuration conf, Path fsStorePath,
+ StoreSchema schema, M mgr) throws IOException {
+ this.schema = schema;
+ this.fsWorkingPath = fsStorePath;
+ this.manager = mgr;
+ initFileSystem(conf);
+ // mkdir of root dir path
+ fs.mkdirs(fsWorkingPath);
+ LOG.info("Created store directory :" + fsWorkingPath);
+ }
+
+ /**
+ * Filesystem store schema define the log name and mirror name.
+ */
+ public static class StoreSchema {
+ private String editLogName;
+ private String mirrorName;
+
+ public StoreSchema(String editLogName, String mirrorName) {
+ this.editLogName = editLogName;
+ this.mirrorName = mirrorName;
+ }
+ }
+
+ public void initFileSystem(Configuration conf) throws IOException {
+ Configuration confCopy = new Configuration(conf);
+ fs = fsWorkingPath.getFileSystem(confCopy);
+ // if it's local file system, use RawLocalFileSystem instead of
+ // LocalFileSystem, the latter one doesn't support append.
+ if (fs.getScheme().equals("file")) {
+ fs = ((LocalFileSystem) fs).getRaw();
+ }
+ }
+
+ protected void writeToLog(FSNodeStoreLogOp op) throws IOException {
+ try {
+ ensureAppendEditLogFile();
+ editlogOs.writeInt(op.getOpCode());
+ op.write(editlogOs, manager);
+ } finally {
+ ensureCloseEditlogFile();
+ }
+ }
+
+ protected void ensureAppendEditLogFile() throws IOException {
+ editlogOs = fs.append(editLogPath);
+ }
+
+ protected void ensureCloseEditlogFile() throws IOException {
+ editlogOs.close();
+ }
+
+ protected void loadFromMirror(Path newMirrorPath, Path oldMirrorPath)
+ throws IOException {
+ // If mirror.new exists, read from mirror.new
+ Path mirrorToRead = fs.exists(newMirrorPath) ?
+ newMirrorPath :
+ fs.exists(oldMirrorPath) ? oldMirrorPath : null;
+ if (mirrorToRead != null) {
+ try (FSDataInputStream is = fs.open(mirrorToRead)) {
+ StoreOp op = FSStoreOpHandler.getMirrorOp(storeType);
+ op.recover(is, manager);
+ }
+ }
+ }
+
+ protected StoreType getStoreType() {
+ return storeType;
+ }
+
+ public Path getFsWorkingPath() {
+ return fsWorkingPath;
+ }
+
+ protected void recoverFromStore() throws IOException {
+ /*
+ * Steps of recover
+ * 1) Read from last mirror (from mirror or mirror.old)
+ * 2) Read from last edit log, and apply such edit log
+ * 3) Write new mirror to mirror.writing
+ * 4) Rename mirror to mirror.old
+ * 5) Move mirror.writing to mirror
+ * 6) Remove mirror.old
+ * 7) Remove edit log and create a new empty edit log
+ */
+
+ // Open mirror from serialized file
+ Path mirrorPath = new Path(fsWorkingPath, schema.mirrorName);
+ Path oldMirrorPath = new Path(fsWorkingPath, schema.mirrorName + ".old");
+
+ loadFromMirror(mirrorPath, oldMirrorPath);
+
+ // Open and process editlog
+ editLogPath = new Path(fsWorkingPath, schema.editLogName);
+
+ loadManagerFromEditLog(editLogPath);
+
+ // Serialize current mirror to mirror.writing
+ Path writingMirrorPath =
+ new Path(fsWorkingPath, schema.mirrorName + ".writing");
+
+ try(FSDataOutputStream os = fs.create(writingMirrorPath, true)){
+ StoreOp op = FSStoreOpHandler.getMirrorOp(storeType);
+ op.write(os, manager);
+ }
+ // Move mirror to mirror.old
+ if (fs.exists(mirrorPath)) {
+ fs.delete(oldMirrorPath, false);
+ fs.rename(mirrorPath, oldMirrorPath);
+ }
+
+ // move mirror.writing to mirror
+ fs.rename(writingMirrorPath, mirrorPath);
+ fs.delete(writingMirrorPath, false);
+
+ // remove mirror.old
+ fs.delete(oldMirrorPath, false);
+
+ // create a new editlog file
+ editlogOs = fs.create(editLogPath, true);
+ editlogOs.close();
+
+ LOG.info("Finished write mirror at:" + mirrorPath.toString());
+ LOG.info("Finished create editlog file at:" + editLogPath.toString());
+ }
+
+ protected void loadManagerFromEditLog(Path editPath) throws IOException {
+ if (!fs.exists(editPath)) {
+ return;
+ }
+ try (FSDataInputStream is = fs.open(editPath)) {
+ while (true) {
+ try {
+ StoreOp storeOp = FSStoreOpHandler.get(is.readInt(), storeType);
+ storeOp.recover(is, manager);
+ } catch (EOFException e) {
+ // EOF hit, break
+ break;
+ }
+ }
+ }
+ }
+
+ public FileSystem getFs() {
+ return fs;
+ }
+
+ public void setFs(FileSystem fs) {
+ this.fs = fs;
+ }
+
+ protected void closeFSStore() {
+ IOUtils.closeStreams(fs, editlogOs);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/FSStoreOpHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/FSStoreOpHandler.java
new file mode 100644
index 00000000000..59a1860e315
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/FSStoreOpHandler.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store;
+
+import static org.apache.hadoop.yarn.nodelabels.store.FSStoreOpHandler.StoreType.NODE_ATTRIBUTE;
+import static org.apache.hadoop.yarn.nodelabels.store.FSStoreOpHandler.StoreType.NODE_LABEL_STORE;
+import org.apache.hadoop.yarn.nodelabels.store.op.AddClusterLabelOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.AddNodeToAttributeLogOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.FSNodeStoreLogOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.NodeAttributeMirrorOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.NodeLabelMirrorOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.NodeToLabelOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.RemoveClusterLabelOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.RemoveNodeToAttributeLogOp;
+import org.apache.hadoop.yarn.nodelabels.store.op.ReplaceNodeToAttributeLogOp;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * File system store op handler.
+ */
+public class FSStoreOpHandler {
+
+ private static Map<StoreType, Map<Integer, Class<? extends FSNodeStoreLogOp>>>
+ editLogOp;
+ private static Map<StoreType, Class<? extends FSNodeStoreLogOp>> mirrorOp;
+
+ /**
+ * Store Type enum to hold label and attribute.
+ */
+ public enum StoreType {
+ NODE_LABEL_STORE,
+ NODE_ATTRIBUTE
+ }
+
+ static {
+ editLogOp = new HashMap<>();
+ mirrorOp = new HashMap<>();
+
+ // registerLog edit log operation
+
+ //Node Label Operations
+ registerLog(NODE_LABEL_STORE, AddClusterLabelOp.OPCODE,
+ AddClusterLabelOp.class);
+ registerLog(NODE_LABEL_STORE, NodeToLabelOp.OPCODE, NodeToLabelOp.class);
+ registerLog(NODE_LABEL_STORE, RemoveClusterLabelOp.OPCODE,
+ RemoveClusterLabelOp.class);
+
+ // NodeAttribute operations
+ registerLog(NODE_ATTRIBUTE, AddNodeToAttributeLogOp.OPCODE,
+ AddNodeToAttributeLogOp.class);
+ registerLog(NODE_ATTRIBUTE, RemoveNodeToAttributeLogOp.OPCODE,
+ RemoveNodeToAttributeLogOp.class);
+ registerLog(NODE_ATTRIBUTE, ReplaceNodeToAttributeLogOp.OPCODE,
+ ReplaceNodeToAttributeLogOp.class);
+
+ // registerLog Mirror op
+
+ // Node label mirror operation
+ registerMirror(NODE_LABEL_STORE, NodeLabelMirrorOp.class);
+ //Node attribute mirror operation
+ registerMirror(NODE_ATTRIBUTE, NodeAttributeMirrorOp.class);
+
+ }
+
+ private static void registerMirror(StoreType type,
+ Class<? extends FSNodeStoreLogOp> clazz) {
+ mirrorOp.put(type, clazz);
+ }
+
+ private static void registerLog(StoreType type, int opcode,
+ Class<? extends FSNodeStoreLogOp> clazz) {
+ Map<Integer, Class<? extends FSNodeStoreLogOp>> ops = editLogOp.get(type);
+ Integer code = Integer.valueOf(opcode);
+ if (ops == null) {
+ Map<Integer, Class<? extends FSNodeStoreLogOp>> newOps = new HashMap<>();
+ newOps.put(code, clazz);
+ editLogOp.put(type, newOps);
+ } else {
+ ops.put(code, clazz);
+ }
+ }
+
+ /**
+ * Get mirror operation of store Type.
+ *
+ * @param storeType type of the store (node label / node attribute)
+ * @return instance of FSNodeStoreLogOp.
+ */
+ public static FSNodeStoreLogOp getMirrorOp(StoreType storeType) {
+ return newInstance(mirrorOp.get(storeType));
+ }
+
+ /**
+ * Will return StoreOp instance based on opCode and StoreType.
+ * @param opCode opcode of the edit-log entry
+ * @param storeType type of the store (node label / node attribute)
+ * @return instance of FSNodeStoreLogOp.
+ */
+ public static FSNodeStoreLogOp get(int opCode, StoreType storeType) {
+ return newInstance(editLogOp.get(storeType).get(opCode));
+ }
+
+ private static <T extends FSNodeStoreLogOp> T newInstance(
+ Class<? extends FSNodeStoreLogOp> clazz) {
+ FSNodeStoreLogOp instance = null;
+ if (clazz != null) {
+ try {
+ instance = clazz.newInstance();
+ } catch (Exception ex) {
+ throw new RuntimeException("Failed to instantiate " + clazz, ex);
+ }
+ }
+ return (T) instance;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/StoreOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/StoreOp.java
new file mode 100644
index 00000000000..e0b26da82e7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/StoreOp.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store;
+
+import java.io.IOException;
+
+/**
+ * Define the interface for store activity.
+ * Used by for FileSystem based operation.
+ *
+ * @param write to be done to
+ * @param read to be done from
+ * @param manager used
+ */
+public interface StoreOp<W, R, M> {
+
+ /**
+ * Write operation to persistent storage.
+ *
+ * @param write write to be done to
+ * @param mgr manager used by store
+ * @throws IOException
+ */
+ void write(W write, M mgr) throws IOException;
+
+ /**
+ * Read and populate StoreOp.
+ *
+ * @param read read to be done from
+ * @param mgr manager used by store
+ * @throws IOException
+ */
+ void recover(R read, M mgr) throws IOException;
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/AddClusterLabelOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/AddClusterLabelOp.java
new file mode 100644
index 00000000000..f0259b37e62
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/AddClusterLabelOp.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
+import org.apache.hadoop.yarn.server.api.protocolrecords
+ .AddToClusterNodeLabelsRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb
+ .AddToClusterNodeLabelsRequestPBImpl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.List;
+
+/**
+ * Add label operation for file system.
+ */
+public class AddClusterLabelOp
+ extends FSNodeStoreLogOp<CommonNodeLabelsManager> {
+
+ private List<NodeLabel> labels;
+
+ public static final int OPCODE = 0;
+
+ @Override
+ public void write(OutputStream os, CommonNodeLabelsManager mgr)
+ throws IOException {
+ ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest
+ .newInstance(labels)).getProto().writeDelimitedTo(os);
+ }
+
+ @Override
+ public void recover(InputStream is, CommonNodeLabelsManager mgr)
+ throws IOException {
+ labels = new AddToClusterNodeLabelsRequestPBImpl(
+ YarnServerResourceManagerServiceProtos
+ .AddToClusterNodeLabelsRequestProto
+ .parseDelimitedFrom(is)).getNodeLabels();
+ mgr.addToCluserNodeLabels(labels);
+ }
+
+ public AddClusterLabelOp setLabels(List<NodeLabel> nodeLabels) {
+ this.labels = nodeLabels;
+ return this;
+ }
+
+ public List<NodeLabel> getLabels() {
+ return labels;
+ }
+
+ @Override
+ public int getOpCode() {
+ return OPCODE;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/AddNodeToAttributeLogOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/AddNodeToAttributeLogOp.java
new file mode 100644
index 00000000000..4b92bcf9cca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/AddNodeToAttributeLogOp.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.List;
+
+/**
+ * File system Add Node to attribute mapping.
+ */
+public class AddNodeToAttributeLogOp
+ extends FSNodeStoreLogOp {
+
+ private List attributes;
+
+ public static final int OPCODE = 0;
+
+ @Override
+ public void write(OutputStream os, NodeAttributesManager mgr)
+ throws IOException {
+ ((NodesToAttributesMappingRequestPBImpl) NodesToAttributesMappingRequest
+ .newInstance(AttributeMappingOperationType.ADD, attributes, false))
+ .getProto().writeDelimitedTo(os);
+ }
+
+ @Override
+ public void recover(InputStream is, NodeAttributesManager mgr)
+ throws IOException {
+ NodesToAttributesMappingRequest request =
+ new NodesToAttributesMappingRequestPBImpl(
+ YarnServerResourceManagerServiceProtos
+ .NodesToAttributesMappingRequestProto
+ .parseDelimitedFrom(is));
+ mgr.addNodeAttributes(getNodeToAttributesMap(request));
+ }
+
+ public AddNodeToAttributeLogOp setAttributes(
+ List attributesList) {
+ this.attributes = attributesList;
+ return this;
+ }
+
+ @Override
+ public int getOpCode() {
+ return OPCODE;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/FSNodeStoreLogOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/FSNodeStoreLogOp.java
new file mode 100644
index 00000000000..bf4d1b9196c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/FSNodeStoreLogOp.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.nodelabels.store.StoreOp;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Defines all FileSystem editlog operation. All node label and attribute
+ * store write or read operation will be defined in this class.
+ *
+ * @param Manager used for each operation.
+ */
+public abstract class FSNodeStoreLogOp
+ implements StoreOp {
+
+ public abstract int getOpCode();
+
+ protected Map> getNodeToAttributesMap(
+ NodesToAttributesMappingRequest request) {
+ List attributes = request.getNodesToAttributes();
+ Map> nodeToAttrMap = new HashMap<>();
+ attributes.forEach((v) -> nodeToAttrMap
+ .put(v.getNode(), new HashSet<>(v.getNodeAttributes())));
+ return nodeToAttrMap;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeAttributeMirrorOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeAttributeMirrorOp.java
new file mode 100644
index 00000000000..dca0555abc9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeAttributeMirrorOp.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * File System Node Attribute Mirror read and write operation.
+ */
+public class NodeAttributeMirrorOp
+ extends FSNodeStoreLogOp {
+
+ @Override
+ public void write(OutputStream os, NodeAttributesManager mgr)
+ throws IOException {
+ ((NodesToAttributesMappingRequestPBImpl) NodesToAttributesMappingRequest
+ .newInstance(AttributeMappingOperationType.REPLACE,
+ mgr.getNodeToAttributes(
+ ImmutableSet.of(NodeAttribute.PREFIX_CENTRALIZED)), false))
+ .getProto().writeDelimitedTo(os);
+ }
+
+ @Override
+ public void recover(InputStream is, NodeAttributesManager mgr)
+ throws IOException {
+ NodesToAttributesMappingRequest request =
+ new NodesToAttributesMappingRequestPBImpl(
+ YarnServerResourceManagerServiceProtos
+ .NodesToAttributesMappingRequestProto
+ .parseDelimitedFrom(is));
+ mgr.replaceNodeAttributes(NodeAttribute.PREFIX_CENTRALIZED,
+ getNodeToAttributesMap(request));
+ }
+
+ @Override
+ public int getOpCode() {
+ return -1;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeLabelMirrorOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeLabelMirrorOp.java
new file mode 100644
index 00000000000..95004866458
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeLabelMirrorOp.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
+import org.apache.hadoop.yarn.server.api.protocolrecords
+ .ReplaceLabelsOnNodeRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb
+ .AddToClusterNodeLabelsRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb
+ .ReplaceLabelsOnNodeRequestPBImpl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * NodeLabel Mirror Op class.
+ */
+public class NodeLabelMirrorOp
+ extends FSNodeStoreLogOp {
+
+ public NodeLabelMirrorOp() {
+ super();
+ }
+
+ @Override
+ public void write(OutputStream os, CommonNodeLabelsManager mgr)
+ throws IOException {
+ ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequestPBImpl
+ .newInstance(mgr.getClusterNodeLabels())).getProto()
+ .writeDelimitedTo(os);
+ if (mgr.isCentralizedConfiguration()) {
+ ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
+ .newInstance(mgr.getNodeLabels())).getProto().writeDelimitedTo(os);
+ }
+ }
+
+ @Override
+ public void recover(InputStream is, CommonNodeLabelsManager mgr)
+ throws IOException {
+ List labels = new AddToClusterNodeLabelsRequestPBImpl(
+ YarnServerResourceManagerServiceProtos
+ .AddToClusterNodeLabelsRequestProto
+ .parseDelimitedFrom(is)).getNodeLabels();
+ mgr.addToCluserNodeLabels(labels);
+
+ if (mgr.isCentralizedConfiguration()) {
+ // Only load node to labels mapping while using centralized
+ // configuration
+ Map> nodeToLabels =
+ new ReplaceLabelsOnNodeRequestPBImpl(
+ YarnServerResourceManagerServiceProtos
+ .ReplaceLabelsOnNodeRequestProto
+ .parseDelimitedFrom(is)).getNodeToLabels();
+ mgr.replaceLabelsOnNode(nodeToLabels);
+ }
+ }
+
+ @Override
+ public int getOpCode() {
+ return -1;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeToLabelOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeToLabelOp.java
new file mode 100644
index 00000000000..df08ffcc15c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/NodeToLabelOp.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
+import org.apache.hadoop.yarn.server.api.protocolrecords
+ .ReplaceLabelsOnNodeRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb
+ .ReplaceLabelsOnNodeRequestPBImpl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Node to label mapping store operation for label.
+ */
+public class NodeToLabelOp
+ extends FSNodeStoreLogOp {
+
+ private Map> nodeToLabels;
+ public static final int OPCODE = 1;
+
+ @Override
+ public void write(OutputStream os, CommonNodeLabelsManager mgr)
+ throws IOException {
+ ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
+ .newInstance(nodeToLabels)).getProto().writeDelimitedTo(os);
+ }
+
+ @Override
+ public void recover(InputStream is, CommonNodeLabelsManager mgr)
+ throws IOException {
+ nodeToLabels = new ReplaceLabelsOnNodeRequestPBImpl(
+ YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto
+ .parseDelimitedFrom(is)).getNodeToLabels();
+ if (mgr.isCentralizedConfiguration()) {
+ mgr.replaceLabelsOnNode(nodeToLabels);
+ }
+ }
+
+ public NodeToLabelOp setNodeToLabels(
+ Map> nodeToLabelsList) {
+ this.nodeToLabels = nodeToLabelsList;
+ return this;
+ }
+
+ public Map> getNodeToLabels() {
+ return nodeToLabels;
+ }
+
+ @Override
+ public int getOpCode() {
+ return OPCODE;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java
new file mode 100644
index 00000000000..2fc4ac3dfc2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveClusterLabelOp.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
+import org.apache.hadoop.yarn.server.api.protocolrecords
+ .RemoveFromClusterNodeLabelsRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb
+ .RemoveFromClusterNodeLabelsRequestPBImpl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Collection;
+
+/**
+ * Remove label from cluster log store operation.
+ */
+public class RemoveClusterLabelOp
+ extends FSNodeStoreLogOp {
+
+ private Collection labels;
+
+ public static final int OPCODE = 2;
+
+ @Override
+ public void write(OutputStream os, CommonNodeLabelsManager mgr)
+ throws IOException {
+ ((RemoveFromClusterNodeLabelsRequestPBImpl)
+ RemoveFromClusterNodeLabelsRequest
+ .newInstance(Sets.newHashSet(labels.iterator()))).getProto()
+ .writeDelimitedTo(os);
+ }
+
+ @Override
+ public void recover(InputStream is, CommonNodeLabelsManager mgr)
+ throws IOException {
+ labels =
+ YarnServerResourceManagerServiceProtos
+ .RemoveFromClusterNodeLabelsRequestProto
+ .parseDelimitedFrom(is).getNodeLabelsList();
+ mgr.removeFromClusterNodeLabels(labels);
+ }
+
+ public RemoveClusterLabelOp setLabels(Collection nodeLabels) {
+ this.labels = nodeLabels;
+ return this;
+ }
+
+ public Collection getLabels() {
+ return labels;
+ }
+
+ @Override
+ public int getOpCode() {
+ return OPCODE;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveNodeToAttributeLogOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveNodeToAttributeLogOp.java
new file mode 100644
index 00000000000..1d13077418c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/RemoveNodeToAttributeLogOp.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.List;
+
+/**
+ * File system remove node attribute from node operation.
+ */
+public class RemoveNodeToAttributeLogOp
+ extends FSNodeStoreLogOp {
+
+ private List attributes;
+
+ public static final int OPCODE = 1;
+
+ @Override
+ public void write(OutputStream os, NodeAttributesManager mgr)
+ throws IOException {
+ ((NodesToAttributesMappingRequestPBImpl) NodesToAttributesMappingRequest
+ .newInstance(AttributeMappingOperationType.REMOVE, attributes, false))
+ .getProto().writeDelimitedTo(os);
+ }
+
+ @Override
+ public void recover(InputStream is, NodeAttributesManager mgr)
+ throws IOException {
+ NodesToAttributesMappingRequest request =
+ new NodesToAttributesMappingRequestPBImpl(
+ YarnServerResourceManagerServiceProtos
+ .NodesToAttributesMappingRequestProto
+ .parseDelimitedFrom(is));
+ mgr.removeNodeAttributes(getNodeToAttributesMap(request));
+ }
+
+ public RemoveNodeToAttributeLogOp setAttributes(
+ List attrs) {
+ this.attributes = attrs;
+ return this;
+ }
+
+ @Override
+ public int getOpCode() {
+ return OPCODE;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/ReplaceNodeToAttributeLogOp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/ReplaceNodeToAttributeLogOp.java
new file mode 100644
index 00000000000..54d7651c674
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/ReplaceNodeToAttributeLogOp.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels.store.op;
+
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.nodelabels.NodeAttributesManager;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.List;
+
+/**
+ * File system replace node attribute from node operation.
+ */
+public class ReplaceNodeToAttributeLogOp
+ extends FSNodeStoreLogOp {
+
+ private List attributes;
+ public static final int OPCODE = 2;
+
+ @Override
+ public void write(OutputStream os, NodeAttributesManager mgr)
+ throws IOException {
+ ((NodesToAttributesMappingRequestPBImpl) NodesToAttributesMappingRequest
+ .newInstance(AttributeMappingOperationType.REPLACE, attributes, false))
+ .getProto().writeDelimitedTo(os);
+ }
+
+ @Override
+ public void recover(InputStream is, NodeAttributesManager mgr)
+ throws IOException {
+ NodesToAttributesMappingRequest request =
+ new NodesToAttributesMappingRequestPBImpl(
+ YarnServerResourceManagerServiceProtos
+ .NodesToAttributesMappingRequestProto
+ .parseDelimitedFrom(is));
+ //Only CENTRALIZED is stored to FS system
+ mgr.replaceNodeAttributes(NodeAttribute.PREFIX_CENTRALIZED,
+ getNodeToAttributesMap(request));
+ }
+
+ public ReplaceNodeToAttributeLogOp setAttributes(
+ List attrs) {
+ this.attributes = attrs;
+ return this;
+ }
+
+ @Override
+ public int getOpCode() {
+ return OPCODE;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/package-info.java
new file mode 100644
index 00000000000..f6fb3d3ecaa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/op/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * File-system store operations (edit-log and mirror ops) for node label and
+ * node attribute persistence.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.nodelabels.store.op;
+import org.apache.hadoop.classification.InterfaceAudience;
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/package-info.java
new file mode 100644
index 00000000000..0444807071a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/store/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Node label / node attribute store abstractions used by the file-system
+ * based stores.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.nodelabels.store;
+import org.apache.hadoop.classification.InterfaceAudience;
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
index 077edf34900..156ed053010 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
@@ -33,9 +33,11 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshClusterMaxPriorityRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto;
@@ -43,13 +45,14 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
@@ -76,11 +79,15 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
@@ -96,8 +103,6 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import com.google.protobuf.ServiceException;
@@ -323,4 +328,19 @@ public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
return null;
}
}
+
+ // Client-side PB stub for mapAttributesToNodes: converts the request
+ // record to its proto form, invokes the RPC, and wraps the proto reply.
+ @Override
+ public NodesToAttributesMappingResponse mapAttributesToNodes(
+ NodesToAttributesMappingRequest request)
+ throws YarnException, IOException {
+ NodesToAttributesMappingRequestProto requestProto =
+ ((NodesToAttributesMappingRequestPBImpl) request).getProto();
+ try {
+ return new NodesToAttributesMappingResponsePBImpl(
+ proxy.mapAttributesToNodes(null, requestProto));
+ } catch (ServiceException e) {
+ // Unwraps the remote YarnException/IOException and rethrows it.
+ RPCUtil.unwrapAndThrowException(e);
+ // NOTE(review): presumed unreachable — unwrapAndThrowException is
+ // expected to always throw; the return satisfies the compiler.
+ return null;
+ }
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
index aafce0875e7..0036339bef2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
@@ -28,11 +28,15 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshClusterMaxPriorityRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshClusterMaxPriorityResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
@@ -48,13 +52,13 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
@@ -71,11 +75,15 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshClusterMaxPriorityResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
@@ -91,8 +99,6 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
-import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -336,4 +342,21 @@ public RefreshClusterMaxPriorityResponseProto refreshClusterMaxPriority(
throw new ServiceException(e);
}
}
+
+  /**
+   * Server-side PB service entry point for mapAttributesToNodes: converts
+   * the wire proto to a record, delegates to the real protocol
+   * implementation, and converts the response back to proto form.
+   */
+  @Override
+  public NodesToAttributesMappingResponseProto mapAttributesToNodes(
+      RpcController controller, NodesToAttributesMappingRequestProto proto)
+      throws ServiceException {
+    NodesToAttributesMappingRequest request =
+        new NodesToAttributesMappingRequestPBImpl(proto);
+    try {
+      NodesToAttributesMappingResponse response =
+          real.mapAttributesToNodes(request);
+      return ((NodesToAttributesMappingResponsePBImpl) response).getProto();
+    } catch (YarnException | IOException e) {
+      // Both checked failure modes are tunneled identically through the
+      // protobuf service layer; multi-catch avoids the duplicated body.
+      throw new ServiceException(e);
+    }
+  }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeToAttributesPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeToAttributesPBImpl.java
new file mode 100644
index 00000000000..94b967b4adf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeToAttributesPBImpl.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeToAttributesProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeToAttributesProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+
+/**
+ * PB record implementation mapping one node (host or host:port) to its
+ * list of {@link NodeAttribute}s. Follows the usual YARN PBImpl pattern:
+ * a local field cache shadows the proto until {@link #getProto()} merges
+ * it back.
+ */
+public class NodeToAttributesPBImpl extends NodeToAttributes {
+  private NodeToAttributesProto proto =
+      NodeToAttributesProto.getDefaultInstance();
+  private NodeToAttributesProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  // Local cache of attributes; takes precedence over proto until merged.
+  private List<NodeAttribute> nodeAttributes = null;
+
+  public NodeToAttributesPBImpl() {
+    builder = NodeToAttributesProto.newBuilder();
+  }
+
+  public NodeToAttributesPBImpl(NodeToAttributesProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    if (this.nodeAttributes != null) {
+      // Clear the repeated field before re-adding, otherwise repeated
+      // getProto() calls accumulate duplicate attribute entries.
+      builder.clearNodeAttributes();
+      for (NodeAttribute nodeAttribute : nodeAttributes) {
+        builder.addNodeAttributes(
+            ((NodeAttributePBImpl) nodeAttribute).getProto());
+      }
+    }
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  public NodeToAttributesProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = NodeToAttributesProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public String getNode() {
+    NodeToAttributesProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasNode()) {
+      return null;
+    }
+    return p.getNode();
+  }
+
+  @Override
+  public void setNode(String node) {
+    maybeInitBuilder();
+    builder.setNode(node);
+  }
+
+  // Lazily populates the local cache from the proto; no-op once cached.
+  private void initNodeAttributes() {
+    if (this.nodeAttributes != null) {
+      return;
+    }
+
+    NodeToAttributesProtoOrBuilder p = viaProto ? proto : builder;
+    List<NodeAttributeProto> nodeAttributesProtoList =
+        p.getNodeAttributesList();
+    List<NodeAttribute> attributes = new ArrayList<>();
+    if (nodeAttributesProtoList == null
+        || nodeAttributesProtoList.isEmpty()) {
+      this.nodeAttributes = attributes;
+      return;
+    }
+    for (NodeAttributeProto nodeAttributeProto : nodeAttributesProtoList) {
+      attributes.add(new NodeAttributePBImpl(nodeAttributeProto));
+    }
+    this.nodeAttributes = attributes;
+  }
+
+  @Override
+  public List<NodeAttribute> getNodeAttributes() {
+    initNodeAttributes();
+    return this.nodeAttributes;
+  }
+
+  @Override
+  public void setNodeAttributes(List<NodeAttribute> attributes) {
+    if (nodeAttributes == null) {
+      nodeAttributes = new ArrayList<>();
+    }
+    nodeAttributes.clear();
+    nodeAttributes.addAll(attributes);
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (obj instanceof NodeToAttributes) {
+      NodeToAttributes other = (NodeToAttributes) obj;
+      if (getNodeAttributes() == null) {
+        if (other.getNodeAttributes() != null) {
+          return false;
+        }
+      } else if (!getNodeAttributes().containsAll(other.getNodeAttributes())) {
+        return false;
+      }
+
+      if (getNode() == null) {
+        if (other.getNode() != null) {
+          return false;
+        }
+      } else if (!getNode().equals(other.getNode())) {
+        return false;
+      }
+
+      return true;
+    }
+    return false;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodesToAttributesMappingRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodesToAttributesMappingRequestPBImpl.java
new file mode 100644
index 00000000000..c1a86b3cd3b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodesToAttributesMappingRequestPBImpl.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AttributeMappingOperationTypeProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeToAttributesProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.AttributeMappingOperationType;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+
+/**
+ * PB record implementation of the admin request that maps nodes to
+ * attributes. Carries the node-to-attributes list, the mapping operation
+ * type (e.g. add/remove/replace), and the fail-on-unknown-nodes flag.
+ * Follows the usual YARN PBImpl pattern: a local field cache shadows the
+ * proto until {@link #getProto()} merges it back.
+ */
+public class NodesToAttributesMappingRequestPBImpl
+    extends NodesToAttributesMappingRequest {
+  private NodesToAttributesMappingRequestProto proto =
+      NodesToAttributesMappingRequestProto.getDefaultInstance();
+  private NodesToAttributesMappingRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  // Local cache of the mapping; takes precedence over proto until merged.
+  private List<NodeToAttributes> nodeAttributesMapping = null;
+
+  public NodesToAttributesMappingRequestPBImpl() {
+    builder = NodesToAttributesMappingRequestProto.newBuilder();
+  }
+
+  public NodesToAttributesMappingRequestPBImpl(
+      NodesToAttributesMappingRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    if (this.nodeAttributesMapping != null) {
+      // Clear the repeated field before re-adding, otherwise repeated
+      // getProto() calls accumulate duplicate entries.
+      builder.clearNodeToAttributes();
+      for (NodeToAttributes nodeAttributes : nodeAttributesMapping) {
+        builder.addNodeToAttributes(
+            ((NodeToAttributesPBImpl) nodeAttributes).getProto());
+      }
+    }
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  public NodesToAttributesMappingRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = NodesToAttributesMappingRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  @Override
+  public void setNodesToAttributes(List<NodeToAttributes> nodesToAttributes) {
+    // Validate the argument before touching any state, so a bad call
+    // cannot leave the record half-initialized.
+    if (nodesToAttributes == null) {
+      throw new IllegalArgumentException("nodesToAttributes cannot be null");
+    }
+    if (nodeAttributesMapping == null) {
+      nodeAttributesMapping = new ArrayList<>();
+    }
+    nodeAttributesMapping.clear();
+    nodeAttributesMapping.addAll(nodesToAttributes);
+  }
+
+  // Lazily populates the local cache from the proto; no-op once cached.
+  private void initNodeAttributesMapping() {
+    if (this.nodeAttributesMapping != null) {
+      return;
+    }
+
+    NodesToAttributesMappingRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    List<NodeToAttributesProto> nodeAttributesProtoList =
+        p.getNodeToAttributesList();
+    List<NodeToAttributes> attributes = new ArrayList<>();
+    if (nodeAttributesProtoList == null
+        || nodeAttributesProtoList.isEmpty()) {
+      this.nodeAttributesMapping = attributes;
+      return;
+    }
+    for (NodeToAttributesProto nodeAttributeProto : nodeAttributesProtoList) {
+      attributes.add(new NodeToAttributesPBImpl(nodeAttributeProto));
+    }
+    this.nodeAttributesMapping = attributes;
+  }
+
+  @Override
+  public List<NodeToAttributes> getNodesToAttributes() {
+    initNodeAttributesMapping();
+    return this.nodeAttributesMapping;
+  }
+
+  @Override
+  public void setFailOnUnknownNodes(boolean failOnUnknownNodes) {
+    maybeInitBuilder();
+    builder.setFailOnUnknownNodes(failOnUnknownNodes);
+  }
+
+  @Override
+  public boolean getFailOnUnknownNodes() {
+    NodesToAttributesMappingRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    return p.getFailOnUnknownNodes();
+  }
+
+  @Override
+  public void setOperation(AttributeMappingOperationType operation) {
+    maybeInitBuilder();
+    builder.setOperation(convertToProtoFormat(operation));
+  }
+
+  // Enum <-> proto-enum conversions rely on matching constant names.
+  private AttributeMappingOperationTypeProto convertToProtoFormat(
+      AttributeMappingOperationType operation) {
+    return AttributeMappingOperationTypeProto.valueOf(operation.name());
+  }
+
+  private AttributeMappingOperationType convertFromProtoFormat(
+      AttributeMappingOperationTypeProto operationTypeProto) {
+    return AttributeMappingOperationType.valueOf(operationTypeProto.name());
+  }
+
+  @Override
+  public AttributeMappingOperationType getOperation() {
+    NodesToAttributesMappingRequestProtoOrBuilder p =
+        viaProto ? proto : builder;
+    if (!p.hasOperation()) {
+      return null;
+    }
+    return convertFromProtoFormat(p.getOperation());
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (obj instanceof NodesToAttributesMappingRequest) {
+      NodesToAttributesMappingRequest other =
+          (NodesToAttributesMappingRequest) obj;
+      if (getNodesToAttributes() == null) {
+        if (other.getNodesToAttributes() != null) {
+          return false;
+        }
+      } else if (!getNodesToAttributes()
+          .containsAll(other.getNodesToAttributes())) {
+        return false;
+      }
+
+      if (getOperation() == null) {
+        if (other.getOperation() != null) {
+          return false;
+        }
+      } else if (!getOperation().equals(other.getOperation())) {
+        return false;
+      }
+
+      return getFailOnUnknownNodes() == other.getFailOnUnknownNodes();
+    }
+    return false;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodesToAttributesMappingResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodesToAttributesMappingResponsePBImpl.java
new file mode 100644
index 00000000000..955c3cbadfe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodesToAttributesMappingResponsePBImpl.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingResponseProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
+
+/**
+ * Proto class for node to attributes mapping response.
+ * The response currently carries no fields; this class only wraps the
+ * (empty) proto so it can travel through the PB RPC layer.
+ */
+public class NodesToAttributesMappingResponsePBImpl
+ extends NodesToAttributesMappingResponse {
+
+ private NodesToAttributesMappingResponseProto proto =
+ NodesToAttributesMappingResponseProto.getDefaultInstance();
+ private NodesToAttributesMappingResponseProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ public NodesToAttributesMappingResponsePBImpl() {
+ builder = NodesToAttributesMappingResponseProto.newBuilder();
+ }
+
+ public NodesToAttributesMappingResponsePBImpl(
+ NodesToAttributesMappingResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ // Returns the backing proto, building it from the builder on first use.
+ public NodesToAttributesMappingResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsRequestPBImpl.java
index a9358407149..afabcd919fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RemoveFromClusterNodeLabelsRequestPBImpl.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -21,12 +21,18 @@
import java.util.HashSet;
import java.util.Set;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProtoOrBuilder;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
-import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos
+ .RemoveFromClusterNodeLabelsRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos
+ .RemoveFromClusterNodeLabelsRequestProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords
+ .RemoveFromClusterNodeLabelsRequest;
-public class RemoveFromClusterNodeLabelsRequestPBImpl extends
- RemoveFromClusterNodeLabelsRequest {
+/**
+ * Proto class to handle RemoveFromClusterNodeLabels request.
+ */
+public class RemoveFromClusterNodeLabelsRequestPBImpl
+ extends RemoveFromClusterNodeLabelsRequest {
 Set<String> labels;
RemoveFromClusterNodeLabelsRequestProto proto =
RemoveFromClusterNodeLabelsRequestProto.getDefaultInstance();
@@ -102,7 +108,7 @@ public int hashCode() {
assert false : "hashCode not designed";
return 0;
}
-
+
@Override
public boolean equals(Object other) {
if (other == null)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 72e42d86dcf..b74fccda5e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2894,6 +2894,68 @@
1800000
+
+  <property>
+    <description>
+    This property determines which provider will be plugged by the
+    node manager to collect node-attributes. Administrators can
+    configure "config", "script" or the class name of the provider.
+    Configured class needs to extend
+    org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeAttributesProvider.
+    If "config" is configured, then "ConfigurationNodeLabelsProvider" and if
+    "script" is configured, then "ScriptBasedNodeAttributesProvider"
+    will be used.
+    </description>
+    <name>yarn.nodemanager.node-attributes.provider</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    The node attribute script NM runs to collect node attributes.
+    Script output Line starting with "NODE_ATTRIBUTE:" will be
+    considered as a record of node attribute, attribute name, type
+    and value should be delimited by comma. Each of such lines
+    will be parsed to a node attribute.
+    </description>
+    <name>yarn.nodemanager.node-attributes.provider.script.path</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    Command arguments passed to the node attribute script.
+    </description>
+    <name>yarn.nodemanager.node-attributes.provider.script.opts</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    Time interval that determines how long NM fetches node attributes
+    from a given provider. If -1 is configured then node labels are
+    retrieved from provider only during initialization. Defaults to 10 mins.
+    </description>
+    <name>yarn.nodemanager.node-attributes.provider.fetch-interval-ms</name>
+    <value>600000</value>
+  </property>
+
+  <property>
+    <description>
+    Timeout period after which NM will interrupt the node attribute
+    provider script which queries node attributes. Defaults to 20 mins.
+    </description>
+    <name>yarn.nodemanager.node-attributes.provider.fetch-timeout-ms</name>
+    <value>1200000</value>
+  </property>
+
+  <property>
+    <description>
+    When "yarn.nodemanager.node-attributes.provider" is configured with
+    "config" then ConfigurationNodeAttributesProvider fetches node attributes
+    from this parameter.
+    </description>
+    <name>yarn.nodemanager.node-attributes.provider.configured-node-attributes</name>
+    <value></value>
+  </property>
+
Timeout in seconds for YARN node graceful decommission.
@@ -3888,4 +3950,20 @@
yarn.nodemanager.elastic-memory-control.timeout-sec
5
+
+  <property>
+    <description>
+    URI for NodeAttributeManager. The default value is
+    /tmp/hadoop-yarn-${user}/node-attribute/ in the local filesystem.
+    </description>
+    <name>yarn.node-attribute.fs-store.root-dir</name>
+    <value></value>
+  </property>
+
+  <property>
+    <description>
+    Choose different implementation of node attribute's storage
+    </description>
+    <name>yarn.node-attribute.fs-store.impl.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.FileSystemNodeAttributeStore</value>
+  </property>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index 4c660c002ff..cdb6c4664fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.yarn.api;
import java.io.IOException;
+import java.util.Arrays;
import org.apache.commons.lang3.Range;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
@@ -53,8 +54,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAttributesToNodesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAttributesToNodesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterMetricsResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeAttributesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeAttributesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetClusterNodesRequestPBImpl;
@@ -71,6 +76,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetLabelsToNodesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNewApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToAttributesRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToAttributesResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetNodesToLabelsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoRequestPBImpl;
@@ -106,7 +113,6 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
-import org.apache.hadoop.yarn.api.records.CollectorInfo;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -114,6 +120,7 @@
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ApplicationTimeout;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
@@ -124,9 +131,13 @@
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.api.records.NMToken;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeKey;
+import org.apache.hadoop.yarn.api.records.NodeAttributeInfo;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeToAttributeValue;
import org.apache.hadoop.yarn.api.records.PreemptionContainer;
import org.apache.hadoop.yarn.api.records.PreemptionContract;
import org.apache.hadoop.yarn.api.records.PreemptionMessage;
@@ -145,8 +156,8 @@
import org.apache.hadoop.yarn.api.records.ReservationRequests;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.ResourceSizing;
@@ -176,9 +187,13 @@
import org.apache.hadoop.yarn.api.records.impl.pb.ExecutionTypeRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.LocalResourcePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributeKeyPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributeInfoPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeToAttributeValuePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContainerPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionContractPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl;
@@ -214,9 +229,14 @@
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto;
import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeKeyProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeInfoProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeToAttributeValueProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeToAttributesProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto;
@@ -238,6 +258,7 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.NodesToAttributesMappingRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
@@ -263,6 +284,7 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptReportResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationAttemptsRequestProto;
@@ -293,6 +315,8 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.IncreaseContainersResourceResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto;
@@ -317,13 +341,14 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesResponseProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileRequestProto;
-import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileResponseProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodeToAttributes;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodeToAttributesPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.NodesToAttributesMappingRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
@@ -351,7 +376,6 @@
import org.junit.Test;
import com.google.common.collect.ImmutableSet;
-import java.util.Arrays;
/**
* Test class for YARN API protocol records.
@@ -434,6 +458,13 @@ public static void setup() throws Exception {
generateByNewInstance(ResourceSizing.class);
generateByNewInstance(SchedulingRequest.class);
generateByNewInstance(RejectedSchedulingRequest.class);
+ //for Node attribute support
+ generateByNewInstance(NodeAttributeKey.class);
+ generateByNewInstance(NodeAttribute.class);
+ generateByNewInstance(NodeToAttributes.class);
+ generateByNewInstance(NodeToAttributeValue.class);
+ generateByNewInstance(NodeAttributeInfo.class);
+ generateByNewInstance(NodesToAttributesMappingRequest.class);
}
@Test
@@ -1228,4 +1259,75 @@ public void testGetAllResourceTypesInfoResponsePBImpl() throws Exception {
validatePBImplRecord(GetAllResourceTypeInfoResponsePBImpl.class,
YarnServiceProtos.GetAllResourceTypeInfoResponseProto.class);
}
+
+ @Test
+ public void testNodeAttributeKeyPBImpl() throws Exception {
+ validatePBImplRecord(NodeAttributeKeyPBImpl.class,
+ NodeAttributeKeyProto.class);
+ }
+
+ @Test
+ public void testNodeToAttributeValuePBImpl() throws Exception {
+ validatePBImplRecord(NodeToAttributeValuePBImpl.class,
+ NodeToAttributeValueProto.class);
+ }
+
+ @Test
+ public void testNodeAttributePBImpl() throws Exception {
+ validatePBImplRecord(NodeAttributePBImpl.class, NodeAttributeProto.class);
+ }
+
+ @Test
+ public void testNodeAttributeInfoPBImpl() throws Exception {
+ validatePBImplRecord(NodeAttributeInfoPBImpl.class,
+ NodeAttributeInfoProto.class);
+ }
+
+ @Test
+ public void testNodeToAttributesPBImpl() throws Exception {
+ validatePBImplRecord(NodeToAttributesPBImpl.class,
+ NodeToAttributesProto.class);
+ }
+
+ @Test
+ public void testNodesToAttributesMappingRequestPBImpl() throws Exception {
+ validatePBImplRecord(NodesToAttributesMappingRequestPBImpl.class,
+ NodesToAttributesMappingRequestProto.class);
+ }
+
+ @Test
+ public void testGetAttributesToNodesRequestPBImpl() throws Exception {
+ validatePBImplRecord(GetAttributesToNodesRequestPBImpl.class,
+ YarnServiceProtos.GetAttributesToNodesRequestProto.class);
+ }
+
+ @Test
+ public void testGetAttributesToNodesResponsePBImpl() throws Exception {
+ validatePBImplRecord(GetAttributesToNodesResponsePBImpl.class,
+ YarnServiceProtos.GetAttributesToNodesResponseProto.class);
+ }
+
+ @Test
+ public void testGetClusterNodeAttributesRequestPBImpl() throws Exception {
+ validatePBImplRecord(GetClusterNodeAttributesRequestPBImpl.class,
+ YarnServiceProtos.GetClusterNodeAttributesRequestProto.class);
+ }
+
+ @Test
+ public void testGetClusterNodeAttributesResponsePBImpl() throws Exception {
+ validatePBImplRecord(GetClusterNodeAttributesResponsePBImpl.class,
+ YarnServiceProtos.GetClusterNodeAttributesResponseProto.class);
+ }
+
+ @Test
+ public void testGetNodesToAttributesRequestPBImpl() throws Exception {
+ validatePBImplRecord(GetNodesToAttributesRequestPBImpl.class,
+ YarnServiceProtos.GetNodesToAttributesRequestProto.class);
+ }
+
+ @Test
+ public void testGetNodesToAttributesResponsePBImpl() throws Exception {
+ validatePBImplRecord(GetNodesToAttributesResponsePBImpl.class,
+ YarnServiceProtos.GetNodesToAttributesResponseProto.class);
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java
index 64c74c2baa8..61373dcdb04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/DummyCommonNodeLabelsManager.java
@@ -43,6 +43,12 @@ public void recover()
throws IOException {
}
+ @Override
+ public void init(Configuration conf, CommonNodeLabelsManager mgr)
+ throws Exception {
+
+ }
+
@Override
public void removeClusterNodeLabels(Collection labels)
throws IOException {
@@ -65,8 +71,6 @@ public void close() throws IOException {
// do nothing
}
};
-
- this.store.setNodeLabelsManager(this);
}
@Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
index ed2f4aa6c74..36dbc2b9bc8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestFileSystemNodeLabelsStore.java
@@ -96,7 +96,7 @@ public void after() throws IOException {
if (mgr.store instanceof FileSystemNodeLabelsStore) {
FileSystemNodeLabelsStore fsStore =
((FileSystemNodeLabelsStore) mgr.store);
- fsStore.fs.delete(fsStore.fsWorkingPath, true);
+ fsStore.getFs().delete(fsStore.getFsWorkingPath(), true);
}
mgr.stop();
}
@@ -342,12 +342,12 @@ public void testSerilizationAfterRecovery() throws Exception {
public void testRootMkdirOnInitStore() throws Exception {
final FileSystem mockFs = Mockito.mock(FileSystem.class);
FileSystemNodeLabelsStore mockStore = new FileSystemNodeLabelsStore() {
- void setFileSystem(Configuration conf) throws IOException {
- fs = mockFs;
+ public void initFileSystem(Configuration config) throws IOException {
+ setFs(mockFs);
}
};
- mockStore.setNodeLabelsManager(mgr);
- mockStore.fs = mockFs;
+
+ mockStore.setFs(mockFs);
verifyMkdirsCount(mockStore, true, 1);
verifyMkdirsCount(mockStore, false, 2);
verifyMkdirsCount(mockStore, true, 3);
@@ -357,10 +357,10 @@ void setFileSystem(Configuration conf) throws IOException {
private void verifyMkdirsCount(FileSystemNodeLabelsStore store,
boolean existsRetVal, int expectedNumOfCalls)
throws Exception {
- Mockito.when(store.fs.exists(Mockito.any(
+ Mockito.when(store.getFs().exists(Mockito.any(
Path.class))).thenReturn(existsRetVal);
- store.init(conf);
- Mockito.verify(store.fs,Mockito.times(
+ store.init(conf, mgr);
+ Mockito.verify(store.getFs(), Mockito.times(
expectedNumOfCalls)).mkdirs(Mockito.any(Path
.class));
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtil.java
new file mode 100644
index 00000000000..afdfcbbb5f6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/nodelabels/TestNodeLabelUtil.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.nodelabels;
+
+import static org.junit.Assert.fail;
+import org.junit.Test;
+
+/**
+ * Test class to verify node label util ops.
+ */
+public class TestNodeLabelUtil {
+
+ @Test
+ public void testAttributeValueAddition() {
+ String[] values =
+ new String[]{"1_8", "1.8", "ABZ", "az", "a-z", "a_z",
+ "123456789"};
+ for (String val : values) {
+ try {
+ NodeLabelUtil.checkAndThrowAttributeValue(val);
+ } catch (Exception e) {
+ fail("Exception should not be thrown for valid NodeAttributeValue: " + val);
+ }
+ }
+
+ String[] invalidVals = new String[]{"_18", "1,8", "1/5", ".15", "1\\5"};
+ for (String val : invalidVals) {
+ try {
+ NodeLabelUtil.checkAndThrowAttributeValue(val);
+ fail("Exception expected for invalid NodeAttributeValue: " + val);
+ } catch (Exception e) {
+ // IGNORE
+ }
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
index f238f79f172..4f9922507be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NodeHeartbeatRequest.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
public abstract class NodeHeartbeatRequest {
@@ -61,6 +62,18 @@ public static NodeHeartbeatRequest newInstance(NodeStatus nodeStatus,
return nodeHeartbeatRequest;
}
+ public static NodeHeartbeatRequest newInstance(NodeStatus nodeStatus,
+ MasterKey lastKnownContainerTokenMasterKey,
+ MasterKey lastKnownNMTokenMasterKey, Set<NodeLabel> nodeLabels,
+ Set<NodeAttribute> nodeAttributes,
+ Map<ApplicationId, AppCollectorData> registeringCollectors) {
+ NodeHeartbeatRequest request = NodeHeartbeatRequest
+ .newInstance(nodeStatus, lastKnownContainerTokenMasterKey,
+ lastKnownNMTokenMasterKey, nodeLabels, registeringCollectors);
+ request.setNodeAttributes(nodeAttributes);
+ return request;
+ }
+
public abstract NodeStatus getNodeStatus();
public abstract void setNodeStatus(NodeStatus status);
@@ -85,4 +98,8 @@ public abstract void setLogAggregationReportsForApps(
public abstract void setRegisteringCollectors(Map<ApplicationId, AppCollectorData> appCollectorsMap);
+
+ public abstract Set<NodeAttribute> getNodeAttributes();
+
+ public abstract void setNodeAttributes(Set<NodeAttribute> nodeAttributes);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
index 1ffd223f8a6..c59127a74b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
@@ -27,6 +27,9 @@
import java.util.Set;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.impl.pb.NodeAttributePBImpl;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos;
import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
@@ -36,6 +39,7 @@
import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
@@ -60,6 +64,7 @@
private MasterKey lastKnownContainerTokenMasterKey = null;
private MasterKey lastKnownNMTokenMasterKey = null;
private Set<NodeLabel> labels = null;
+ private Set<NodeAttribute> attributes = null;
private List logAggregationReportsForApps = null;
private Map registeringCollectors = null;
@@ -115,6 +120,15 @@ private void mergeLocalToBuilder() {
}
builder.setNodeLabels(newBuilder.build());
}
+ if (this.attributes != null) {
+ builder.clearNodeAttributes();
+ YarnServerCommonServiceProtos.NodeAttributesProto.Builder attBuilder =
+ YarnServerCommonServiceProtos.NodeAttributesProto.newBuilder();
+ for (NodeAttribute attribute : attributes) {
+ attBuilder.addNodeAttributes(convertToProtoFormat(attribute));
+ }
+ builder.setNodeAttributes(attBuilder.build());
+ }
if (this.logAggregationReportsForApps != null) {
addLogAggregationStatusForAppsToProto();
}
@@ -371,6 +385,44 @@ private NodeLabelProto convertToProtoFormat(NodeLabel t) {
return ((NodeLabelPBImpl)t).getProto();
}
+ @Override
+ public Set<NodeAttribute> getNodeAttributes() {
+ initNodeAttributes();
+ return this.attributes;
+ }
+
+ private void initNodeAttributes() {
+ if (this.attributes != null) {
+ return;
+ }
+ NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasNodeAttributes()) {
+ return;
+ }
+ YarnServerCommonServiceProtos.NodeAttributesProto nodeAttributes =
+ p.getNodeAttributes();
+ attributes = new HashSet<>();
+ for (NodeAttributeProto attributeProto :
+ nodeAttributes.getNodeAttributesList()) {
+ attributes.add(convertFromProtoFormat(attributeProto));
+ }
+ }
+
+ @Override
+ public void setNodeAttributes(Set<NodeAttribute> nodeAttributes) {
+ maybeInitBuilder();
+ builder.clearNodeAttributes();
+ this.attributes = nodeAttributes;
+ }
+
+ private NodeAttributePBImpl convertFromProtoFormat(NodeAttributeProto p) {
+ return new NodeAttributePBImpl(p);
+ }
+
+ private NodeAttributeProto convertToProtoFormat(NodeAttribute attribute) {
+ return ((NodeAttributePBImpl) attribute).getProto();
+ }
+
@Override
public List getLogAggregationReportsForApps() {
if (this.logAggregationReportsForApps != null) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
index b6145c99ef0..b9f35a52a06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
@@ -54,6 +54,7 @@
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
@@ -199,7 +200,7 @@ public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
NodeUpdateType nodeUpdateType) {
return newNodeReport(nodeId, nodeState, httpAddress, rackName, used,
capability, numContainers, healthReport, lastHealthReportTime,
- nodeLabels, null, null, decommissioningTimeout, nodeUpdateType);
+ nodeLabels, null, null, decommissioningTimeout, nodeUpdateType, null);
}
public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
@@ -207,7 +208,7 @@ public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
int numContainers, String healthReport, long lastHealthReportTime,
Set<NodeLabel> nodeLabels, ResourceUtilization containersUtilization,
ResourceUtilization nodeUtilization, Integer decommissioningTimeout,
- NodeUpdateType nodeUpdateType) {
+ NodeUpdateType nodeUpdateType, Set<NodeAttribute> attrs) {
NodeReport nodeReport = recordFactory.newRecordInstance(NodeReport.class);
nodeReport.setNodeId(nodeId);
nodeReport.setNodeState(nodeState);
@@ -223,6 +224,7 @@ public static NodeReport newNodeReport(NodeId nodeId, NodeState nodeState,
nodeReport.setNodeUtilization(nodeUtilization);
nodeReport.setDecommissioningTimeout(decommissioningTimeout);
nodeReport.setNodeUpdateType(nodeUpdateType);
+ nodeReport.setNodeAttributes(attrs);
return nodeReport;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index 387ddb43213..0b8c4a384d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -58,6 +58,10 @@ message NodeLabelsProto {
repeated NodeLabelProto nodeLabels = 1;
}
+message NodeAttributesProto {
+ repeated NodeAttributeProto nodeAttributes = 1;
+}
+
message RegisterNodeManagerRequestProto {
optional NodeIdProto node_id = 1;
optional int32 http_port = 3;
@@ -95,6 +99,7 @@ message NodeHeartbeatRequestProto {
optional NodeLabelsProto nodeLabels = 4;
repeated LogAggregationReportProto log_aggregation_reports_for_apps = 5;
repeated AppCollectorDataProto registering_collectors = 6;
+ optional NodeAttributesProto nodeAttributes = 7;
}
message LogAggregationReportProto {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index 9b4d91d3dc5..ab8b6ff8dd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -53,8 +53,12 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
@@ -71,6 +75,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
@@ -163,6 +169,8 @@
import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.NodesToAttributesMappingResponse;
import com.google.common.base.Strings;
@@ -180,15 +188,15 @@
private HashSet applicationMap = new HashSet<>();
private HashSet keepContainerOnUams = new HashSet<>();
- private HashMap<ApplicationId, List<ContainerId>>
- applicationContainerIdMap = new HashMap<>();
+ private HashMap<ApplicationId, List<ContainerId>> applicationContainerIdMap = new HashMap<>();
private AtomicInteger containerIndex = new AtomicInteger(0);
private Configuration conf;
private int subClusterId;
final private AtomicInteger applicationCounter = new AtomicInteger(0);
// True if the Mock RM is running, false otherwise.
- // This property allows us to write tests for specific scenario as YARN RM
+ // This property allows us to write tests for specific scenarios such as YARN RM
// down e.g. network issue, failover.
private boolean isRunning;
@@ -504,7 +512,6 @@ public KillApplicationResponse forceKillApplication(
throw new ApplicationNotFoundException(
"Trying to kill an absent application: " + appId);
}
- keepContainerOnUams.remove(appId);
}
LOG.info("Force killing application: " + appId);
return KillApplicationResponse.newInstance(true);
@@ -892,4 +899,30 @@ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
return null;
}
+
+ @Override
+ public GetAttributesToNodesResponse getAttributesToNodes(
+ GetAttributesToNodesRequest request) throws YarnException, IOException {
+ return null;
+ }
+
+ @Override
+ public GetClusterNodeAttributesResponse getClusterNodeAttributes(
+ GetClusterNodeAttributesRequest request)
+ throws YarnException, IOException {
+ return null;
+ }
+
+ @Override
+ public GetNodesToAttributesResponse getNodesToAttributes(
+ GetNodesToAttributesRequest request) throws YarnException, IOException {
+ return null;
+ }
+
+ @Override
+ public NodesToAttributesMappingResponse mapAttributesToNodes(
+ NodesToAttributesMappingRequest request)
+ throws YarnException, IOException {
+ return null;
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
index 74f19e5a4b9..e6e79d3f5dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java
@@ -24,7 +24,9 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import com.google.common.collect.Sets;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
@@ -39,6 +41,8 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
@@ -173,6 +177,13 @@ public void testNodeHeartBeatRequest() throws IOException {
nodeStatus.setOpportunisticContainersStatus(opportunisticContainersStatus);
record.setNodeStatus(nodeStatus);
+ Set<NodeAttribute> attributeSet =
+ Sets.newHashSet(NodeAttribute.newInstance("attributeA",
+ NodeAttributeType.STRING, "valueA"),
+ NodeAttribute.newInstance("attributeB",
+ NodeAttributeType.STRING, "valueB"));
+ record.setNodeAttributes(attributeSet);
+
NodeHeartbeatRequestPBImpl pb = new
NodeHeartbeatRequestPBImpl(
((NodeHeartbeatRequestPBImpl) record).getProto());
@@ -183,6 +194,7 @@ public void testNodeHeartBeatRequest() throws IOException {
Assert.assertEquals(321,
pb.getNodeStatus().getOpportunisticContainersStatus()
.getWaitQueueLength());
+ Assert.assertEquals(2, pb.getNodeAttributes().size());
}
@Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index b54a6b7400e..6eda4a80b77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -66,6 +66,9 @@
import org.apache.hadoop.yarn.server.nodemanager.nodelabels.ConfigurationNodeLabelsProvider;
import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider;
import org.apache.hadoop.yarn.server.nodemanager.nodelabels.ScriptBasedNodeLabelsProvider;
+import org.apache.hadoop.yarn.server.nodemanager.nodelabels.ScriptBasedNodeAttributesProvider;
+import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeAttributesProvider;
+import org.apache.hadoop.yarn.server.nodemanager.nodelabels.ConfigurationNodeAttributesProvider;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMLeveldbStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
@@ -123,6 +126,7 @@ public int getExitCode() {
private ApplicationACLsManager aclsManager;
private NodeHealthCheckerService nodeHealthChecker;
private NodeLabelsProvider nodeLabelsProvider;
+ private NodeAttributesProvider nodeAttributesProvider;
private LocalDirsHandlerService dirsHandler;
private Context context;
private AsyncDispatcher dispatcher;
@@ -162,14 +166,45 @@ public static long getNMStartupTime() {
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker,
- metrics, nodeLabelsProvider);
+ metrics);
}
- protected NodeStatusUpdater createNodeStatusUpdater(Context context,
- Dispatcher dispatcher, NodeHealthCheckerService healthChecker,
- NodeLabelsProvider nodeLabelsProvider) {
- return new NodeStatusUpdaterImpl(context, dispatcher, healthChecker,
- metrics, nodeLabelsProvider);
+ protected NodeAttributesProvider createNodeAttributesProvider(
+ Configuration conf) throws IOException {
+ NodeAttributesProvider attributesProvider = null;
+ String providerString =
+ conf.get(YarnConfiguration.NM_NODE_ATTRIBUTES_PROVIDER_CONFIG, null);
+ if (providerString == null || providerString.trim().length() == 0) {
+ return attributesProvider;
+ }
+ switch (providerString.trim().toLowerCase()) {
+ case YarnConfiguration.CONFIG_NODE_DESCRIPTOR_PROVIDER:
+ attributesProvider = new ConfigurationNodeAttributesProvider();
+ break;
+ case YarnConfiguration.SCRIPT_NODE_DESCRIPTOR_PROVIDER:
+ attributesProvider = new ScriptBasedNodeAttributesProvider();
+ break;
+ default:
+ try {
+ Class<? extends NodeAttributesProvider> labelsProviderClass =
+ conf.getClass(YarnConfiguration.NM_NODE_ATTRIBUTES_PROVIDER_CONFIG,
+ null, NodeAttributesProvider.class);
+ attributesProvider = labelsProviderClass.newInstance();
+ } catch (InstantiationException | IllegalAccessException
+ | RuntimeException e) {
+ LOG.error("Failed to create NodeAttributesProvider"
+ + " based on Configuration", e);
+ throw new IOException(
+ "Failed to create NodeAttributesProvider : "
+ + e.getMessage(), e);
+ }
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Distributed Node Attributes is enabled"
+ + " with provider class as : "
+ + attributesProvider.getClass().toString());
+ }
+ return attributesProvider;
}
protected NodeLabelsProvider createNodeLabelsProvider(Configuration conf)
@@ -182,10 +217,10 @@ protected NodeLabelsProvider createNodeLabelsProvider(Configuration conf)
return provider;
}
switch (providerString.trim().toLowerCase()) {
- case YarnConfiguration.CONFIG_NODE_LABELS_PROVIDER:
+ case YarnConfiguration.CONFIG_NODE_DESCRIPTOR_PROVIDER:
provider = new ConfigurationNodeLabelsProvider();
break;
- case YarnConfiguration.SCRIPT_NODE_LABELS_PROVIDER:
+ case YarnConfiguration.SCRIPT_NODE_DESCRIPTOR_PROVIDER:
provider = new ScriptBasedNodeLabelsProvider();
break;
default:
@@ -407,16 +442,19 @@ protected void serviceInit(Configuration conf) throws Exception {
((NMContext)context).setContainerExecutor(exec);
((NMContext)context).setDeletionService(del);
- nodeLabelsProvider = createNodeLabelsProvider(conf);
+ nodeStatusUpdater =
+ createNodeStatusUpdater(context, dispatcher, nodeHealthChecker);
- if (null == nodeLabelsProvider) {
- nodeStatusUpdater =
- createNodeStatusUpdater(context, dispatcher, nodeHealthChecker);
- } else {
+ nodeLabelsProvider = createNodeLabelsProvider(conf);
+ if (nodeLabelsProvider != null) {
addIfService(nodeLabelsProvider);
- nodeStatusUpdater =
- createNodeStatusUpdater(context, dispatcher, nodeHealthChecker,
- nodeLabelsProvider);
+ nodeStatusUpdater.setNodeLabelsProvider(nodeLabelsProvider);
+ }
+
+ nodeAttributesProvider = createNodeAttributesProvider(conf);
+ if (nodeAttributesProvider != null) {
+ addIfService(nodeAttributesProvider);
+ nodeStatusUpdater.setNodeAttributesProvider(nodeAttributesProvider);
}
nodeResourceMonitor = createNodeResourceMonitor();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java
index 08892d20799..142cbbc9cbd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdater.java
@@ -20,6 +20,8 @@
import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeAttributesProvider;
+import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider;
public interface NodeStatusUpdater extends Service {
@@ -59,4 +61,16 @@
* @param ex exception that makes the node unhealthy
*/
void reportException(Exception ex);
+
+ /**
+ * Sets a node attributes provider to node manager.
+ * @param provider
+ */
+ void setNodeAttributesProvider(NodeAttributesProvider provider);
+
+ /**
+ * Sets a node labels provider to the node manager.
+ * @param provider
+ */
+ void setNodeLabelsProvider(NodeLabelsProvider provider);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index faf7adb0f16..df76ed715dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -34,11 +34,6 @@
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePluginManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputByteBuffer;
@@ -57,6 +52,7 @@
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -64,6 +60,7 @@
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
+import org.apache.hadoop.yarn.nodelabels.NodeLabelUtil;
import org.apache.hadoop.yarn.server.api.ResourceManagerConstants;
import org.apache.hadoop.yarn.server.api.ResourceTracker;
import org.apache.hadoop.yarn.server.api.ServerRMProxy;
@@ -76,23 +73,28 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit;
-import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.server.api.records.NodeStatus;
+import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePluginManager;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeAttributesProvider;
import org.apache.hadoop.yarn.server.nodemanager.nodelabels.NodeLabelsProvider;
import org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher;
import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
-import org.apache.hadoop.yarn.util.resource.Resources;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
@@ -152,21 +154,16 @@
Set<ContainerId> pendingContainersToRemove = new HashSet<>();
private NMNodeLabelsHandler nodeLabelsHandler;
- private final NodeLabelsProvider nodeLabelsProvider;
+ private NMNodeAttributesHandler nodeAttributesHandler;
+ private NodeLabelsProvider nodeLabelsProvider;
+ private NodeAttributesProvider nodeAttributesProvider;
public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher,
NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics) {
- this(context, dispatcher, healthChecker, metrics, null);
- }
-
- public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher,
- NodeHealthCheckerService healthChecker, NodeManagerMetrics metrics,
- NodeLabelsProvider nodeLabelsProvider) {
super(NodeStatusUpdaterImpl.class.getName());
this.healthChecker = healthChecker;
this.context = context;
this.dispatcher = dispatcher;
- this.nodeLabelsProvider = nodeLabelsProvider;
this.metrics = metrics;
this.recentlyStoppedContainers = new LinkedHashMap();
this.pendingCompletedContainers =
@@ -175,6 +172,16 @@ public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher,
new ArrayList();
}
+ @Override
+ public void setNodeAttributesProvider(NodeAttributesProvider provider) {
+ this.nodeAttributesProvider = provider;
+ }
+
+ @Override
+ public void setNodeLabelsProvider(NodeLabelsProvider provider) {
+ this.nodeLabelsProvider = provider;
+ }
+
@Override
protected void serviceInit(Configuration conf) throws Exception {
this.totalResource = NodeManagerHardwareUtils.getNodeResources(conf);
@@ -214,7 +221,11 @@ protected void serviceInit(Configuration conf) throws Exception {
YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION,
YarnConfiguration.DEFAULT_NM_RESOURCEMANAGER_MINIMUM_VERSION);
- nodeLabelsHandler = createNMNodeLabelsHandler(nodeLabelsProvider);
+ nodeLabelsHandler =
+ createNMNodeLabelsHandler(nodeLabelsProvider);
+ nodeAttributesHandler =
+ createNMNodeAttributesHandler(nodeAttributesProvider);
+
// Default duration to track stopped containers on nodemanager is 10Min.
// This should not be assigned very large value as it will remember all the
// containers stopped during that time.
@@ -856,6 +867,43 @@ private NMNodeLabelsHandler createNMNodeLabelsHandler(
}
}
+ /**
+ * Returns a handler based on the configured node attributes provider.
+ * returns null if no provider is configured.
+ * @param provider
+ * @return attributes handler
+ */
+ private NMNodeAttributesHandler createNMNodeAttributesHandler(
+ NodeAttributesProvider provider) {
+ return provider == null ? null :
+ new NMDistributedNodeAttributesHandler(nodeAttributesProvider);
+ }
+
+ private interface NMNodeAttributesHandler {
+
+ /**
+ * @return the node attributes of this node manager.
+ */
+ Set<NodeAttribute> getNodeAttributesForHeartbeat();
+ }
+
+ private static class NMDistributedNodeAttributesHandler
+ implements NMNodeAttributesHandler {
+
+ private final NodeAttributesProvider attributesProvider;
+
+ protected NMDistributedNodeAttributesHandler(
+ NodeAttributesProvider provider) {
+ this.attributesProvider = provider;
+ }
+
+ @Override
+ public Set<NodeAttribute> getNodeAttributesForHeartbeat() {
+ return attributesProvider.getDescriptors();
+ }
+ }
+
+
private static interface NMNodeLabelsHandler {
/**
* validates nodeLabels From Provider and returns it to the caller. Also
@@ -932,7 +980,7 @@ private NMDistributedNodeLabelsHandler(
@Override
public Set getNodeLabelsForRegistration() {
- Set<NodeLabel> nodeLabels = nodeLabelsProvider.getNodeLabels();
+ Set<NodeLabel> nodeLabels = nodeLabelsProvider.getDescriptors();
nodeLabels = (null == nodeLabels)
? CommonNodeLabelsManager.EMPTY_NODELABEL_SET : nodeLabels;
previousNodeLabels = nodeLabels;
@@ -967,7 +1015,7 @@ public String verifyRMRegistrationResponseForNodeLabels(
@Override
public Set getNodeLabelsForHeartbeat() {
Set<NodeLabel> nodeLabelsForHeartbeat =
- nodeLabelsProvider.getNodeLabels();
+ nodeLabelsProvider.getDescriptors();
// if the provider returns null then consider empty labels are set
nodeLabelsForHeartbeat = (nodeLabelsForHeartbeat == null)
? CommonNodeLabelsManager.EMPTY_NODELABEL_SET
@@ -1012,7 +1060,7 @@ private void validateNodeLabels(Set nodeLabelsForHeartbeat)
StringBuilder errorMsg = new StringBuilder("");
while (iterator.hasNext()) {
try {
- CommonNodeLabelsManager
+ NodeLabelUtil
.checkAndThrowLabelName(iterator.next().getName());
} catch (IOException e) {
errorMsg.append(e.getMessage());
@@ -1071,6 +1119,9 @@ public void run() {
NodeHeartbeatResponse response = null;
Set<NodeLabel> nodeLabelsForHeartbeat =
nodeLabelsHandler.getNodeLabelsForHeartbeat();
+ Set<NodeAttribute> nodeAttributesForHeartbeat =
+ nodeAttributesHandler == null ? null :
+ nodeAttributesHandler.getNodeAttributesForHeartbeat();
NodeStatus nodeStatus = getNodeStatus(lastHeartbeatID);
NodeHeartbeatRequest request =
NodeHeartbeatRequest.newInstance(nodeStatus,
@@ -1079,6 +1130,7 @@ public void run() {
NodeStatusUpdaterImpl.this.context
.getNMTokenSecretManager().getCurrentKey(),
nodeLabelsForHeartbeat,
+ nodeAttributesForHeartbeat,
NodeStatusUpdaterImpl.this.context
.getRegisteringCollectors());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeLabelsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeDescriptorsProvider.java
similarity index 50%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeLabelsProvider.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeDescriptorsProvider.java
index c810654a772..088c9cb2640 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeLabelsProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeDescriptorsProvider.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.yarn.server.nodemanager.nodelabels;
+import java.io.File;
+import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import java.util.Timer;
@@ -24,48 +26,52 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.Collections;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.NodeLabel;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
/**
- * Provides base implementation of NodeLabelsProvider with Timer and expects
- * subclass to provide TimerTask which can fetch NodeLabels
+ * Provides base implementation of NodeDescriptorsProvider with Timer and
+ * expects subclass to provide TimerTask which can fetch node descriptors.
*/
-public abstract class AbstractNodeLabelsProvider extends AbstractService
- implements NodeLabelsProvider {
- public static final long DISABLE_NODE_LABELS_PROVIDER_FETCH_TIMER = -1;
+public abstract class AbstractNodeDescriptorsProvider<T>
+ extends AbstractService implements NodeDescriptorsProvider<T> {
+ public static final long DISABLE_NODE_DESCRIPTORS_PROVIDER_FETCH_TIMER = -1;
- // Delay after which timer task are triggered to fetch NodeLabels
- protected long intervalTime;
+ // Delay after which timer task are triggered to fetch node descriptors.
+ // Default interval is -1 means it is an one time task, each implementation
+ // will override this value from configuration.
+ private long intervalTime = -1;
- // Timer used to schedule node labels fetching
- protected Timer nodeLabelsScheduler;
-
- public static final String NODE_LABELS_SEPRATOR = ",";
+ // Timer used to schedule node descriptors fetching
+ private Timer scheduler;
protected Lock readLock = null;
protected Lock writeLock = null;
protected TimerTask timerTask;
- protected Set<NodeLabel> nodeLabels =
- CommonNodeLabelsManager.EMPTY_NODELABEL_SET;
-
+ private Set<T> nodeDescriptors = Collections
+ .unmodifiableSet(new HashSet<>(0));
- public AbstractNodeLabelsProvider(String name) {
+ public AbstractNodeDescriptorsProvider(String name) {
super(name);
}
+ public long getIntervalTime() {
+ return intervalTime;
+ }
+
+ public void setIntervalTime(long intervalMS) {
+ this.intervalTime = intervalMS;
+ }
+
@Override
protected void serviceInit(Configuration conf) throws Exception {
- this.intervalTime =
- conf.getLong(YarnConfiguration.NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS,
- YarnConfiguration.DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS);
-
ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
readLock = readWriteLock.readLock();
writeLock = readWriteLock.writeLock();
@@ -76,13 +82,13 @@ protected void serviceInit(Configuration conf) throws Exception {
protected void serviceStart() throws Exception {
timerTask = createTimerTask();
timerTask.run();
- if (intervalTime != DISABLE_NODE_LABELS_PROVIDER_FETCH_TIMER) {
- nodeLabelsScheduler =
- new Timer("DistributedNodeLabelsRunner-Timer", true);
+ long taskInterval = getIntervalTime();
+ if (taskInterval != DISABLE_NODE_DESCRIPTORS_PROVIDER_FETCH_TIMER) {
+ scheduler =
+ new Timer("DistributedNodeDescriptorsRunner-Timer", true);
// Start the timer task and then periodically at the configured interval
// time. Illegal values for intervalTime is handled by timer api
- nodeLabelsScheduler.scheduleAtFixedRate(timerTask, intervalTime,
- intervalTime);
+ scheduler.schedule(timerTask, taskInterval, taskInterval);
}
super.serviceStart();
}
@@ -93,8 +99,8 @@ protected void serviceStart() throws Exception {
*/
@Override
protected void serviceStop() throws Exception {
- if (nodeLabelsScheduler != null) {
- nodeLabelsScheduler.cancel();
+ if (scheduler != null) {
+ scheduler.cancel();
}
cleanUp();
super.serviceStop();
@@ -109,24 +115,56 @@ protected void serviceStop() throws Exception {
* @return Returns output from provider.
*/
@Override
- public Set<NodeLabel> getNodeLabels() {
+ public Set<T> getDescriptors() {
readLock.lock();
try {
- return nodeLabels;
+ return this.nodeDescriptors;
} finally {
readLock.unlock();
}
}
- protected void setNodeLabels(Set<NodeLabel> nodeLabelsSet) {
+ @Override
+ public void setDescriptors(Set<T> descriptorsSet) {
writeLock.lock();
try {
- nodeLabels = nodeLabelsSet;
+ this.nodeDescriptors = descriptorsSet;
} finally {
writeLock.unlock();
}
}
+ /**
+ * Method used to determine if or not node descriptors fetching script is
+ * configured and whether it is fit to run. Returns true if following
+ * conditions are met:
+ *
+ * <ol>
+ * <li>Path to the script is not empty</li>
+ * <li>The script file exists</li>
+ * </ol>
+ *
+ * @throws IOException
+ */
+ protected void verifyConfiguredScript(String scriptPath)
+ throws IOException {
+ boolean invalidConfiguration;
+ if (scriptPath == null
+ || scriptPath.trim().isEmpty()) {
+ invalidConfiguration = true;
+ } else {
+ File f = new File(scriptPath);
+ invalidConfiguration = !f.exists() || !FileUtil.canExecute(f);
+ }
+ if (invalidConfiguration) {
+ throw new IOException(
+ "Node descriptors provider script \"" + scriptPath
+ + "\" is not configured properly. Please check whether"
+ + " the script path exists, owner and the access rights"
+ + " are suitable for NM process to execute it");
+ }
+ }
+
static Set<NodeLabel> convertToNodeLabelSet(String partitionNodeLabel) {
if (null == partitionNodeLabel) {
return null;
@@ -145,5 +183,15 @@ TimerTask getTimerTask() {
return timerTask;
}
+ @VisibleForTesting
+ public Timer getScheduler() {
+ return this.scheduler;
+ }
+
+ /**
+ * Creates a timer task which be scheduled periodically by the provider,
+ * and the task is responsible to update node descriptors to the provider.
+ * @return a timer task.
+ */
public abstract TimerTask createTimerTask();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeAttributesProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeAttributesProvider.java
new file mode 100644
index 00000000000..ab8a8b1cd0e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeAttributesProvider.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.nodelabels;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableSet;
+import org.apache.commons.lang3.EnumUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+import org.apache.hadoop.yarn.api.records.NodeAttributeType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.nodelabels.NodeLabelUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.TimerTask;
+import java.util.Set;
+
+/**
+ * Configuration based node attributes provider.
+ */
+public class ConfigurationNodeAttributesProvider
+ extends NodeAttributesProvider {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ConfigurationNodeAttributesProvider.class);
+
+ private static final String NODE_ATTRIBUTES_DELIMITER = ":";
+ private static final String NODE_ATTRIBUTE_DELIMITER = ",";
+
+ public ConfigurationNodeAttributesProvider() {
+ super("Configuration Based Node Attributes Provider");
+ }
+
+ @Override
+ protected void serviceInit(Configuration conf) throws Exception {
+ long taskInterval = conf.getLong(YarnConfiguration
+ .NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS,
+ YarnConfiguration
+ .DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS);
+ this.setIntervalTime(taskInterval);
+ super.serviceInit(conf);
+ }
+
+ private void updateNodeAttributesFromConfig(Configuration conf)
+ throws IOException {
+ String configuredNodeAttributes = conf.get(
+ YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES, null);
+ setDescriptors(parseAttributes(configuredNodeAttributes));
+ }
+
+ @VisibleForTesting
+ public Set<NodeAttribute> parseAttributes(String config)
+ throws IOException {
+ if (Strings.isNullOrEmpty(config)) {
+ return ImmutableSet.of();
+ }
+ Set<NodeAttribute> attributeSet = new HashSet<>();
+ // Configuration value should be in one line, format:
+ // "ATTRIBUTE_NAME,ATTRIBUTE_TYPE,ATTRIBUTE_VALUE",
+ // multiple node-attributes are delimited by ":".
+ // Each attribute str should not container any space.
+ String[] attributeStrs = config.split(NODE_ATTRIBUTES_DELIMITER);
+ for (String attributeStr : attributeStrs) {
+ String[] fields = attributeStr.split(NODE_ATTRIBUTE_DELIMITER);
+ if (fields.length != 3) {
+ throw new IOException("Invalid value for "
+ + YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES
+ + "=" + config);
+ }
+
+ // We don't allow user config to overwrite our dist prefix,
+ // so disallow any prefix set in the configuration.
+ if (fields[0].contains("/")) {
+ throw new IOException("Node attribute set in "
+ + YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES
+ + " should not contain any prefix.");
+ }
+
+ // Make sure attribute type is valid.
+ if (!EnumUtils.isValidEnum(NodeAttributeType.class, fields[1])) {
+ throw new IOException("Invalid node attribute type: "
+ + fields[1] + ", valid values are "
+ + Arrays.asList(NodeAttributeType.values()));
+ }
+
+ // Automatically setup prefix for collected attributes
+ NodeAttribute na = NodeAttribute.newInstance(
+ NodeAttribute.PREFIX_DISTRIBUTED,
+ fields[0],
+ NodeAttributeType.valueOf(fields[1]),
+ fields[2]);
+
+ // Since a NodeAttribute is identical with another one as long as
+ // their prefix and name are same, to avoid attributes getting
+ // overwritten by ambiguous attribute, make sure it fails in such
+ // case.
+ if (!attributeSet.add(na)) {
+ throw new IOException("Ambiguous node attribute is found: "
+ + na.toString() + ", a same attribute already exists");
+ }
+ }
+
+ // Before updating the attributes to the provider,
+ // verify if they are valid
+ try {
+ NodeLabelUtil.validateNodeAttributes(attributeSet);
+ } catch (IOException e) {
+ throw new IOException("Node attributes set by configuration property: "
+ + YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES
+ + " is not valid. Detail message: " + e.getMessage());
+ }
+ return attributeSet;
+ }
+
+ private class ConfigurationMonitorTimerTask extends TimerTask {
+ @Override
+ public void run() {
+ try {
+ updateNodeAttributesFromConfig(new YarnConfiguration());
+ } catch (Exception e) {
+ LOG.error("Failed to update node attributes from "
+ + YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES, e);
+ }
+ }
+ }
+
+ @Override
+ protected void cleanUp() throws Exception {
+ // Nothing to cleanup
+ }
+
+ @Override
+ public TimerTask createTimerTask() {
+ return new ConfigurationMonitorTimerTask();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
index 7490cc2efda..1c6af8ebabe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
@@ -29,7 +29,7 @@
/**
* Provides Node's Labels by constantly monitoring the configuration.
*/
-public class ConfigurationNodeLabelsProvider extends AbstractNodeLabelsProvider {
+public class ConfigurationNodeLabelsProvider extends NodeLabelsProvider {
private static final Logger LOG =
LoggerFactory.getLogger(ConfigurationNodeLabelsProvider.class);
@@ -38,11 +38,20 @@ public ConfigurationNodeLabelsProvider() {
super("Configuration Based NodeLabels Provider");
}
+ @Override
+ protected void serviceInit(Configuration conf) throws Exception {
+ long taskInterval = conf.getLong(
+ YarnConfiguration.NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS,
+ YarnConfiguration.DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS);
+ this.setIntervalTime(taskInterval);
+ super.serviceInit(conf);
+ }
+
private void updateNodeLabelsFromConfig(Configuration conf)
throws IOException {
String configuredNodePartition =
conf.get(YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_PARTITION, null);
- setNodeLabels(convertToNodeLabelSet(configuredNodePartition));
+ setDescriptors(convertToNodeLabelSet(configuredNodePartition));
}
private class ConfigurationMonitorTimerTask extends TimerTask {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeAttributesProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeAttributesProvider.java
new file mode 100644
index 00000000000..2a418009406
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeAttributesProvider.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.nodelabels;
+
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
+
+/**
+ * Abstract class which will be responsible for fetching the node attributes.
+ *
+ */
+public abstract class NodeAttributesProvider
+ extends AbstractNodeDescriptorsProvider<NodeAttribute> {
+
+ public NodeAttributesProvider(String name) {
+ super(name);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeDescriptorsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeDescriptorsProvider.java
new file mode 100644
index 00000000000..51608b54b57
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeDescriptorsProvider.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.nodelabels;
+
+import java.util.Set;
+
+/**
+ * Interface responsible for fetching node descriptors; a descriptor
+ * is a {@link org.apache.hadoop.yarn.api.records.NodeLabel} or a
+ * {@link org.apache.hadoop.yarn.api.records.NodeAttribute}.
+ * @param <T> concrete descriptor type supplied by an implementation
+ */
+public interface NodeDescriptorsProvider<T> {
+
+  /**
+   * Provides the descriptors. The provider is expected to give same
+   * descriptors continuously until there is a change.
+   * If null is returned then an empty set is assumed by the caller.
+   *
+   * @return set of node descriptors applicable for a node
+   */
+  Set<T> getDescriptors();
+
+  /**
+   * Sets a set of descriptors to the provider.
+   * @param descriptors node descriptors.
+   */
+  void setDescriptors(Set<T> descriptors);
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeDescriptorsScriptRunner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeDescriptorsScriptRunner.java
new file mode 100644
index 00000000000..6365f3bf9c6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/NodeDescriptorsScriptRunner.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.nodelabels;
+
+import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.TimerTask;
+
+/**
+ * A node descriptors script runner periodically runs a script,
+ * parses the output to collect desired descriptors, and then
+ * post these descriptors to the given {@link NodeDescriptorsProvider}.
+ * @param