diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java index 62aa4972929..94f741a03e6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java @@ -66,6 +66,8 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; @@ -517,4 +519,22 @@ public void killApplication(ApplicationId appId, String diagnostics) throws YarnException, IOException { client.killApplication(appId, diagnostics); } + + @Override + public Map getResourceProfiles() + throws YarnException, IOException { + return client.getResourceProfiles(); + } + + @Override + public Resource getResourceProfile(String profile) + throws YarnException, IOException { + return client.getResourceProfile(profile); + } + + @Override + public List getResourceTypeInfo() + throws YarnException, IOException { + return client.getResourceTypeInfo(); + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java index 65eac654845..a9b4626581b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java @@ -72,6 +72,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -104,6 +108,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; @@ -494,6 +500,26 
@@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( throws YarnException, IOException { return null; } + + @Override + public GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) + throws YarnException, IOException { + return null; + } + + @Override + public GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException { + return null; + } + + @Override + public GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) + throws YarnException, IOException { + return null; + } } class HistoryService extends AMService implements HSClientProtocol { diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 6825a36ebdd..a5b40210ff4 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -154,6 +154,10 @@ + + + + @@ -599,4 +603,22 @@ + + + + + + + + + + + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java index b7d925a6592..8e76a11dc27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java @@ -52,11 +52,13 @@ void init(ApplicationMasterServiceContext amsContext, * @param request Register Request. * @param response Register Response. * @throws IOException IOException. + * @throws YarnException in critical situation where invalid + * profiles/resources are added. 
*/ - void registerApplicationMaster( - ApplicationAttemptId applicationAttemptId, + void registerApplicationMaster(ApplicationAttemptId applicationAttemptId, RegisterApplicationMasterRequest request, - RegisterApplicationMasterResponse response) throws IOException; + RegisterApplicationMasterResponse response) + throws IOException, YarnException; /** * Allocate call. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java index 394454f20bc..8456a8ec59f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java @@ -65,6 +65,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -75,6 +81,7 @@ import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import 
org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException; /** *

The protocol between clients and the ResourceManager @@ -589,4 +596,50 @@ SignalContainerResponse signalToContainer( public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( UpdateApplicationTimeoutsRequest request) throws YarnException, IOException; + + /** + *

+ * The interface used by clients to get all the resource profiles that are + * available on the ResourceManager. + *

+ * @param request request to get all the resource profiles + * @return Response containing a map of the profile name to Resource + * capabilities + * @throws YARNFeatureNotEnabledException if resource-profile is disabled + * @throws YarnException if any error happens inside YARN + * @throws IOException in case of other errors + */ + @Public + @Unstable + GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) throws YarnException, IOException; + + /** + *

+ * The interface to get the details for a specific resource profile. + *

+ * @param request request to get the details of a resource profile + * @return Response containing the details for a particular resource profile + * @throws YARNFeatureNotEnabledException if resource-profile is disabled + * @throws YarnException if any error happens inside YARN + * @throws IOException in case of other errors + */ + @Public + @Unstable + GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException; + + /** + *

+ * The interface used by clients to get all the resource types that are + * supported by the ResourceManager. + *

+ * @param request request to get the details of a resource profile + * @return Response containing the details for a particular resource profile + * @throws YarnException if any error happens inside YARN + * @throws IOException in case of other errors + */ + @Public + @Unstable + GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) throws YarnException, IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesRequest.java new file mode 100644 index 00000000000..0bb9bf805d9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesRequest.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class for getting all the resource profiles from the RM. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class GetAllResourceProfilesRequest { + + public static GetAllResourceProfilesRequest newInstance() { + return Records.newRecord(GetAllResourceProfilesRequest.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesResponse.java new file mode 100644 index 00000000000..547770890d5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceProfilesResponse.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.util.Records; + +import java.util.Map; + +/** + * Response class for getting all the resource profiles from the RM. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class GetAllResourceProfilesResponse { + + public static GetAllResourceProfilesResponse newInstance() { + return Records.newRecord(GetAllResourceProfilesResponse.class); + } + + public abstract void setResourceProfiles(Map profiles); + + public abstract Map getResourceProfiles(); + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || !(other instanceof GetAllResourceProfilesResponse)) { + return false; + } + return ((GetAllResourceProfilesResponse) other).getResourceProfiles() + .equals(this.getResourceProfiles()); + } + + @Override + public int hashCode() { + return this.getResourceProfiles().hashCode(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoRequest.java new file mode 100644 index 00000000000..3bda4f54ec5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoRequest.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class for getting all the resource types from the RM. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class GetAllResourceTypeInfoRequest { + + public static GetAllResourceTypeInfoRequest newInstance() { + return Records.newRecord(GetAllResourceTypeInfoRequest.class); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java new file mode 100644 index 00000000000..b57b96df3fd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; +import org.apache.hadoop.yarn.util.Records; + +import java.util.List; + +/** + * Response class for getting all the resource types from the RM. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class GetAllResourceTypeInfoResponse { + + public static GetAllResourceTypeInfoResponse newInstance() { + return Records.newRecord(GetAllResourceTypeInfoResponse.class); + } + + public abstract void setResourceTypeInfo(List resourceTypes); + + public abstract List getResourceTypeInfo(); + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || !(other instanceof GetAllResourceTypeInfoResponse)) { + return false; + } + return ((GetAllResourceTypeInfoResponse) other).getResourceTypeInfo() + .equals(this.getResourceTypeInfo()); + } + + @Override + public int hashCode() { + return this.getResourceTypeInfo().hashCode(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileRequest.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileRequest.java new file mode 100644 index 00000000000..3655be946da --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileRequest.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.util.Records; + +/** + * Request class for getting the details for a particular resource profile. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class GetResourceProfileRequest { + + public static GetResourceProfileRequest newInstance(String profile) { + GetResourceProfileRequest request = + Records.newRecord(GetResourceProfileRequest.class); + request.setProfileName(profile); + return request; + } + + public abstract void setProfileName(String profileName); + + public abstract String getProfileName(); + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || !(other instanceof GetResourceProfileRequest)) { + return false; + } + return this.getProfileName() + .equals(((GetResourceProfileRequest) other).getProfileName()); + } + + @Override + public int hashCode() { + return getProfileName().hashCode(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileResponse.java new file mode 100644 index 00000000000..a010644a792 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetResourceProfileResponse.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.util.Records; + +/** + * Response class for getting the details for a particular resource profile. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class GetResourceProfileResponse { + + public static GetResourceProfileResponse newInstance() { + return Records.newRecord(GetResourceProfileResponse.class); + } + + /** + * Get the resources that will be allocated if the profile was used. + * + * @return the resources that will be allocated if the profile was used. + */ + public abstract Resource getResource(); + + /** + * Set the resources that will be allocated if the profile is used. + * + * @param r Set the resources that will be allocated if the profile is used. 
+ */ + public abstract void setResource(Resource r); + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || !(other instanceof GetResourceProfileResponse)) { + return false; + } + return this.getResource() + .equals(((GetResourceProfileResponse) other).getResource()); + } + + @Override + public int hashCode() { + return getResource().hashCode(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java index 0b886dd5c90..8fa8563e2d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java @@ -204,4 +204,12 @@ public abstract void setContainersFromPreviousAttempts( @Unstable public abstract void setSchedulerResourceTypes( EnumSet types); + + @Public + @Unstable + public abstract Map getResourceProfiles(); + + @Private + @Unstable + public abstract void setResourceProfiles(Map profiles); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java new file mode 100644 index 00000000000..dbd9c37ceec --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords; + +/** + * Enum which represents the resource type. Currently, the only type allowed is + * COUNTABLE. + */ +public enum ResourceTypes { + COUNTABLE +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java index 3cf8f3defa3..d2e33ff9bca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; +import java.util.Map; + /** * Contains various scheduling metrics to be reported by UI and CLI. 
*/ @@ -35,9 +37,9 @@ @Unstable public static ApplicationResourceUsageReport newInstance( int numUsedContainers, int numReservedContainers, Resource usedResources, - Resource reservedResources, Resource neededResources, long memorySeconds, - long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc, - long preemptedMemorySeconds, long preemptedVcoresSeconds) { + Resource reservedResources, Resource neededResources, + Map resourceSecondsMap, float queueUsagePerc, + float clusterUsagePerc, Map preemptedResourceSecondsMap) { ApplicationResourceUsageReport report = Records.newRecord(ApplicationResourceUsageReport.class); report.setNumUsedContainers(numUsedContainers); @@ -45,12 +47,10 @@ public static ApplicationResourceUsageReport newInstance( report.setUsedResources(usedResources); report.setReservedResources(reservedResources); report.setNeededResources(neededResources); - report.setMemorySeconds(memorySeconds); - report.setVcoreSeconds(vcoreSeconds); + report.setResourceSecondsMap(resourceSecondsMap); report.setQueueUsagePercentage(queueUsagePerc); report.setClusterUsagePercentage(clusterUsagePerc); - report.setPreemptedMemorySeconds(preemptedMemorySeconds); - report.setPreemptedVcoreSeconds(preemptedVcoresSeconds); + report.setPreemptedResourceSecondsMap(preemptedResourceSecondsMap); return report; } @@ -229,4 +229,47 @@ public static ApplicationResourceUsageReport newInstance( @Public @Unstable public abstract long getPreemptedVcoreSeconds(); + + /** + * Get the aggregated number of resources that the application has + * allocated times the number of seconds the application has been running. + * @return map containing the resource name and aggregated resource-seconds + */ + @Public + @Unstable + public abstract Map getResourceSecondsMap(); + + /** + * Set the aggregated number of resources that the application has + * allocated times the number of seconds the application has been running.
+ * @param resourceSecondsMap map containing the resource name and aggregated + * resource-seconds + */ + @Private + @Unstable + public abstract void setResourceSecondsMap( + Map resourceSecondsMap); + + + /** + * Get the aggregated number of resources preempted that the application has + * allocated times the number of seconds the application has been running. + * @return map containing the resource name and aggregated preempted + * resource-seconds + */ + @Public + @Unstable + public abstract Map getPreemptedResourceSecondsMap(); + + /** + * Set the aggregated number of resources preempted that the application has + * allocated times the number of seconds the application has been running. + * @param preemptedResourceSecondsMap map containing the resource name and + * aggregated preempted resource-seconds + */ + @Private + @Unstable + public abstract void setPreemptedResourceSecondsMap( + Map preemptedResourceSecondsMap); + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java new file mode 100644 index 00000000000..2cb46704716 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.util.Records; + +import java.util.Map; + +/** + * Class to capture capability requirements when using resource profiles. The + * ProfileCapability is meant to be used as part of the ResourceRequest. A + * profile capability has two pieces - the resource profile name and the + * overrides. The resource profile specifies the name of the resource profile + * to be used and the capability override is the overrides desired on specific + * resource types. + * + * For example, if you have a resource profile "small" that maps to + * {@literal <4096M, 2 cores, 1 gpu>} and you set the capability override to + * {@literal <8192M, 0 cores, 0 gpu>}, then the actual resource allocation on + * the ResourceManager will be {@literal <8192M, 2 cores, 1 gpu>}. + * + * Note that the conversion from the ProfileCapability to the Resource class + * with the actual resource requirements will be done by the ResourceManager, + * which has the actual profile to Resource mapping. 
+ * + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public abstract class ProfileCapability { + + public static final String DEFAULT_PROFILE = "default"; + + public static ProfileCapability newInstance(Resource override) { + return newInstance(DEFAULT_PROFILE, override); + } + + public static ProfileCapability newInstance(String profile) { + Preconditions + .checkArgument(profile != null, "The profile name cannot be null"); + ProfileCapability obj = Records.newRecord(ProfileCapability.class); + obj.setProfileName(profile); + obj.setProfileCapabilityOverride(Resource.newInstance(0, 0)); + return obj; + } + + public static ProfileCapability newInstance(String profile, + Resource override) { + Preconditions + .checkArgument(profile != null, "The profile name cannot be null"); + ProfileCapability obj = Records.newRecord(ProfileCapability.class); + obj.setProfileName(profile); + obj.setProfileCapabilityOverride(override); + return obj; + } + + /** + * Get the profile name. + * @return the profile name + */ + public abstract String getProfileName(); + + /** + * Get the profile capability override. + * @return Resource object containing the override. + */ + public abstract Resource getProfileCapabilityOverride(); + + /** + * Set the resource profile name. + * @param profileName the resource profile name + */ + public abstract void setProfileName(String profileName); + + /** + * Set the capability override to override specific resource types on the + * resource profile. + * + * For example, if you have a resource profile "small" that maps to + * {@literal <4096M, 2 cores, 1 gpu>} and you set the capability override to + * {@literal <8192M, 0 cores, 0 gpu>}, then the actual resource allocation on + * the ResourceManager will be {@literal <8192M, 2 cores, 1 gpu>}. 
+ * + * Note that the conversion from the ProfileCapability to the Resource class + * with the actual resource requirements will be done by the ResourceManager, + * which has the actual profile to Resource mapping. + * + * @param r Resource object containing the capability override + */ + public abstract void setProfileCapabilityOverride(Resource r); + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || !(other instanceof ProfileCapability)) { + return false; + } + return ((ProfileCapability) other).getProfileName() + .equals(this.getProfileName()) && ((ProfileCapability) other) + .getProfileCapabilityOverride() + .equals(this.getProfileCapabilityOverride()); + } + + @Override + public int hashCode() { + final int prime = 2153; + int result = 2459; + String name = getProfileName(); + Resource override = getProfileCapabilityOverride(); + result = prime * result + ((name == null) ? 0 : name.hashCode()); + result = prime * result + ((override == null) ? 0 : override.hashCode()); + return result; + } + + @Override + public String toString() { + return "{ profile: " + this.getProfileName() + ", capabilityOverride: " + + this.getProfileCapabilityOverride() + " }"; + } + + /** + * Get a representation of the capability as a Resource object. 
+ * @param capability the capability we wish to convert + * @param resourceProfilesMap map of profile name to Resource object + * @return Resource object representing the capability + */ + public static Resource toResource(ProfileCapability capability, + Map resourceProfilesMap) { + Preconditions + .checkArgument(capability != null, "Capability cannot be null"); + Preconditions.checkArgument(resourceProfilesMap != null, + "Resource profiles map cannot be null"); + Resource none = Resource.newInstance(0, 0); + Resource resource = Resource.newInstance(0, 0); + String profileName = capability.getProfileName(); + if (profileName.isEmpty()) { + profileName = DEFAULT_PROFILE; + } + if (resourceProfilesMap.containsKey(profileName)) { + resource = Resource.newInstance(resourceProfilesMap.get(profileName)); + } + + if (capability.getProfileCapabilityOverride() != null && + !capability.getProfileCapabilityOverride().equals(none)) { + for (ResourceInformation entry : capability + .getProfileCapabilityOverride().getResources()) { + if (entry != null && entry.getValue() >= 0) { + resource.setResourceInformation(entry.getName(), entry); + } + } + } + return resource; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 0fd41a2f20b..d4b458d0417 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -18,12 +18,19 @@ package org.apache.hadoop.yarn.api.records; +import java.util.Arrays; + import org.apache.commons.lang.NotImplementedException; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Public; +import 
org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; - +import org.apache.hadoop.yarn.api.records.impl.BaseResource; +import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException; +import org.apache.hadoop.yarn.util.Records; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; /** *

Resource models a set of computer resources in the @@ -38,10 +45,10 @@ * the average number of threads it expects to have runnable at a time.

* *

Virtual cores take integer values and thus currently CPU-scheduling is - * very coarse. A complementary axis for CPU requests that represents processing - * power will likely be added in the future to enable finer-grained resource - * configuration.

- * + * very coarse. A complementary axis for CPU requests that represents + * processing power will likely be added in the future to enable finer-grained + * resource configuration.

+ * *

Typically, applications request Resource of suitable * capability to run their component tasks.

* @@ -52,64 +59,86 @@ @Stable public abstract class Resource implements Comparable { - private static class SimpleResource extends Resource { - private long memory; - private long vcores; - SimpleResource(long memory, long vcores) { - this.memory = memory; - this.vcores = vcores; - } - @Override - public int getMemory() { - return (int)memory; - } - @Override - public void setMemory(int memory) { - this.memory = memory; - } - @Override - public long getMemorySize() { - return memory; - } - @Override - public void setMemorySize(long memory) { - this.memory = memory; - } - @Override - public int getVirtualCores() { - return (int)vcores; - } - @Override - public void setVirtualCores(int vcores) { - this.vcores = vcores; - } - } + protected ResourceInformation[] resources = null; + protected static final String MEMORY = ResourceInformation.MEMORY_MB.getName(); + protected static final String VCORES = ResourceInformation.VCORES.getName(); + + // Number of mandatory resources, this is added to avoid invoke + // MandatoryResources.values().length, since values() internally will + // copy array, etc. 
+ protected static final int NUM_MANDATORY_RESOURCES = 2; + + protected static final int MEMORY_INDEX = 0; + protected static final int VCORES_INDEX = 1; @Public @Stable public static Resource newInstance(int memory, int vCores) { - return new SimpleResource(memory, vCores); + if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) { + Resource ret = Records.newRecord(Resource.class); + ret.setMemorySize(memory); + ret.setVirtualCores(vCores); + return ret; + } + return new BaseResource(memory, vCores); } @Public @Stable public static Resource newInstance(long memory, int vCores) { - return new SimpleResource(memory, vCores); + if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) { + Resource ret = Records.newRecord(Resource.class); + ret.setMemorySize(memory); + ret.setVirtualCores(vCores); + return ret; + } + return new BaseResource(memory, vCores); + } + + @InterfaceAudience.Private + @InterfaceStability.Unstable + public static Resource newInstance(Resource resource) { + Resource ret = Resource.newInstance(resource.getMemorySize(), + resource.getVirtualCores()); + if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) { + Resource.copy(resource, ret); + } + return ret; + } + + @InterfaceAudience.Private + @InterfaceStability.Unstable + public static void copy(Resource source, Resource dest) { + for (ResourceInformation entry : source.getResources()) { + dest.setResourceInformation(entry.getName(), entry); + } } /** * This method is DEPRECATED: * Use {@link Resource#getMemorySize()} instead * - * Get memory of the resource. - * @return memory of the resource + * Get memory of the resource. Note - while memory has + * never had a unit specified, all YARN configurations have specified memory + * in MB. The assumption has been that the daemons and applications are always + * using the same units. 
With the introduction of the ResourceInformation + * class we have support for units - so this function will continue to return + * memory but in the units of MB + * + * @return memory(in MB) of the resource */ @Public @Deprecated public abstract int getMemory(); /** - * Get memory of the resource. + * Get memory of the resource. Note - while memory has + * never had a unit specified, all YARN configurations have specified memory + * in MB. The assumption has been that the daemons and applications are always + * using the same units. With the introduction of the ResourceInformation + * class we have support for units - so this function will continue to return + * memory but in the units of MB + * * @return memory of the resource */ @Public @@ -120,8 +149,14 @@ public long getMemorySize() { } /** - * Set memory of the resource. - * @param memory memory of the resource + * Set memory of the resource. Note - while memory has + * never had a unit specified, all YARN configurations have specified memory + * in MB. The assumption has been that the daemons and applications are always + * using the same units. With the introduction of the ResourceInformation + * class we have support for units - so this function will continue to set + * memory but the assumption is that the value passed is in units of MB. + * + * @param memory memory(in MB) of the resource */ @Public @Deprecated @@ -138,72 +173,317 @@ public void setMemorySize(long memory) { "This method is implemented by ResourcePBImpl"); } - /** * Get number of virtual cpu cores of the resource. * * Virtual cores are a unit for expressing CPU parallelism. A node's capacity - * should be configured with virtual cores equal to its number of physical cores. - * A container should be requested with the number of cores it can saturate, i.e. - * the average number of threads it expects to have runnable at a time. - * + * should be configured with virtual cores equal to its number of physical + * cores. 
A container should be requested with the number of cores it can + * saturate, i.e. the average number of threads it expects to have runnable + * at a time. + * * @return num of virtual cpu cores of the resource */ @Public @Evolving public abstract int getVirtualCores(); - + /** * Set number of virtual cpu cores of the resource. * * Virtual cores are a unit for expressing CPU parallelism. A node's capacity - * should be configured with virtual cores equal to its number of physical cores. - * A container should be requested with the number of cores it can saturate, i.e. - * the average number of threads it expects to have runnable at a time. - * + * should be configured with virtual cores equal to its number of physical + * cores. A container should be requested with the number of cores it can + * saturate, i.e. the average number of threads it expects to have runnable + * at a time. + * * @param vCores number of virtual cpu cores of the resource */ @Public @Evolving public abstract void setVirtualCores(int vCores); - @Override - public int hashCode() { - final int prime = 263167; + /** + * Get ResourceInformation for all resources. + * + * @return Map of resource name to ResourceInformation + */ + @InterfaceAudience.Private + @Evolving + public ResourceInformation[] getResources() { + return resources; + } - int result = (int) (939769357 - + getMemorySize()); // prime * result = 939769357 initially - result = prime * result + getVirtualCores(); - return result; + /** + * Get ResourceInformation for a specified resource. 
+ * + * @param resource name of the resource + * @return the ResourceInformation object for the resource + * @throws ResourceNotFoundException if the resource can't be found + */ + @Public + @InterfaceStability.Unstable + public ResourceInformation getResourceInformation(String resource) + throws ResourceNotFoundException { + Integer index = ResourceUtils.getResourceTypeIndex().get(resource); + ResourceInformation[] resources = getResources(); + if (index != null) { + return resources[index]; + } + throw new ResourceNotFoundException("Unknown resource '" + resource + + "'. Known resources are " + Arrays.toString(resources)); + } + + /** + * Get ResourceInformation for a specified resource from a given index. + * + * @param index + * of the resource + * @return the ResourceInformation object for the resource + * @throws ResourceNotFoundException + * if the resource can't be found + */ + @InterfaceAudience.Private + @InterfaceStability.Unstable + public ResourceInformation getResourceInformation(int index) + throws ResourceNotFoundException { + ResourceInformation ri = null; + try { + ResourceInformation[] resources = getResources(); + ri = resources[index]; + } catch (ArrayIndexOutOfBoundsException e) { + checkIndexAndThrowExceptionWhenArrayOutOfBound(index); + } + return ri; + } + + /** + * Get the value for a specified resource. No information about the units is + * returned. + * + * @param resource name of the resource + * @return the value for the resource + * @throws ResourceNotFoundException if the resource can't be found + */ + @Public + @InterfaceStability.Unstable + public long getResourceValue(String resource) + throws ResourceNotFoundException { + return getResourceInformation(resource).getValue(); + } + + /** + * Set the ResourceInformation object for a particular resource. 
+ * + * @param resource the resource for which the ResourceInformation is provided + * @param resourceInformation ResourceInformation object + * @throws ResourceNotFoundException if the resource is not found + */ + @Public + @InterfaceStability.Unstable + public void setResourceInformation(String resource, + ResourceInformation resourceInformation) + throws ResourceNotFoundException { + if (resource.equals(MEMORY)) { + this.setMemorySize(resourceInformation.getValue()); + return; + } + if (resource.equals(VCORES)) { + this.setVirtualCores((int) resourceInformation.getValue()); + return; + } + ResourceInformation storedResourceInfo = getResourceInformation(resource); + ResourceInformation.copy(resourceInformation, storedResourceInfo); + } + + /** + * Set the ResourceInformation object for a particular resource. + * + * @param index + * the resource index for which the ResourceInformation is provided + * @param resourceInformation + * ResourceInformation object + * @throws ResourceNotFoundException + * if the resource is not found + */ + @InterfaceAudience.Private + @InterfaceStability.Unstable + public void setResourceInformation(int index, + ResourceInformation resourceInformation) + throws ResourceNotFoundException { + ResourceInformation[] resources = getResources(); + if (index < 0 || index >= resources.length) { + throw new ResourceNotFoundException("Unknown resource at index '" + index + + "'. Valid resources are " + Arrays.toString(resources)); + } + ResourceInformation.copy(resourceInformation, resources[index]); + } + + /** + * Set the value of a resource in the ResourceInformation object. The unit of + * the value is assumed to be the one in the ResourceInformation object. + * + * @param resource the resource for which the value is provided. 
+ * @param value the value to set + * @throws ResourceNotFoundException if the resource is not found + */ + @Public + @InterfaceStability.Unstable + public void setResourceValue(String resource, long value) + throws ResourceNotFoundException { + if (resource.equals(MEMORY)) { + this.setMemorySize(value); + return; + } + if (resource.equals(VCORES)) { + this.setVirtualCores((int)value); + return; + } + + ResourceInformation storedResourceInfo = getResourceInformation(resource); + storedResourceInfo.setValue(value); + } + + /** + * Set the value of a resource in the ResourceInformation object. The unit of + * the value is assumed to be the one in the ResourceInformation object. + * + * @param index + * the resource index for which the value is provided. + * @param value + * the value to set + * @throws ResourceNotFoundException + * if the resource is not found + */ + @InterfaceAudience.Private + @InterfaceStability.Unstable + public void setResourceValue(int index, long value) + throws ResourceNotFoundException { + try { + ResourceInformation[] resources = getResources(); + resources[index].setValue(value); + } catch (ArrayIndexOutOfBoundsException e) { + checkIndexAndThrowExceptionWhenArrayOutOfBound(index); + } + } + + private void checkIndexAndThrowExceptionWhenArrayOutOfBound(int index) { + if (index < 0) { + throw new ResourceNotFoundException("Specified index must >= 0"); + } else { + throw new ResourceNotFoundException( + "Specified index beyond known #resource-types"); + } } @Override public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (!(obj instanceof Resource)) + } + if (!(obj instanceof Resource)) { return false; + } Resource other = (Resource) obj; - if (getMemorySize() != other.getMemorySize() || - getVirtualCores() != other.getVirtualCores()) { + if (getMemorySize() != other.getMemorySize() + || getVirtualCores() != other.getVirtualCores()) { return 
false; } + + if (ResourceUtils.getNumberOfKnownResourceTypes() + <= NUM_MANDATORY_RESOURCES) { + return true; + } + + ResourceInformation[] myVectors = getResources(); + ResourceInformation[] otherVectors = other.getResources(); + + if (myVectors.length != otherVectors.length) { + return false; + } + + for (int i = 0; i < myVectors.length; i++) { + ResourceInformation a = myVectors[i]; + ResourceInformation b = otherVectors[i]; + if ((a != b) && ((a == null) || !a.equals(b))) { + return false; + } + } return true; } @Override public int compareTo(Resource other) { + ResourceInformation[] thisResources = this.getResources(); + ResourceInformation[] otherResources = other.getResources(); + + // compare memory and vcores first(in that order) to preserve + // existing behaviour long diff = this.getMemorySize() - other.getMemorySize(); if (diff == 0) { diff = this.getVirtualCores() - other.getVirtualCores(); } - return diff == 0 ? 0 : (diff > 0 ? 1 : -1); + if (diff == 0) { + diff = thisResources.length - otherResources.length; + if (diff == 0) { + int maxLength = ResourceUtils.getResourceTypesArray().length; + for (int i = 0; i < maxLength; i++) { + // For memory and vcores, we can skip the loop as it's already + // compared. 
+ if (i < 2) { + continue; + } + + ResourceInformation entry = thisResources[i]; + ResourceInformation otherEntry = otherResources[i]; + if (entry.getName().equals(otherEntry.getName())) { + diff = entry.compareTo(otherEntry); + if (diff != 0) { + break; + } + } + } + } + } + return Long.compare(diff, 0); } @Override public String toString() { - return ""; + StringBuilder sb = new StringBuilder(); + sb.append(""); + return sb.toString(); + } + + @Override + public int hashCode() { + final int prime = 263167; + + int result = (int) (939769357 + + getMemorySize()); // prime * result = 939769357 initially + result = prime * result + getVirtualCores(); + for (ResourceInformation entry : getResources()) { + if (!entry.getName().equals(MEMORY) && !entry.getName().equals(VCORES)) { + result = prime * result + entry.hashCode(); + } + } + return result; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java new file mode 100644 index 00000000000..2a040948d58 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java @@ -0,0 +1,291 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import org.apache.curator.shaded.com.google.common.reflect.ClassPath; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.util.UnitsConversionUtil; + +/** + * Class to encapsulate information about a Resource - the name of the resource, + * the units(milli, micro, etc), the type(countable), and the value. + */ +public class ResourceInformation implements Comparable { + + private String name; + private String units; + private ResourceTypes resourceType; + private long value; + private long minimumAllocation; + private long maximumAllocation; + + public static final String MEMORY_URI = "memory-mb"; + public static final String VCORES_URI = "vcores"; + + public static final ResourceInformation MEMORY_MB = + ResourceInformation.newInstance(MEMORY_URI, "Mi"); + public static final ResourceInformation VCORES = + ResourceInformation.newInstance(VCORES_URI); + + /** + * Get the name for the resource. + * + * @return resource name + */ + public String getName() { + return name; + } + + /** + * Set the name for the resource. + * + * @param rName name for the resource + */ + public void setName(String rName) { + this.name = rName; + } + + /** + * Get units for the resource. + * + * @return units for the resource + */ + public String getUnits() { + return units; + } + + /** + * Set the units for the resource. 
+ * + * @param rUnits units for the resource + */ + public void setUnits(String rUnits) { + if (!UnitsConversionUtil.KNOWN_UNITS.contains(rUnits)) { + throw new IllegalArgumentException( + "Unknown unit '" + rUnits + "'. Known units are " + + UnitsConversionUtil.KNOWN_UNITS); + } + this.units = rUnits; + } + + /** + * Checking if a unit included by KNOWN_UNITS is an expensive operation. This + * can be avoided in critical path in RM. + * @param rUnits units for the resource + */ + @InterfaceAudience.Private + public void setUnitsWithoutValidation(String rUnits) { + this.units = rUnits; + } + + /** + * Get the resource type. + * + * @return the resource type + */ + public ResourceTypes getResourceType() { + return resourceType; + } + + /** + * Set the resource type. + * + * @param type the resource type + */ + public void setResourceType(ResourceTypes type) { + this.resourceType = type; + } + + /** + * Get the value for the resource. + * + * @return the resource value + */ + public long getValue() { + return value; + } + + /** + * Set the value for the resource. + * + * @param rValue the resource value + */ + public void setValue(long rValue) { + this.value = rValue; + } + + /** + * Get the minimum allocation for the resource. + * + * @return the minimum allocation for the resource + */ + public long getMinimumAllocation() { + return minimumAllocation; + } + + /** + * Set the minimum allocation for the resource. + * + * @param minimumAllocation the minimum allocation for the resource + */ + public void setMinimumAllocation(long minimumAllocation) { + this.minimumAllocation = minimumAllocation; + } + + /** + * Get the maximum allocation for the resource. + * + * @return the maximum allocation for the resource + */ + public long getMaximumAllocation() { + return maximumAllocation; + } + + /** + * Set the maximum allocation for the resource. 
+ * + * @param maximumAllocation the maximum allocation for the resource + */ + public void setMaximumAllocation(long maximumAllocation) { + this.maximumAllocation = maximumAllocation; + } + + /** + * Create a new instance of ResourceInformation from another object. + * + * @param other the object from which the new object should be created + * @return the new ResourceInformation object + */ + public static ResourceInformation newInstance(ResourceInformation other) { + ResourceInformation ret = new ResourceInformation(); + copy(other, ret); + return ret; + } + + public static ResourceInformation newInstance(String name, String units, + long value, ResourceTypes type, long minimumAllocation, + long maximumAllocation) { + ResourceInformation ret = new ResourceInformation(); + ret.setName(name); + ret.setResourceType(type); + ret.setUnits(units); + ret.setValue(value); + ret.setMinimumAllocation(minimumAllocation); + ret.setMaximumAllocation(maximumAllocation); + return ret; + } + + public static ResourceInformation newInstance(String name, String units, + long value) { + return ResourceInformation + .newInstance(name, units, value, ResourceTypes.COUNTABLE, 0L, + Long.MAX_VALUE); + } + + public static ResourceInformation newInstance(String name, String units) { + return ResourceInformation + .newInstance(name, units, 0L, ResourceTypes.COUNTABLE, 0L, + Long.MAX_VALUE); + } + + public static ResourceInformation newInstance(String name, long value) { + return ResourceInformation + .newInstance(name, "", value, ResourceTypes.COUNTABLE, 0L, + Long.MAX_VALUE); + } + + public static ResourceInformation newInstance(String name) { + return ResourceInformation.newInstance(name, ""); + } + + /** + * Copies the content of the source ResourceInformation object to the + * destination object, overwriting all properties of the destination object. 
+ * @param src Source ResourceInformation object + * @param dst Destination ResourceInformation object + */ + + public static void copy(ResourceInformation src, ResourceInformation dst) { + dst.setName(src.getName()); + dst.setResourceType(src.getResourceType()); + dst.setUnits(src.getUnits()); + dst.setValue(src.getValue()); + dst.setMinimumAllocation(src.getMinimumAllocation()); + dst.setMaximumAllocation(src.getMaximumAllocation()); + } + + @Override + public String toString() { + return "name: " + this.name + ", units: " + this.units + ", type: " + + resourceType + ", value: " + value + ", minimum allocation: " + + minimumAllocation + ", maximum allocation: " + maximumAllocation; + } + + public String getShorthandRepresentation() { + return "" + this.value + this.units; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof ResourceInformation)) { + return false; + } + ResourceInformation r = (ResourceInformation) obj; + if (!this.name.equals(r.getName()) + || !this.resourceType.equals(r.getResourceType())) { + return false; + } + if (this.units.equals(r.units)) { + return this.value == r.value; + } + return (UnitsConversionUtil.compare(this.units, this.value, r.units, + r.value) == 0); + } + + @Override + public int hashCode() { + final int prime = 263167; + int result = + 939769357 + name.hashCode(); // prime * result = 939769357 initially + result = prime * result + resourceType.hashCode(); + result = prime * result + units.hashCode(); + result = prime * result + Long.hashCode(value); + return result; + } + + @Override + public int compareTo(ResourceInformation other) { + int diff = this.name.compareTo(other.name); + if (diff == 0) { + diff = UnitsConversionUtil + .compare(this.units, this.value, other.units, other.value); + if (diff == 0) { + diff = this.resourceType.compareTo(other.resourceType); + } + } + return diff; + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java index 5bedc879ee3..21fa15f14aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java @@ -21,6 +21,7 @@ import java.io.Serializable; import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -98,7 +99,22 @@ public static ResourceRequest newInstance(Priority priority, String hostName, .resourceName(hostName).capability(capability) .numContainers(numContainers).relaxLocality(relaxLocality) .nodeLabelExpression(labelExpression) - .executionTypeRequest(executionTypeRequest).build(); + .executionTypeRequest(executionTypeRequest).profileCapability(null) + .build(); + } + + @Public + @Unstable + public static ResourceRequest newInstance(Priority priority, String hostName, + Resource capability, int numContainers, boolean relaxLocality, + String labelExpression, ExecutionTypeRequest executionTypeRequest, + ProfileCapability profile) { + return ResourceRequest.newBuilder().priority(priority) + .resourceName(hostName).capability(capability) + .numContainers(numContainers).relaxLocality(relaxLocality) + .nodeLabelExpression(labelExpression) + .executionTypeRequest(executionTypeRequest).profileCapability(profile) + .build(); } @Public @@ -124,6 +140,7 @@ private ResourceRequestBuilder() { resourceRequest.setRelaxLocality(true); resourceRequest.setExecutionTypeRequest( 
ExecutionTypeRequest.newInstance()); + resourceRequest.setProfileCapability(null); } /** @@ -238,6 +255,21 @@ public ResourceRequestBuilder allocationRequestId( } /** + * Set the resourceProfile of the request. + * @see ResourceRequest#setProfileCapability(ProfileCapability) + * @param profileCapability + * profileCapability of the request + * @return {@link ResourceRequestBuilder} + */ + @Public + @InterfaceStability.Unstable + public ResourceRequestBuilder profileCapability( + ProfileCapability profileCapability) { + resourceRequest.setProfileCapability(profileCapability); + return this; + } + + /** * Return generated {@link ResourceRequest} object. * @return {@link ResourceRequest} */ @@ -454,6 +486,14 @@ public ExecutionTypeRequest getExecutionTypeRequest() { @Evolving public abstract void setNodeLabelExpression(String nodelabelExpression); + @Public + @InterfaceStability.Unstable + public abstract ProfileCapability getProfileCapability(); + + @Public + @InterfaceStability.Unstable + public abstract void setProfileCapability(ProfileCapability p); + /** * Get the optional ID corresponding to this allocation request. This * ID is an identifier for different {@code ResourceRequest}s from the same @@ -529,12 +569,14 @@ public int hashCode() { Resource capability = getCapability(); String hostName = getResourceName(); Priority priority = getPriority(); + ProfileCapability profile = getProfileCapability(); result = prime * result + ((capability == null) ? 0 : capability.hashCode()); result = prime * result + ((hostName == null) ? 0 : hostName.hashCode()); result = prime * result + getNumContainers(); result = prime * result + ((priority == null) ? 0 : priority.hashCode()); result = prime * result + Long.valueOf(getAllocationRequestId()).hashCode(); + result = prime * result + ((profile == null) ? 
0 : profile.hashCode()); return result; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java new file mode 100644 index 00000000000..b6f7f147658 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java @@ -0,0 +1,197 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.util.Records; + +/** + * Class to encapsulate information about a ResourceType - the name of the + * resource, the units(milli, micro, etc), the type(countable). + */ +public abstract class ResourceTypeInfo implements Comparable { + + /** + * Get the name for the resource. + * + * @return resource name + */ + public abstract String getName(); + + /** + * Set the name for the resource. 
+ * + * @param rName + * name for the resource + */ + public abstract void setName(String rName); + + /** + * Get units for the resource. + * + * @return units for the resource + */ + public abstract String getDefaultUnit(); + + /** + * Set the units for the resource. + * + * @param rUnits + * units for the resource + */ + public abstract void setDefaultUnit(String rUnits); + + /** + * Get the resource type. + * + * @return the resource type + */ + public abstract ResourceTypes getResourceType(); + + /** + * Set the resource type. + * + * @param type + * the resource type + */ + public abstract void setResourceType(ResourceTypes type); + + /** + * Create a new instance of ResourceTypeInfo from another object. + * + * @param other + * the object from which the new object should be created + * @return the new ResourceTypeInfo object + */ + @InterfaceAudience.Public + @InterfaceStability.Unstable + public static ResourceTypeInfo newInstance(ResourceTypeInfo other) { + ResourceTypeInfo resourceType = Records.newRecord(ResourceTypeInfo.class); + copy(other, resourceType); + return resourceType; + } + + /** + * Create a new instance of ResourceTypeInfo from name, units and type. + * + * @param name name of resource type + * @param units units of resource type + * @param type such as countable, etc. + * @return the new ResourceTypeInfo object + */ + @InterfaceAudience.Public + @InterfaceStability.Unstable + public static ResourceTypeInfo newInstance(String name, String units, + ResourceTypes type) { + ResourceTypeInfo resourceType = Records.newRecord(ResourceTypeInfo.class); + resourceType.setName(name); + resourceType.setResourceType(type); + resourceType.setDefaultUnit(units); + return resourceType; + } + + /** + * Create a new instance of ResourceTypeInfo from name, units. 
+ * + * @param name name of resource type + * @param units units of resource type + * @return the new ResourceTypeInfo object + */ + @InterfaceAudience.Public + @InterfaceStability.Unstable + public static ResourceTypeInfo newInstance(String name, String units) { + return ResourceTypeInfo.newInstance(name, units, ResourceTypes.COUNTABLE); + } + + /** + * Create a new instance of ResourceTypeInfo from name. + * + * @param name name of resource type + * @return the new ResourceTypeInfo object + */ + @InterfaceAudience.Public + @InterfaceStability.Unstable + public static ResourceTypeInfo newInstance(String name) { + return ResourceTypeInfo.newInstance(name, ""); + } + + /** + * Copies the content of the source ResourceTypeInfo object to the + * destination object, overwriting all properties of the destination object. + * + * @param src + * Source ResourceTypeInfo object + * @param dst + * Destination ResourceTypeInfo object + */ + + public static void copy(ResourceTypeInfo src, ResourceTypeInfo dst) { + dst.setName(src.getName()); + dst.setResourceType(src.getResourceType()); + dst.setDefaultUnit(src.getDefaultUnit()); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(""); + return sb.toString(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (!(obj instanceof ResourceTypeInfo)) { + return false; + } + ResourceTypeInfo r = (ResourceTypeInfo) obj; + return this.getName().equals(r.getName()) + && this.getResourceType().equals(r.getResourceType()) + && this.getDefaultUnit().equals(r.getDefaultUnit()); + } + + @Override + public int hashCode() { + final int prime = 47; + int result = prime + getName().hashCode(); + result = prime * result + getResourceType().hashCode(); + return result; + } + + @Override + public int compareTo(ResourceTypeInfo other) { + int diff = this.getName().compareTo(other.getName()); + if (diff == 0) { + 
diff = this.getDefaultUnit().compareTo(other.getDefaultUnit()); + if (diff == 0) { + diff = this.getResourceType().compareTo(other.getResourceType()); + } + } + return diff; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/BaseResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/BaseResource.java new file mode 100644 index 00000000000..4ed12ef7b26 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/BaseResource.java @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.records.impl; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; + +import java.util.Arrays; + +import static org.apache.hadoop.yarn.api.records.ResourceInformation.MEMORY_MB; + +/** + *

+ * BaseResource extends Resource to handle base resources such + * as memory and CPU. + * TODO: We have a long term plan to use AbstractResource when additional + * resource types are to be handled as well. + *

+ * + *

+ * Currently it models both memory and CPU. + *

+ * + *

+ * The unit for memory is megabytes. CPU is modeled with virtual cores (vcores), + * a unit for expressing parallelism. A node's capacity should be configured + * with virtual cores equal to its number of physical cores. A container should + * be requested with the number of cores it can saturate, i.e. the average + * number of threads it expects to have runnable at a time. + *

+ * + *

+ * Virtual cores take integer values and thus currently CPU-scheduling is very + * coarse. A complementary axis for CPU requests that represents processing + * power will likely be added in the future to enable finer-grained resource + * configuration. + *

+ * + * @see Resource + */ +@InterfaceAudience.Private +@Unstable +public class BaseResource extends Resource { + + private ResourceInformation memoryResInfo; + private ResourceInformation vcoresResInfo; + + public BaseResource() { + // Base constructor. + } + + public BaseResource(long memory, long vcores) { + this.memoryResInfo = BaseResource.newDefaultMemoryInformation(memory); + this.vcoresResInfo = BaseResource.newDefaultVCoresInformation(vcores); + + resources = new ResourceInformation[NUM_MANDATORY_RESOURCES]; + resources[MEMORY_INDEX] = memoryResInfo; + resources[VCORES_INDEX] = vcoresResInfo; + } + + public static ResourceInformation newDefaultMemoryInformation(long value) { + ResourceInformation ri = new ResourceInformation(); + ri.setName(ResourceInformation.MEMORY_URI); + ri.setValue(value); + ri.setResourceType(ResourceTypes.COUNTABLE); + ri.setUnitsWithoutValidation(MEMORY_MB.getUnits()); + ri.setMinimumAllocation(0); + ri.setMaximumAllocation(Long.MAX_VALUE); + return ri; + } + + public static ResourceInformation newDefaultVCoresInformation(long value) { + ResourceInformation ri = new ResourceInformation(); + ri.setName(ResourceInformation.VCORES_URI); + ri.setValue(value); + ri.setResourceType(ResourceTypes.COUNTABLE); + ri.setUnitsWithoutValidation(""); + ri.setMinimumAllocation(0); + ri.setMaximumAllocation(Long.MAX_VALUE); + return ri; + } + + @Override + @SuppressWarnings("deprecation") + public int getMemory() { + return (int) memoryResInfo.getValue(); + } + + @Override + @SuppressWarnings("deprecation") + public void setMemory(int memory) { + this.memoryResInfo.setValue(memory); + } + + @Override + public long getMemorySize() { + return memoryResInfo.getValue(); + } + + @Override + public void setMemorySize(long memory) { + this.memoryResInfo.setValue(memory); + } + + @Override + public int getVirtualCores() { + return (int) vcoresResInfo.getValue(); + } + + @Override + public void setVirtualCores(int vcores) { + 
this.vcoresResInfo.setValue(vcores); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(""); + return sb.toString(); + } + + @Override + public int hashCode() { + final int prime = 263167; + + int result = (int) (939769357 + + getMemorySize()); // prime * result = 939769357 initially + result = prime * result + getVirtualCores(); + return result; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/package-info.java new file mode 100644 index 00000000000..b2420bc50f8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/package-info.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Package org.apache.hadoop.yarn.api.records.impl contains classes + * which define basic resources. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +package org.apache.hadoop.yarn.api.records.impl; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 49448217e9c..c8c61ff2477 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -61,8 +61,27 @@ public static final String CORE_SITE_CONFIGURATION_FILE = "core-site.xml"; @Private + public static final String RESOURCE_TYPES_CONFIGURATION_FILE = + "resource-types.xml"; + + @Private + public static final String NODE_RESOURCES_CONFIGURATION_FILE = + "node-resources.xml"; + + @Private public static final List RM_CONFIGURATION_FILES = Collections.unmodifiableList(Arrays.asList( + RESOURCE_TYPES_CONFIGURATION_FILE, + DR_CONFIGURATION_FILE, + CS_CONFIGURATION_FILE, + HADOOP_POLICY_CONFIGURATION_FILE, + YARN_SITE_CONFIGURATION_FILE, + CORE_SITE_CONFIGURATION_FILE)); + + @Private + public static final List NM_CONFIGURATION_FILES = + Collections.unmodifiableList(Arrays.asList( + NODE_RESOURCES_CONFIGURATION_FILE, DR_CONFIGURATION_FILE, CS_CONFIGURATION_FILE, HADOOP_POLICY_CONFIGURATION_FILE, @@ -106,6 +125,16 @@ private static void addDeprecatedKeys() { public static final String YARN_PREFIX = "yarn."; + ///////////////////////////// + // Resource types configs + //////////////////////////// + + public static final String RESOURCE_TYPES = + YarnConfiguration.YARN_PREFIX + "resource-types"; + + public static final String NM_RESOURCES_PREFIX = + YarnConfiguration.NM_PREFIX + 
"resource-type."; + /** Delay before deleting resource to ease debugging of NM issues */ public static final String DEBUG_NM_DELETE_DELAY_SEC = YarnConfiguration.NM_PREFIX + "delete.debug-delay-sec"; @@ -850,6 +879,29 @@ public static boolean isAclEnabled(Configuration conf) { public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser."; /** + * Enable/disable resource profiles. + */ + @Public + @Unstable + public static final String RM_RESOURCE_PROFILES_ENABLED = + RM_PREFIX + "resource-profiles.enabled"; + @Public + @Unstable + public static final boolean DEFAULT_RM_RESOURCE_PROFILES_ENABLED = false; + + /** + * File containing resource profiles. + */ + @Public + @Unstable + public static final String RM_RESOURCE_PROFILES_SOURCE_FILE = + RM_PREFIX + "resource-profiles.source-file"; + @Public + @Unstable + public static final String DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE = + "resource-profiles.json"; + + /** * Timeout in seconds for YARN node graceful decommission. * This is the maximal time to wait for running containers and applications * to complete before transition a DECOMMISSIONING node into DECOMMISSIONED. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java new file mode 100644 index 00000000000..b5fece7dc8c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.exceptions; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This exception is thrown when details of an unknown resource type + * are requested. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class ResourceNotFoundException extends YarnRuntimeException { + + private static final long serialVersionUID = 10081982L; + + public ResourceNotFoundException(String message) { + super(message); + } + + public ResourceNotFoundException(Throwable cause) { + super(cause); + } + + public ResourceNotFoundException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java new file mode 100644 index 00000000000..558e075bea5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.exceptions; + +/** + * This exception is thrown when the client requests information about + * ResourceProfiles in the + * {@link org.apache.hadoop.yarn.api.ApplicationClientProtocol} but resource + * profiles is not enabled on the RM. + * + */ +public class ResourceProfilesNotEnabledException extends YarnException { + + private static final long serialVersionUID = 13498237L; + + public ResourceProfilesNotEnabledException(Throwable cause) { + super(cause); + } + + public ResourceProfilesNotEnabledException(String message) { + super(message); + } + + public ResourceProfilesNotEnabledException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YARNFeatureNotEnabledException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YARNFeatureNotEnabledException.java new file mode 100644 index 00000000000..62340fea363 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YARNFeatureNotEnabledException.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.exceptions; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This exception is thrown when a feature is being used which is not enabled + * yet. + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class YARNFeatureNotEnabledException extends YarnException { + private static final long serialVersionUID = 898023752676L; + + public YARNFeatureNotEnabledException(Throwable cause) { + super(cause); + } + + public YARNFeatureNotEnabledException(String message) { + super(message); + } + + public YARNFeatureNotEnabledException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java new file mode 100644 index 00000000000..7a212e163d9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java @@ -0,0 +1,221 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.util; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.math.BigInteger; +import java.util.*; + +/** + * A util to convert values in one unit to another. Units refers to whether + * the value is expressed in pico, nano, etc. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class UnitsConversionUtil { + + /** + * Helper class for encapsulating conversion values. 
+ */ + public static class Converter { + private long numerator; + private long denominator; + + Converter(long n, long d) { + this.numerator = n; + this.denominator = d; + } + } + + private static final String[] UNITS = {"p", "n", "u", "m", "", "k", "M", "G", + "T", "P", "Ki", "Mi", "Gi", "Ti", "Pi"}; + private static final List SORTED_UNITS = Arrays.asList(UNITS); + public static final Set KNOWN_UNITS = createKnownUnitsSet(); + private static final Converter PICO = + new Converter(1L, 1000L * 1000L * 1000L * 1000L); + private static final Converter NANO = + new Converter(1L, 1000L * 1000L * 1000L); + private static final Converter MICRO = new Converter(1L, 1000L * 1000L); + private static final Converter MILLI = new Converter(1L, 1000L); + private static final Converter BASE = new Converter(1L, 1L); + private static final Converter KILO = new Converter(1000L, 1L); + private static final Converter MEGA = new Converter(1000L * 1000L, 1L); + private static final Converter GIGA = + new Converter(1000L * 1000L * 1000L, 1L); + private static final Converter TERA = + new Converter(1000L * 1000L * 1000L * 1000L, 1L); + private static final Converter PETA = + new Converter(1000L * 1000L * 1000L * 1000L * 1000L, 1L); + + private static final Converter KILO_BINARY = new Converter(1024L, 1L); + private static final Converter MEGA_BINARY = new Converter(1024L * 1024L, 1L); + private static final Converter GIGA_BINARY = + new Converter(1024L * 1024L * 1024L, 1L); + private static final Converter TERA_BINARY = + new Converter(1024L * 1024L * 1024L * 1024L, 1L); + private static final Converter PETA_BINARY = + new Converter(1024L * 1024L * 1024L * 1024L * 1024L, 1L); + + private static Set createKnownUnitsSet() { + Set ret = new HashSet<>(); + ret.addAll(Arrays.asList(UNITS)); + return ret; + } + + private static Converter getConverter(String unit) { + switch (unit) { + case "p": + return PICO; + case "n": + return NANO; + case "u": + return MICRO; + case "m": + return MILLI; + 
case "": + return BASE; + case "k": + return KILO; + case "M": + return MEGA; + case "G": + return GIGA; + case "T": + return TERA; + case "P": + return PETA; + case "Ki": + return KILO_BINARY; + case "Mi": + return MEGA_BINARY; + case "Gi": + return GIGA_BINARY; + case "Ti": + return TERA_BINARY; + case "Pi": + return PETA_BINARY; + default: + throw new IllegalArgumentException( + "Unknown unit '" + unit + "'. Known units are " + KNOWN_UNITS); + } + } + + /** + * Converts a value from one unit to another. Supported units can be obtained + * by inspecting the KNOWN_UNITS set. + * + * @param fromUnit the unit of the from value + * @param toUnit the target unit + * @param fromValue the value you wish to convert + * @return the value in toUnit + */ + public static long convert(String fromUnit, String toUnit, long fromValue) { + if (toUnit == null || fromUnit == null) { + throw new IllegalArgumentException("One or more arguments are null"); + } + + if (fromUnit.equals(toUnit)) { + return fromValue; + } + Converter fc = getConverter(fromUnit); + Converter tc = getConverter(toUnit); + long numerator = fc.numerator * tc.denominator; + long denominator = fc.denominator * tc.numerator; + long numeratorMultiplierLimit = Long.MAX_VALUE / numerator; + if (numerator < denominator) { + if (numeratorMultiplierLimit < fromValue) { + String overflowMsg = + "Converting " + fromValue + " from '" + fromUnit + "' to '" + toUnit + + "' will result in an overflow of Long"; + throw new IllegalArgumentException(overflowMsg); + } + return (fromValue * numerator) / denominator; + } + if (numeratorMultiplierLimit > fromValue) { + return (numerator * fromValue) / denominator; + } + long tmp = numerator / denominator; + if ((Long.MAX_VALUE / tmp) < fromValue) { + String overflowMsg = + "Converting " + fromValue + " from '" + fromUnit + "' to '" + toUnit + + "' will result in an overflow of Long"; + throw new IllegalArgumentException(overflowMsg); + } + return fromValue * tmp; + } + + /** + * 
Compare a value in a given unit with a value in another unit. The return + * value is equivalent to the value returned by compareTo. + * + * @param unitA first unit + * @param valueA first value + * @param unitB second unit + * @param valueB second value + * @return +1, 0 or -1 depending on whether the relationship is greater than, + * equal to or lesser than + */ + public static int compare(String unitA, long valueA, String unitB, + long valueB) { + if (unitA == null || unitB == null || !KNOWN_UNITS.contains(unitA) + || !KNOWN_UNITS.contains(unitB)) { + throw new IllegalArgumentException("Units cannot be null"); + } + if (!KNOWN_UNITS.contains(unitA)) { + throw new IllegalArgumentException("Unknown unit '" + unitA + "'"); + } + if (!KNOWN_UNITS.contains(unitB)) { + throw new IllegalArgumentException("Unknown unit '" + unitB + "'"); + } + if (unitA.equals(unitB)) { + return Long.compare(valueA, valueB); + } + Converter unitAC = getConverter(unitA); + Converter unitBC = getConverter(unitB); + int unitAPos = SORTED_UNITS.indexOf(unitA); + int unitBPos = SORTED_UNITS.indexOf(unitB); + try { + long tmpA = valueA; + long tmpB = valueB; + if (unitAPos < unitBPos) { + tmpB = convert(unitB, unitA, valueB); + } else { + tmpA = convert(unitA, unitB, valueA); + } + return Long.compare(tmpA, tmpB); + } catch (IllegalArgumentException ie) { + BigInteger tmpA = BigInteger.valueOf(valueA); + BigInteger tmpB = BigInteger.valueOf(valueB); + if (unitAPos < unitBPos) { + tmpB = tmpB.multiply(BigInteger.valueOf(unitBC.numerator)); + tmpB = tmpB.multiply(BigInteger.valueOf(unitAC.denominator)); + tmpB = tmpB.divide(BigInteger.valueOf(unitBC.denominator)); + tmpB = tmpB.divide(BigInteger.valueOf(unitAC.numerator)); + } else { + tmpA = tmpA.multiply(BigInteger.valueOf(unitAC.numerator)); + tmpA = tmpA.multiply(BigInteger.valueOf(unitBC.denominator)); + tmpA = tmpA.divide(BigInteger.valueOf(unitAC.denominator)); + tmpA = tmpA.divide(BigInteger.valueOf(unitBC.numerator)); + } + return 
tmpA.compareTo(tmpB); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java new file mode 100644 index 00000000000..94c2e97e9f0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java @@ -0,0 +1,585 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.util.resource; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; +import org.apache.hadoop.yarn.conf.ConfigurationProvider; +import org.apache.hadoop.yarn.conf.ConfigurationProviderFactory; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Helper class to read the resource-types to be supported by the system. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class ResourceUtils { + + public static final String UNITS = ".units"; + public static final String TYPE = ".type"; + public static final String MINIMUM_ALLOCATION = ".minimum-allocation"; + public static final String MAXIMUM_ALLOCATION = ".maximum-allocation"; + + private static final String MEMORY = ResourceInformation.MEMORY_MB.getName(); + private static final String VCORES = ResourceInformation.VCORES.getName(); + + private static final Set DISALLOWED_NAMES = new HashSet<>(); + static { + DISALLOWED_NAMES.add("memory"); + DISALLOWED_NAMES.add(MEMORY); + DISALLOWED_NAMES.add(VCORES); + } + + private static volatile boolean initializedResources = false; + private static final Map RESOURCE_NAME_TO_INDEX = + new ConcurrentHashMap(); + private static volatile Map resourceTypes; + private static volatile String[] resourceNamesArray; + private static volatile ResourceInformation[] resourceTypesArray; + private static volatile boolean initializedNodeResources = false; + private static volatile Map readOnlyNodeResources; + private static volatile int numKnownResourceTypes = -1; + + static final Log LOG = LogFactory.getLog(ResourceUtils.class); + + private ResourceUtils() { + } + + private static void checkMandatatoryResources( + Map resourceInformationMap) + throws YarnRuntimeException { + if (resourceInformationMap.containsKey(MEMORY)) { + ResourceInformation memInfo = resourceInformationMap.get(MEMORY); + String memUnits = ResourceInformation.MEMORY_MB.getUnits(); + ResourceTypes memType = ResourceInformation.MEMORY_MB.getResourceType(); + if (!memInfo.getUnits().equals(memUnits) || !memInfo.getResourceType() + .equals(memType)) { + throw new YarnRuntimeException( + "Attempt to re-define mandatory resource 'memory-mb'. 
It can only" + + " be of type 'COUNTABLE' and have units 'Mi'."); + } + } + + if (resourceInformationMap.containsKey(VCORES)) { + ResourceInformation vcoreInfo = resourceInformationMap.get(VCORES); + String vcoreUnits = ResourceInformation.VCORES.getUnits(); + ResourceTypes vcoreType = ResourceInformation.VCORES.getResourceType(); + if (!vcoreInfo.getUnits().equals(vcoreUnits) || !vcoreInfo + .getResourceType().equals(vcoreType)) { + throw new YarnRuntimeException( + "Attempt to re-define mandatory resource 'vcores'. It can only be" + + " of type 'COUNTABLE' and have units ''(no units)."); + } + } + } + + private static void addManadtoryResources( + Map res) { + ResourceInformation ri; + if (!res.containsKey(MEMORY)) { + LOG.info("Adding resource type - name = " + MEMORY + ", units = " + + ResourceInformation.MEMORY_MB.getUnits() + ", type = " + + ResourceTypes.COUNTABLE); + ri = ResourceInformation + .newInstance(MEMORY, + ResourceInformation.MEMORY_MB.getUnits()); + res.put(MEMORY, ri); + } + if (!res.containsKey(VCORES)) { + LOG.info("Adding resource type - name = " + VCORES + ", units = , type = " + + ResourceTypes.COUNTABLE); + ri = + ResourceInformation.newInstance(VCORES); + res.put(VCORES, ri); + } + } + + private static void setMinimumAllocationForMandatoryResources( + Map res, Configuration conf) { + String[][] resourceTypesKeys = { + {ResourceInformation.MEMORY_MB.getName(), + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + String.valueOf( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB), + ResourceInformation.MEMORY_MB.getName()}, + {ResourceInformation.VCORES.getName(), + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + String.valueOf( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES), + ResourceInformation.VCORES.getName()}}; + for (String[] arr : resourceTypesKeys) { + String resourceTypesKey = + YarnConfiguration.RESOURCE_TYPES + "." 
+ arr[0] + MINIMUM_ALLOCATION; + long minimumResourceTypes = conf.getLong(resourceTypesKey, -1); + long minimumConf = conf.getLong(arr[1], -1); + long minimum; + if (minimumResourceTypes != -1) { + minimum = minimumResourceTypes; + if (minimumConf != -1) { + LOG.warn("Using minimum allocation for memory specified in " + + "resource-types config file with key " + + minimumResourceTypes + ", ignoring minimum specified using " + + arr[1]); + } + } else { + minimum = conf.getLong(arr[1], Long.parseLong(arr[2])); + } + ResourceInformation ri = res.get(arr[3]); + ri.setMinimumAllocation(minimum); + } + } + + private static void setMaximumAllocationForMandatoryResources( + Map res, Configuration conf) { + String[][] resourceTypesKeys = { + {ResourceInformation.MEMORY_MB.getName(), + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, + String.valueOf( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB), + ResourceInformation.MEMORY_MB.getName()}, + {ResourceInformation.VCORES.getName(), + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, + String.valueOf( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES), + ResourceInformation.VCORES.getName()}}; + for (String[] arr : resourceTypesKeys) { + String resourceTypesKey = + YarnConfiguration.RESOURCE_TYPES + "." 
+ arr[0] + MAXIMUM_ALLOCATION; + long maximumResourceTypes = conf.getLong(resourceTypesKey, -1); + long maximumConf = conf.getLong(arr[1], -1); + long maximum; + if (maximumResourceTypes != -1) { + maximum = maximumResourceTypes; + if (maximumConf != -1) { + LOG.warn("Using maximum allocation for memory specified in " + + "resource-types config file with key " + + maximumResourceTypes + ", ignoring maximum specified using " + + arr[1]); + } + } else { + maximum = conf.getLong(arr[1], Long.parseLong(arr[2])); + } + ResourceInformation ri = res.get(arr[3]); + ri.setMaximumAllocation(maximum); + } + } + + @VisibleForTesting + static void initializeResourcesMap(Configuration conf) { + + Map resourceInformationMap = new HashMap<>(); + String[] resourceNames = conf.getStrings(YarnConfiguration.RESOURCE_TYPES); + + if (resourceNames != null && resourceNames.length != 0) { + for (String resourceName : resourceNames) { + String resourceUnits = conf.get( + YarnConfiguration.RESOURCE_TYPES + "." + resourceName + UNITS, ""); + String resourceTypeName = conf.get( + YarnConfiguration.RESOURCE_TYPES + "." + resourceName + TYPE, + ResourceTypes.COUNTABLE.toString()); + Long minimumAllocation = conf.getLong( + YarnConfiguration.RESOURCE_TYPES + "." + resourceName + + MINIMUM_ALLOCATION, 0L); + Long maximumAllocation = conf.getLong( + YarnConfiguration.RESOURCE_TYPES + "." + resourceName + + MAXIMUM_ALLOCATION, Long.MAX_VALUE); + if (resourceName == null || resourceName.isEmpty() + || resourceUnits == null || resourceTypeName == null) { + throw new YarnRuntimeException( + "Incomplete configuration for resource type '" + resourceName + + "'. One of name, units or type is configured incorrectly."); + } + if (DISALLOWED_NAMES.contains(resourceName)) { + throw new YarnRuntimeException( + "Resource type cannot be named '" + resourceName + + "'. 
That name is disallowed."); + } + ResourceTypes resourceType = ResourceTypes.valueOf(resourceTypeName); + LOG.info("Adding resource type - name = " + resourceName + ", units = " + + resourceUnits + ", type = " + resourceTypeName); + if (resourceInformationMap.containsKey(resourceName)) { + throw new YarnRuntimeException( + "Error in config, key '" + resourceName + "' specified twice"); + } + resourceInformationMap.put(resourceName, ResourceInformation + .newInstance(resourceName, resourceUnits, 0L, resourceType, + minimumAllocation, maximumAllocation)); + } + } + checkMandatatoryResources(resourceInformationMap); + addManadtoryResources(resourceInformationMap); + setMinimumAllocationForMandatoryResources(resourceInformationMap, conf); + setMaximumAllocationForMandatoryResources(resourceInformationMap, conf); + resourceTypes = Collections.unmodifiableMap(resourceInformationMap); + updateKnownResources(); + updateResourceTypeIndex(); + } + + private static void updateKnownResources() { + // Update resource names. 
+ resourceNamesArray = new String[resourceTypes.size()]; + resourceTypesArray = new ResourceInformation[resourceTypes.size()]; + + int index = 2; + for (ResourceInformation resInfo : resourceTypes.values()) { + if (resInfo.getName().equals(MEMORY)) { + resourceTypesArray[0] = ResourceInformation + .newInstance(resourceTypes.get(MEMORY)); + resourceNamesArray[0] = MEMORY; + } else if (resInfo.getName().equals(VCORES)) { + resourceTypesArray[1] = ResourceInformation + .newInstance(resourceTypes.get(VCORES)); + resourceNamesArray[1] = VCORES; + } else { + resourceTypesArray[index] = ResourceInformation.newInstance(resInfo); + resourceNamesArray[index] = resInfo.getName(); + index++; + } + } + } + + private static void updateResourceTypeIndex() { + RESOURCE_NAME_TO_INDEX.clear(); + + for (int index = 0; index < resourceTypesArray.length; index++) { + ResourceInformation resInfo = resourceTypesArray[index]; + RESOURCE_NAME_TO_INDEX.put(resInfo.getName(), index); + } + } + + /** + * Get associate index of resource types such memory, cpu etc. + * This could help to access each resource types in a resource faster. + * @return Index map for all Resource Types. + */ + public static Map getResourceTypeIndex() { + return RESOURCE_NAME_TO_INDEX; + } + + /** + * Get the resource types to be supported by the system. + * @return A map of the resource name to a ResouceInformation object + * which contains details such as the unit. + */ + public static Map getResourceTypes() { + return getResourceTypes(null, + YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE); + } + + /** + * Get resource names array, this is mostly for performance perspective. Never + * modify returned array. 
+ * + * @return resourceNamesArray + */ + public static String[] getResourceNamesArray() { + initializeResourceTypesIfNeeded(null, + YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE); + return resourceNamesArray; + } + + public static ResourceInformation[] getResourceTypesArray() { + initializeResourceTypesIfNeeded(null, + YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE); + return resourceTypesArray; + } + + public static int getNumberOfKnownResourceTypes() { + if (numKnownResourceTypes < 0) { + initializeResourceTypesIfNeeded(null, + YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE); + } + return numKnownResourceTypes; + } + + private static Map getResourceTypes( + Configuration conf) { + return getResourceTypes(conf, + YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE); + } + + private static void initializeResourceTypesIfNeeded(Configuration conf, + String resourceFile) { + if (!initializedResources) { + synchronized (ResourceUtils.class) { + if (!initializedResources) { + if (conf == null) { + conf = new YarnConfiguration(); + } + try { + addResourcesFileToConf(resourceFile, conf); + LOG.debug("Found " + resourceFile + ", adding to configuration"); + initializeResourcesMap(conf); + initializedResources = true; + } catch (FileNotFoundException fe) { + LOG.info("Unable to find '" + resourceFile + + "'. 
Falling back to memory and vcores as resources."); + initializeResourcesMap(conf); + initializedResources = true; + } + } + } + } + numKnownResourceTypes = resourceTypes.size(); + } + + private static Map getResourceTypes( + Configuration conf, String resourceFile) { + initializeResourceTypesIfNeeded(conf, resourceFile); + return resourceTypes; + } + + private static InputStream getConfInputStream(String resourceFile, + Configuration conf) throws IOException, YarnException { + + ConfigurationProvider provider = + ConfigurationProviderFactory.getConfigurationProvider(conf); + try { + provider.init(conf); + } catch (Exception e) { + throw new IOException(e); + } + + InputStream ris = provider.getConfigurationInputStream(conf, resourceFile); + if (ris == null) { + if (conf.getResource(resourceFile) == null) { + throw new FileNotFoundException("Unable to find " + resourceFile); + } + throw new IOException( + "Unable to open resource types file '" + resourceFile + + "'. Using provider " + provider); + } + return ris; + } + + private static void addResourcesFileToConf(String resourceFile, + Configuration conf) throws FileNotFoundException { + try { + InputStream ris = getConfInputStream(resourceFile, conf); + LOG.debug("Found " + resourceFile + ", adding to configuration"); + conf.addResource(ris); + } catch (FileNotFoundException fe) { + throw fe; + } catch (IOException ie) { + LOG.fatal("Exception trying to read resource types configuration '" + + resourceFile + "'.", ie); + throw new YarnRuntimeException(ie); + } catch (YarnException ye) { + LOG.fatal("YARN Exception trying to read resource types configuration '" + + resourceFile + "'.", ye); + throw new YarnRuntimeException(ye); + } + } + + @VisibleForTesting + synchronized static void resetResourceTypes() { + initializedResources = false; + } + + @VisibleForTesting + public static Map + resetResourceTypes(Configuration conf) { + synchronized (ResourceUtils.class) { + initializedResources = false; + } + return 
getResourceTypes(conf); + } + + public static String getUnits(String resourceValue) { + String units; + for (int i = 0; i < resourceValue.length(); i++) { + if (Character.isAlphabetic(resourceValue.charAt(i))) { + units = resourceValue.substring(i); + if (StringUtils.isAlpha(units)) { + return units; + } + } + } + return ""; + } + + /** + * Function to get the resources for a node. This function will look at the + * file {@link YarnConfiguration#NODE_RESOURCES_CONFIGURATION_FILE} to + * determine the node resources. + * + * @param conf configuration file + * @return a map to resource name to the ResourceInformation object. The map + * is guaranteed to have entries for memory and vcores + */ + public static Map getNodeResourceInformation( + Configuration conf) { + if (!initializedNodeResources) { + synchronized (ResourceUtils.class) { + if (!initializedNodeResources) { + Map nodeResources = initializeNodeResourceInformation( + conf); + addManadtoryResources(nodeResources); + checkMandatatoryResources(nodeResources); + setMinimumAllocationForMandatoryResources(nodeResources, conf); + setMaximumAllocationForMandatoryResources(nodeResources, conf); + readOnlyNodeResources = Collections.unmodifiableMap(nodeResources); + initializedNodeResources = true; + } + } + } + return readOnlyNodeResources; + } + + private static Map initializeNodeResourceInformation( + Configuration conf) { + Map nodeResources = new HashMap<>(); + try { + addResourcesFileToConf( + YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE, conf); + for (Map.Entry entry : conf) { + String key = entry.getKey(); + String value = entry.getValue(); + if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) { + addResourceInformation(key, value, nodeResources); + } + } + } catch (FileNotFoundException fe) { + LOG.info("Couldn't find node resources file"); + } + return nodeResources; + } + + private static void addResourceInformation(String prop, String value, + Map nodeResources) { + String[] parts = 
prop.split("\\."); + LOG.info("Found resource entry " + prop); + if (parts.length == 4) { + String resourceType = parts[3]; + if (!nodeResources.containsKey(resourceType)) { + nodeResources + .put(resourceType, ResourceInformation.newInstance(resourceType)); + } + String units = getUnits(value); + Long resourceValue = + Long.valueOf(value.substring(0, value.length() - units.length())); + nodeResources.get(resourceType).setValue(resourceValue); + nodeResources.get(resourceType).setUnits(units); + LOG.debug("Setting value for resource type " + resourceType + " to " + + resourceValue + " with units " + units); + } + } + + @VisibleForTesting + synchronized public static void resetNodeResources() { + initializedNodeResources = false; + } + + public static Resource getResourceTypesMinimumAllocation() { + Resource ret = Resource.newInstance(0, 0); + for (ResourceInformation entry : resourceTypesArray) { + String name = entry.getName(); + if (name.equals(ResourceInformation.MEMORY_MB.getName())) { + ret.setMemorySize(entry.getMinimumAllocation()); + } else if (name.equals(ResourceInformation.VCORES.getName())) { + Long tmp = entry.getMinimumAllocation(); + if (tmp > Integer.MAX_VALUE) { + tmp = (long) Integer.MAX_VALUE; + } + ret.setVirtualCores(tmp.intValue()); + } else { + ret.setResourceValue(name, entry.getMinimumAllocation()); + } + } + return ret; + } + + /** + * Get a Resource object with for the maximum allocation possible. 
+ * @return a Resource object with the maximum allocation for the scheduler + */ + public static Resource getResourceTypesMaximumAllocation() { + Resource ret = Resource.newInstance(0, 0); + for (ResourceInformation entry : resourceTypesArray) { + String name = entry.getName(); + if (name.equals(ResourceInformation.MEMORY_MB.getName())) { + ret.setMemorySize(entry.getMaximumAllocation()); + } else if (name.equals(ResourceInformation.VCORES.getName())) { + Long tmp = entry.getMaximumAllocation(); + if (tmp > Integer.MAX_VALUE) { + tmp = (long) Integer.MAX_VALUE; + } + ret.setVirtualCores(tmp.intValue()); + continue; + } else { + ret.setResourceValue(name, entry.getMaximumAllocation()); + } + } + return ret; + } + + /** + * Get default unit by given resource type. + * @param resourceType resourceType + * @return default unit + */ + public static String getDefaultUnit(String resourceType) { + ResourceInformation ri = getResourceTypes().get(resourceType); + if (null != ri) { + return ri.getUnits(); + } + return ""; + } + + /** + * Get all resource types information from known resource types. 
+ * @return List of ResourceTypeInfo + */ + public static List getResourcesTypeInfo() { + List array = new ArrayList<>(); + // Add all resource types + Collection resourcesInfo = + ResourceUtils.getResourceTypes().values(); + for (ResourceInformation resourceInfo : resourcesInfo) { + array.add(ResourceTypeInfo + .newInstance(resourceInfo.getName(), resourceInfo.getUnits(), + resourceInfo.getResourceType())); + } + return array; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/package-info.java new file mode 100644 index 00000000000..1e925d7d57c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/package-info.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Package org.apache.hadoop.yarn.util.resource contains classes + * which is used as utility class for resource profile computations. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +package org.apache.hadoop.yarn.util.resource; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto index ba79db09a6f..81adef19335 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto @@ -61,4 +61,7 @@ service ApplicationClientProtocolService { rpc updateApplicationPriority (UpdateApplicationPriorityRequestProto) returns (UpdateApplicationPriorityResponseProto); rpc signalToContainer(SignalContainerRequestProto) returns (SignalContainerResponseProto); rpc updateApplicationTimeouts (UpdateApplicationTimeoutsRequestProto) returns (UpdateApplicationTimeoutsResponseProto); + rpc getResourceProfiles(GetAllResourceProfilesRequestProto) returns (GetAllResourceProfilesResponseProto); + rpc getResourceProfile(GetResourceProfileRequestProto) returns (GetResourceProfileResponseProto); + rpc getResourceTypeInfo(GetAllResourceTypeInfoRequestProto) returns (GetAllResourceTypeInfoResponseProto); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index c5f485fc3f7..9933e9ea8e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -53,9 +53,27 @@ message ContainerIdProto { optional int64 id = 3; } +enum ResourceTypesProto { + COUNTABLE = 0; +} + +message ResourceInformationProto { + required string key = 1; + 
optional int64 value = 2; + optional string units = 3; + optional ResourceTypesProto type = 4; +} + +message ResourceTypeInfoProto { + required string name = 1; + optional string units = 2; + optional ResourceTypesProto type = 3; +} + message ResourceProto { optional int64 memory = 1; optional int32 virtual_cores = 2; + repeated ResourceInformationProto resource_value_map = 3; } message ResourceUtilizationProto { @@ -69,6 +87,15 @@ message ResourceOptionProto { optional int32 over_commit_timeout = 2; } +message ResourceProfileEntry { + required string name = 1; + required ResourceProto resources = 2; +} + +message ResourceProfilesProto { + repeated ResourceProfileEntry resource_profiles_map = 1; +} + message NodeResourceMapProto { optional NodeIdProto node_id = 1; optional ResourceOptionProto resource_option = 2; @@ -174,6 +201,11 @@ message LocalResourceProto { optional bool should_be_uploaded_to_shared_cache = 7; } +message StringLongMapProto { + required string key = 1; + required int64 value = 2; +} + message ApplicationResourceUsageReportProto { optional int32 num_used_containers = 1; optional int32 num_reserved_containers = 2; @@ -186,6 +218,8 @@ message ApplicationResourceUsageReportProto { optional float cluster_usage_percentage = 9; optional int64 preempted_memory_seconds = 10; optional int64 preempted_vcore_seconds = 11; + repeated StringLongMapProto application_resource_usage_map = 12; + repeated StringLongMapProto application_preempted_resource_usage_map = 13; } message ApplicationReportProto { @@ -310,6 +344,11 @@ enum ExecutionTypeProto { //////////////////////////////////////////////////////////////////////// ////// From AM_RM_Protocol ///////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// +message ProfileCapabilityProto { + required string profile = 1; + required ResourceProto profileCapabilityOverride = 2; +} + message ResourceRequestProto { optional PriorityProto priority = 1; 
optional string resource_name = 2; @@ -319,6 +358,7 @@ message ResourceRequestProto { optional string node_label_expression = 6; optional ExecutionTypeRequestProto execution_type_request = 7; optional int64 allocation_request_id = 8 [default = 0]; + optional ProfileCapabilityProto profile = 9; } message ExecutionTypeRequestProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto index 7a7f03503ca..3da4ee7298a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto @@ -48,6 +48,7 @@ message RegisterApplicationMasterResponseProto { optional string queue = 5; repeated NMTokenProto nm_tokens_from_previous_attempts = 6; repeated SchedulerResourceTypes scheduler_resource_types = 7; + optional ResourceProfilesProto resource_profiles = 8; } message FinishApplicationMasterRequestProto { @@ -279,6 +280,28 @@ message UpdateApplicationTimeoutsResponseProto { repeated ApplicationUpdateTimeoutMapProto application_timeouts = 1; } +message GetAllResourceProfilesRequestProto { +} + +message GetAllResourceProfilesResponseProto { + required ResourceProfilesProto resource_profiles = 1; +} + +message GetResourceProfileRequestProto { + required string profile = 1; +} + +message GetResourceProfileResponseProto { + required ResourceProto resources = 1; +} + +message GetAllResourceTypeInfoRequestProto { +} + +message GetAllResourceTypeInfoResponseProto { + repeated ResourceTypeInfoProto resource_type_info = 1; +} + ////////////////////////////////////////////////////// /////// client_NM_Protocol /////////////////////////// ////////////////////////////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java new file mode 100644 index 00000000000..66bf3204bf6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.conf; + +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test class to verify various resource informations in a given resource. 
+ */ +public class TestResourceInformation { + + @Test + public void testName() { + String name = "yarn.io/test"; + ResourceInformation ri = ResourceInformation.newInstance(name); + Assert.assertEquals("Resource name incorrect", name, ri.getName()); + } + + @Test + public void testUnits() { + String name = "yarn.io/test"; + String units = "m"; + ResourceInformation ri = ResourceInformation.newInstance(name, units); + Assert.assertEquals("Resource name incorrect", name, ri.getName()); + Assert.assertEquals("Resource units incorrect", units, ri.getUnits()); + units = "z"; + try { + ResourceInformation.newInstance(name, units); + Assert.fail(units + "is not a valid unit"); + } catch (IllegalArgumentException ie) { + // do nothing + } + } + + @Test + public void testValue() { + String name = "yarn.io/test"; + long value = 1L; + ResourceInformation ri = ResourceInformation.newInstance(name, value); + Assert.assertEquals("Resource name incorrect", name, ri.getName()); + Assert.assertEquals("Resource value incorrect", value, ri.getValue()); + } + + @Test + public void testResourceInformation() { + String name = "yarn.io/test"; + long value = 1L; + String units = "m"; + ResourceInformation ri = + ResourceInformation.newInstance(name, units, value); + Assert.assertEquals("Resource name incorrect", name, ri.getName()); + Assert.assertEquals("Resource value incorrect", value, ri.getValue()); + Assert.assertEquals("Resource units incorrect", units, ri.getUnits()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java index bd7bf93566f..ce1239d4281 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java @@ -141,6 +141,10 @@ public void initializeMemberVariables() { // Used as Java command line properties, not XML configurationPrefixToSkipCompare.add("yarn.app.container"); + // Ignore default file name for resource profiles + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE); + // Ignore NodeManager "work in progress" variables configurationPrefixToSkipCompare .add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/util/TestUnitsConversionUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/util/TestUnitsConversionUtil.java new file mode 100644 index 00000000000..a412faebed8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/util/TestUnitsConversionUtil.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.util; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Test class to handle all test cases needed to verify basic unit conversion + * scenarios. + */ +public class TestUnitsConversionUtil { + + @Test + public void testUnitsConversion() { + int value = 5; + String fromUnit = ""; + long test = value; + Assert.assertEquals("pico test failed", + value * 1000L * 1000L * 1000L * 1000L, + UnitsConversionUtil.convert(fromUnit, "p", test)); + Assert.assertEquals("nano test failed", + value * 1000L * 1000L * 1000L, + UnitsConversionUtil.convert(fromUnit, "n", test)); + Assert + .assertEquals("micro test failed", value * 1000L * 1000L, + UnitsConversionUtil.convert(fromUnit, "u", test)); + Assert.assertEquals("milli test failed", value * 1000L, + UnitsConversionUtil.convert(fromUnit, "m", test)); + + test = value * 1000L * 1000L * 1000L * 1000L * 1000L; + fromUnit = ""; + Assert.assertEquals("kilo test failed", test / 1000L, + UnitsConversionUtil.convert(fromUnit, "k", test)); + + Assert + .assertEquals("mega test failed", test / (1000L * 1000L), + UnitsConversionUtil.convert(fromUnit, "M", test)); + Assert.assertEquals("giga test failed", + test / (1000L * 1000L * 1000L), + UnitsConversionUtil.convert(fromUnit, "G", test)); + Assert.assertEquals("tera test failed", + test / (1000L * 1000L * 1000L * 1000L), + UnitsConversionUtil.convert(fromUnit, "T", test)); + Assert.assertEquals("peta test failed", + test / (1000L * 1000L * 1000L * 1000L * 1000L), + UnitsConversionUtil.convert(fromUnit, "P", test)); + + Assert.assertEquals("nano to pico test failed", value * 1000L, + UnitsConversionUtil.convert("n", "p", value)); + + Assert.assertEquals("mega to giga test failed", value, + UnitsConversionUtil.convert("M", "G", value * 1000L)); + + Assert.assertEquals("Mi to Gi test failed", value, + UnitsConversionUtil.convert("Mi", "Gi", value * 1024L)); + + Assert.assertEquals("Mi to Ki test failed", value * 1024, + 
UnitsConversionUtil.convert("Mi", "Ki", value)); + + Assert.assertEquals("Ki to base units test failed", 5 * 1024, + UnitsConversionUtil.convert("Ki", "", 5)); + + Assert.assertEquals("Mi to k test failed", 1073741, + UnitsConversionUtil.convert("Mi", "k", 1024)); + + Assert.assertEquals("M to Mi test failed", 953, + UnitsConversionUtil.convert("M", "Mi", 1000)); + } + + @Test + public void testOverflow() { + long test = 5 * 1000L * 1000L * 1000L * 1000L * 1000L; + try { + UnitsConversionUtil.convert("P", "p", test); + Assert.fail("this operation should result in an overflow"); + } catch (IllegalArgumentException ie) { + // do nothing + } + try { + UnitsConversionUtil.convert("m", "p", Long.MAX_VALUE - 1); + Assert.fail("this operation should result in an overflow"); + } catch (IllegalArgumentException ie) { + // do nothing + } + } + + @Test + public void testCompare() { + String unitA = "P"; + long valueA = 1; + String unitB = "p"; + long valueB = 2; + Assert.assertEquals(1, + UnitsConversionUtil.compare(unitA, valueA, unitB, valueB)); + Assert.assertEquals(-1, + UnitsConversionUtil.compare(unitB, valueB, unitA, valueA)); + Assert.assertEquals(0, + UnitsConversionUtil.compare(unitA, valueA, unitA, valueA)); + Assert.assertEquals(-1, + UnitsConversionUtil.compare(unitA, valueA, unitA, valueB)); + Assert.assertEquals(1, + UnitsConversionUtil.compare(unitA, valueB, unitA, valueA)); + + unitB = "T"; + Assert.assertEquals(1, + UnitsConversionUtil.compare(unitA, valueA, unitB, valueB)); + Assert.assertEquals(-1, + UnitsConversionUtil.compare(unitB, valueB, unitA, valueA)); + Assert.assertEquals(0, + UnitsConversionUtil.compare(unitA, valueA, unitB, 1000L)); + + unitA = "p"; + unitB = "n"; + Assert.assertEquals(-1, + UnitsConversionUtil.compare(unitA, valueA, unitB, valueB)); + Assert.assertEquals(1, + UnitsConversionUtil.compare(unitB, valueB, unitA, valueA)); + Assert.assertEquals(0, + UnitsConversionUtil.compare(unitA, 1000L, unitB, valueA)); + + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index a02af709116..c167862e58e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -87,6 +87,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.URL; @@ -103,6 +104,7 @@ import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.SystemClock; import org.apache.hadoop.yarn.util.TimelineServiceHelper; @@ -231,12 +233,18 @@ @VisibleForTesting protected int numTotalContainers = 1; // Memory to request for the container on which the shell command will run - private long containerMemory = 10; + private static final long DEFAULT_CONTAINER_MEMORY = 10; + private long containerMemory = DEFAULT_CONTAINER_MEMORY; // VirtualCores to request for the 
container on which the shell command will run - private int containerVirtualCores = 1; + private static final int DEFAULT_CONTAINER_VCORES = 1; + private int containerVirtualCores = DEFAULT_CONTAINER_VCORES; // Priority of the request private int requestPriority; + // Resource profile for the container + private String containerResourceProfile = ""; + Map resourceProfiles; + // Counter for completed containers ( complete denotes successful or failed ) private AtomicInteger numCompletedContainers = new AtomicInteger(); // Allocated container count so that we know how many containers has the RM @@ -407,6 +415,8 @@ public boolean init(String[] args) throws ParseException, IOException { "Amount of memory in MB to be requested to run the shell command"); opts.addOption("container_vcores", true, "Amount of virtual cores to be requested to run the shell command"); + opts.addOption("container_resource_profile", true, + "Resource profile to be requested to run the shell command"); opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed"); opts.addOption("priority", true, "Application Priority. 
Default 0"); @@ -548,9 +558,11 @@ public boolean init(String[] args) throws ParseException, IOException { } containerMemory = Integer.parseInt(cliParser.getOptionValue( - "container_memory", "10")); + "container_memory", "-1")); containerVirtualCores = Integer.parseInt(cliParser.getOptionValue( - "container_vcores", "1")); + "container_vcores", "-1")); + containerResourceProfile = + cliParser.getOptionValue("container_resource_profile", ""); numTotalContainers = Integer.parseInt(cliParser.getOptionValue( "num_containers", "1")); if (numTotalContainers == 0) { @@ -669,6 +681,7 @@ public void run() throws YarnException, IOException, InterruptedException { RegisterApplicationMasterResponse response = amRMClient .registerApplicationMaster(appMasterHostname, appMasterRpcPort, appMasterTrackingUrl); + resourceProfiles = response.getResourceProfiles(); // Dump out information about cluster capability as seen by the // resource manager long maxMem = response.getMaximumResourceCapability().getMemorySize(); @@ -1216,12 +1229,8 @@ private ContainerRequest setupContainerAskForRM() { Priority pri = Priority.newInstance(requestPriority); // Set up resource type requirements - // For now, memory and CPU are supported so we set memory and cpu requirements - Resource capability = Resource.newInstance(containerMemory, - containerVirtualCores); - - ContainerRequest request = new ContainerRequest(capability, null, null, - pri); + ContainerRequest request = + new ContainerRequest(createProfileCapability(), null, null, pri); LOG.info("Requested container ask: " + request.toString()); return request; } @@ -1485,4 +1494,36 @@ public TimelinePutResponse run() throws Exception { } } + private ProfileCapability createProfileCapability() + throws YarnRuntimeException { + if (containerMemory < -1 || containerMemory == 0) { + throw new YarnRuntimeException("Value of AM memory '" + containerMemory + + "' has to be greater than 0"); + } + if (containerVirtualCores < -1 || containerVirtualCores == 
0) { + throw new YarnRuntimeException( + "Value of AM vcores '" + containerVirtualCores + + "' has to be greater than 0"); + } + + Resource resourceCapability = + Resource.newInstance(containerMemory, containerVirtualCores); + if (resourceProfiles == null) { + containerMemory = containerMemory == -1 ? DEFAULT_CONTAINER_MEMORY : + containerMemory; + containerVirtualCores = + containerVirtualCores == -1 ? DEFAULT_CONTAINER_VCORES : + containerVirtualCores; + resourceCapability.setMemorySize(containerMemory); + resourceCapability.setVirtualCores(containerVirtualCores); + } + + String profileName = containerResourceProfile; + if ("".equals(containerResourceProfile) && resourceProfiles != null) { + profileName = "default"; + } + ProfileCapability capability = + ProfileCapability.newInstance(profileName, resourceCapability); + return capability; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index eedb5016e4f..1a973049fc7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -66,10 +66,12 @@ import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueInfo; import 
org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; @@ -79,8 +81,9 @@ import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.client.util.YarnClientUtils; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; /** @@ -119,6 +122,11 @@ public class Client { private static final Log LOG = LogFactory.getLog(Client.class); + + private static final int DEFAULT_AM_MEMORY = 100; + private static final int DEFAULT_AM_VCORES = 1; + private static final int DEFAULT_CONTAINER_MEMORY = 10; + private static final int DEFAULT_CONTAINER_VCORES = 1; // Configuration private Configuration conf; @@ -130,9 +138,12 @@ // Queue for App master private String amQueue = ""; // Amt. of memory resource to request for to run the App Master - private long amMemory = 100; + private long amMemory = DEFAULT_AM_MEMORY; // Amt. of virtual core resource to request for to run the App Master - private int amVCores = 1; + private int amVCores = DEFAULT_AM_VCORES; + + // AM resource profile + private String amResourceProfile = ""; // Application master jar file private String appMasterJar = ""; @@ -151,9 +162,11 @@ private int shellCmdPriority = 0; // Amt of memory to request for container in which shell script will be executed - private int containerMemory = 10; + private long containerMemory = DEFAULT_CONTAINER_MEMORY; // Amt. 
of virtual cores to request for container in which shell script will be executed - private int containerVirtualCores = 1; + private int containerVirtualCores = DEFAULT_CONTAINER_VCORES; + // container resource profile + private String containerResourceProfile = ""; // No. of containers in which the shell script needs to be executed private int numContainers = 1; private String nodeLabelExpression = null; @@ -256,6 +269,7 @@ public Client(Configuration conf) throws Exception { opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master"); opts.addOption("master_vcores", true, "Amount of virtual cores to be requested to run the application master"); opts.addOption("jar", true, "Jar file containing the application master"); + opts.addOption("master_resource_profile", true, "Resource profile for the application master"); opts.addOption("shell_command", true, "Shell command to be executed by " + "the Application Master. Can only specify either --shell_command " + "or --shell_script"); @@ -269,6 +283,7 @@ public Client(Configuration conf) throws Exception { opts.addOption("shell_cmd_priority", true, "Priority for the shell command containers"); opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command"); opts.addOption("container_vcores", true, "Amount of virtual cores to be requested to run the shell command"); + opts.addOption("container_resource_profile", true, "Resource profile for the shell command"); opts.addOption("num_containers", true, "No. 
of containers on which the shell command needs to be executed"); opts.addOption("log_properties", true, "log4j.properties file"); opts.addOption("keep_containers_across_application_attempts", false, @@ -372,17 +387,11 @@ public boolean init(String[] args) throws ParseException { appName = cliParser.getOptionValue("appname", "DistributedShell"); amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0")); amQueue = cliParser.getOptionValue("queue", "default"); - amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "100")); - amVCores = Integer.parseInt(cliParser.getOptionValue("master_vcores", "1")); - - if (amMemory < 0) { - throw new IllegalArgumentException("Invalid memory specified for application master, exiting." - + " Specified memory=" + amMemory); - } - if (amVCores < 0) { - throw new IllegalArgumentException("Invalid virtual cores specified for application master, exiting." - + " Specified virtual cores=" + amVCores); - } + amMemory = + Integer.parseInt(cliParser.getOptionValue("master_memory", "-1")); + amVCores = + Integer.parseInt(cliParser.getOptionValue("master_vcores", "-1")); + amResourceProfile = cliParser.getOptionValue("master_resource_profile", ""); if (!cliParser.hasOption("jar")) { throw new IllegalArgumentException("No jar file specified for application master"); @@ -423,17 +432,18 @@ public boolean init(String[] args) throws ParseException { } shellCmdPriority = Integer.parseInt(cliParser.getOptionValue("shell_cmd_priority", "0")); - containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10")); - containerVirtualCores = Integer.parseInt(cliParser.getOptionValue("container_vcores", "1")); - numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1")); - - - if (containerMemory < 0 || containerVirtualCores < 0 || numContainers < 1) { - throw new IllegalArgumentException("Invalid no. of containers or container memory/vcores specified," - + " exiting." 
- + " Specified containerMemory=" + containerMemory - + ", containerVirtualCores=" + containerVirtualCores - + ", numContainer=" + numContainers); + containerMemory = + Integer.parseInt(cliParser.getOptionValue("container_memory", "-1")); + containerVirtualCores = + Integer.parseInt(cliParser.getOptionValue("container_vcores", "-1")); + containerResourceProfile = + cliParser.getOptionValue("container_resource_profile", ""); + numContainers = + Integer.parseInt(cliParser.getOptionValue("num_containers", "1")); + + if (numContainers < 1) { + throw new IllegalArgumentException("Invalid no. of containers specified," + + " exiting. Specified numContainer=" + numContainers); } nodeLabelExpression = cliParser.getOptionValue("node_label_expression", null); @@ -540,6 +550,32 @@ public boolean run() throws IOException, YarnException { prepareTimelineDomain(); } + Map profiles; + try { + profiles = yarnClient.getResourceProfiles(); + } catch (YARNFeatureNotEnabledException re) { + profiles = null; + } + + List appProfiles = new ArrayList<>(2); + appProfiles.add(amResourceProfile); + appProfiles.add(containerResourceProfile); + for (String appProfile : appProfiles) { + if (appProfile != null && !appProfile.isEmpty()) { + if (profiles == null) { + String message = "Resource profiles is not enabled"; + LOG.error(message); + throw new IOException(message); + } + if (!profiles.containsKey(appProfile)) { + String message = "Unknown resource profile '" + appProfile + + "'. 
Valid resource profiles are " + profiles.keySet(); + LOG.error(message); + throw new IOException(message); + } + } + } + // Get a new application id YarnClientApplication app = yarnClient.createApplication(); GetNewApplicationResponse appResponse = app.getNewApplicationResponse(); @@ -573,6 +609,13 @@ public boolean run() throws IOException, YarnException { ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext(); ApplicationId appId = appContext.getApplicationId(); + // Set up resource type requirements + // For now, both memory and vcores are supported, so we set memory and + // vcores requirements + setAMResourceCapability(appContext, amMemory, amVCores, amResourceProfile, + amPriority, profiles); + setContainerResources(containerMemory, containerVirtualCores, profiles); + appContext.setKeepContainersAcrossApplicationAttempts(keepContainers); appContext.setApplicationName(appName); @@ -696,8 +739,16 @@ public boolean run() throws IOException, YarnException { // Set class name vargs.add(appMasterMainClass); // Set params for Application Master - vargs.add("--container_memory " + String.valueOf(containerMemory)); - vargs.add("--container_vcores " + String.valueOf(containerVirtualCores)); + if (containerMemory > 0) { + vargs.add("--container_memory " + String.valueOf(containerMemory)); + } + if (containerVirtualCores > 0) { + vargs.add("--container_vcores " + String.valueOf(containerVirtualCores)); + } + if (containerResourceProfile != null && !containerResourceProfile + .isEmpty()) { + vargs.add("--container_resource_profile " + containerResourceProfile); + } vargs.add("--num_containers " + String.valueOf(numContainers)); if (null != nodeLabelExpression) { appContext.setNodeLabelExpression(nodeLabelExpression); @@ -730,12 +781,6 @@ public boolean run() throws IOException, YarnException { ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance( localResources, env, commands, null, null, null); - // Set up resource type 
requirements - // For now, both memory and vcores are supported, so we set memory and - // vcores requirements - Resource capability = Resource.newInstance(amMemory, amVCores); - appContext.setResource(capability); - // Service data is a binary blob that can be passed to the application // Not needed in this scenario // amContainer.setServiceData(serviceData); @@ -933,4 +978,65 @@ private void prepareTimelineDomain() { timelineClient.stop(); } } + + private void setAMResourceCapability(ApplicationSubmissionContext appContext, + long memory, int vcores, String profile, int priority, + Map profiles) throws IllegalArgumentException { + if (memory < -1 || memory == 0) { + throw new IllegalArgumentException("Invalid memory specified for" + + " application master, exiting. Specified memory=" + memory); + } + if (vcores < -1 || vcores == 0) { + throw new IllegalArgumentException("Invalid virtual cores specified for" + + " application master, exiting. Specified virtual cores=" + vcores); + } + String tmp = profile; + if (profile.isEmpty()) { + tmp = "default"; + } + if (appContext.getAMContainerResourceRequests() == null) { + List amResourceRequests = new ArrayList(); + amResourceRequests + .add(ResourceRequest.newInstance(Priority.newInstance(priority), "*", + Resources.clone(Resources.none()), 1)); + appContext.setAMContainerResourceRequests(amResourceRequests); + } + + if (appContext.getAMContainerResourceRequests().get(0) + .getProfileCapability() == null) { + appContext.getAMContainerResourceRequests().get(0).setProfileCapability( + ProfileCapability.newInstance(tmp, Resource.newInstance(0, 0))); + } + Resource capability = Resource.newInstance(0, 0); + // set amMemory because it's used to set Xmx param + if (profiles == null) { + amMemory = memory == -1 ? DEFAULT_AM_MEMORY : memory; + amVCores = vcores == -1 ? DEFAULT_AM_VCORES : vcores; + capability.setMemorySize(amMemory); + capability.setVirtualCores(amVCores); + } else { + amMemory = memory == -1 ? 
profiles.get(tmp).getMemorySize() : memory; + amVCores = vcores == -1 ? profiles.get(tmp).getVirtualCores() : vcores; + capability.setMemorySize(memory); + capability.setVirtualCores(vcores); + } + appContext.getAMContainerResourceRequests().get(0).getProfileCapability() + .setProfileCapabilityOverride(capability); + } + + private void setContainerResources(long memory, int vcores, + Map profiles) throws IllegalArgumentException { + if (memory < -1 || memory == 0) { + throw new IllegalArgumentException( + "Container memory '" + memory + "' has to be greater than 0"); + } + if (vcores < -1 || vcores == 0) { + throw new IllegalArgumentException( + "Container vcores '" + vcores + "' has to be greater than 0"); + } + if (profiles == null) { + containerMemory = memory == -1 ? DEFAULT_CONTAINER_MEMORY : memory; + containerVirtualCores = vcores == -1 ? DEFAULT_CONTAINER_VCORES : vcores; + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index fc270cb79f2..b541cae9997 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -1122,6 +1122,7 @@ public void testDSShellWithInvalidArgs() throws Exception { "1" }; client.init(args); + client.run(); Assert.fail("Exception is expected"); } catch (IllegalArgumentException e) { Assert.assertTrue("The throw exception is not expected", @@
-1349,4 +1350,32 @@ private int verifyContainerLog(int containerNum, } return numOfWords; } + + @Test + public void testDistributedShellResourceProfiles() throws Exception { + String[][] args = { + {"--jar", APPMASTER_JAR, "--num_containers", "1", "--shell_command", + Shell.WINDOWS ? "dir" : "ls", "--container_resource_profile", + "maximum" }, + {"--jar", APPMASTER_JAR, "--num_containers", "1", "--shell_command", + Shell.WINDOWS ? "dir" : "ls", "--master_resource_profile", + "default" }, + {"--jar", APPMASTER_JAR, "--num_containers", "1", "--shell_command", + Shell.WINDOWS ? "dir" : "ls", "--master_resource_profile", + "default", "--container_resource_profile", "maximum" } + }; + + for (int i = 0; i < args.length; ++i) { + LOG.info("Initializing DS Client"); + Client client = new Client(new Configuration(yarnCluster.getConfig())); + Assert.assertTrue(client.init(args[i])); + LOG.info("Running DS Client"); + try { + client.run(); + Assert.fail("Client run should throw error"); + } catch (Exception e) { + continue; + } + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml index b83bff83e73..8cbf4c40e40 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml @@ -144,6 +144,7 @@ src/test/resources/application_1440536969523_0001.har/part-0 src/test/resources/application_1440536969523_0001.har/_masterindex src/test/resources/application_1440536969523_0001.har/_SUCCESS + src/test/resources/resource-profiles.json diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java index 60e305f4ff4..674124654e8 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java @@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl; @@ -126,6 +127,7 @@ protected void serviceInit(Configuration conf) throws Exception { private String nodeLabelsExpression; private ExecutionTypeRequest executionTypeRequest = ExecutionTypeRequest.newInstance(); + private String resourceProfile; /** * Instantiates a {@link ContainerRequest} with the given constraints and @@ -172,6 +174,26 @@ public ContainerRequest(Resource capability, String[] nodes, this(capability, nodes, racks, priority, allocationRequestId, true, null, ExecutionTypeRequest.newInstance()); } + /** + * Instantiates a {@link ContainerRequest} with the given constraints and + * locality relaxation enabled. + * + * @param capability + * The {@link ProfileCapability} to be requested for each container. + * @param nodes + * Any hosts to request that the containers are placed on. + * @param racks + * Any racks to request that the containers are placed on. The + * racks corresponding to any hosts requested will be automatically + * added to this list. + * @param priority + * The priority at which to request the containers. Higher + * priorities have lower numerical values. 
+ */ + public ContainerRequest(ProfileCapability capability, String[] nodes, + String[] racks, Priority priority) { + this(capability, nodes, racks, priority, 0, true, null); + } /** * Instantiates a {@link ContainerRequest} with the given constraints. @@ -200,6 +222,29 @@ public ContainerRequest(Resource capability, String[] nodes, * Instantiates a {@link ContainerRequest} with the given constraints. * * @param capability + * The {@link ProfileCapability} to be requested for each container. + * @param nodes + * Any hosts to request that the containers are placed on. + * @param racks + * Any racks to request that the containers are placed on. The + * racks corresponding to any hosts requested will be automatically + * added to this list. + * @param priority + * The priority at which to request the containers. Higher + * priorities have lower numerical values. + * @param relaxLocality + * If true, containers for this request may be assigned on hosts + * and racks other than the ones explicitly requested. + */ + public ContainerRequest(ProfileCapability capability, String[] nodes, + String[] racks, Priority priority, boolean relaxLocality) { + this(capability, nodes, racks, priority, 0, relaxLocality, null); + } + + /** + * Instantiates a {@link ContainerRequest} with the given constraints. + * + * @param capability * The {@link Resource} to be requested for each container. * @param nodes * Any hosts to request that the containers are placed on. 
@@ -286,6 +331,59 @@ public ContainerRequest(Resource capability, String[] nodes, String[] racks, relaxLocality, nodeLabelsExpression, ExecutionTypeRequest.newInstance()); } + + public ContainerRequest(ProfileCapability capability, String[] nodes, + String[] racks, Priority priority, long allocationRequestId, + boolean relaxLocality, String nodeLabelsExpression) { + this(capability, nodes, racks, priority, allocationRequestId, + relaxLocality, nodeLabelsExpression, + ExecutionTypeRequest.newInstance()); + } + + /** + * Instantiates a {@link ContainerRequest} with the given constraints. + * + * @param capability + * The {@link Resource} to be requested for each container. + * @param nodes + * Any hosts to request that the containers are placed on. + * @param racks + * Any racks to request that the containers are placed on. The + * racks corresponding to any hosts requested will be automatically + * added to this list. + * @param priority + * The priority at which to request the containers. Higher + * priorities have lower numerical values. + * @param allocationRequestId + * The allocationRequestId of the request. To be used as a tracking + * id to match Containers allocated against this request. Will + * default to 0 if not specified. + * @param relaxLocality + * If true, containers for this request may be assigned on hosts + * and racks other than the ones explicitly requested. + * @param nodeLabelsExpression + * Set node labels to allocate resource, now we only support + * asking for only a single node label + * @param executionTypeRequest + * Set the execution type of the container request. 
+ */ + public ContainerRequest(Resource capability, String[] nodes, String[] racks, + Priority priority, long allocationRequestId, boolean relaxLocality, + String nodeLabelsExpression, + ExecutionTypeRequest executionTypeRequest) { + this(capability, nodes, racks, priority, allocationRequestId, + relaxLocality, nodeLabelsExpression, executionTypeRequest, + ProfileCapability.DEFAULT_PROFILE); + } + + public ContainerRequest(ProfileCapability capability, String[] nodes, + String[] racks, Priority priority, long allocationRequestId, + boolean relaxLocality, String nodeLabelsExpression, + ExecutionTypeRequest executionTypeRequest) { + this(capability.getProfileCapabilityOverride(), nodes, racks, priority, + allocationRequestId, relaxLocality, nodeLabelsExpression, + executionTypeRequest, capability.getProfileName()); + } /** * Instantiates a {@link ContainerRequest} with the given constraints. @@ -313,11 +411,13 @@ public ContainerRequest(Resource capability, String[] nodes, String[] racks, * asking for only a single node label * @param executionTypeRequest * Set the execution type of the container request. + * @param profile + * Set the resource profile for the container request */ public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority, long allocationRequestId, boolean relaxLocality, String nodeLabelsExpression, - ExecutionTypeRequest executionTypeRequest) { + ExecutionTypeRequest executionTypeRequest, String profile) { this.allocationRequestId = allocationRequestId; this.capability = capability; this.nodes = (nodes != null ? 
ImmutableList.copyOf(nodes) : null); @@ -326,6 +426,7 @@ public ContainerRequest(Resource capability, String[] nodes, String[] racks, this.relaxLocality = relaxLocality; this.nodeLabelsExpression = nodeLabelsExpression; this.executionTypeRequest = executionTypeRequest; + this.resourceProfile = profile; sanityCheck(); } @@ -377,6 +478,10 @@ public ExecutionTypeRequest getExecutionTypeRequest() { return executionTypeRequest; } + public String getResourceProfile() { + return resourceProfile; + } + public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Capability[").append(capability).append("]"); @@ -384,6 +489,7 @@ public String toString() { sb.append("AllocationRequestId[").append(allocationRequestId).append("]"); sb.append("ExecutionTypeRequest[").append(executionTypeRequest) .append("]"); + sb.append("Resource Profile[").append(resourceProfile).append("]"); return sb.toString(); } @@ -636,6 +742,15 @@ public abstract void requestContainerUpdate( " AMRMClient is expected to implement this !!"); } + + @InterfaceStability.Evolving + public List> getMatchingRequests( + Priority priority, String resourceName, ExecutionType executionType, + ProfileCapability capability) { + throw new UnsupportedOperationException("The sub-class extending" + + " AMRMClient is expected to implement this !!"); + } + /** * Get outstanding ContainerRequests matching the given * allocationRequestId. 
These ContainerRequests should have been added via diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java index 8c68a31ffe2..60e7813e422 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java @@ -61,6 +61,8 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.ReservationDefinition; import org.apache.hadoop.yarn.api.records.ReservationId; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -70,6 +72,7 @@ import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; +import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; @@ -855,4 +858,46 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( throw new UnsupportedOperationException("The sub-class extending " + YarnClient.class.getName() + " is expected to implement this !"); } + + /** + *

+ * Get the resource profiles available in the RM. + *

+ * @return a Map of the resource profile names to their capabilities + * @throws YARNFeatureNotEnabledException if resource-profile is disabled + * @throws YarnException if any error happens inside YARN + * @throws IOException in case of other errors + */ + @Public + @Unstable + public abstract Map getResourceProfiles() + throws YarnException, IOException; + + /** + *

+ * Get the details of a specific resource profile from the RM. + *

+ @param profile the profile name + @return resource profile name with its capabilities + @throws YARNFeatureNotEnabledException if resource-profile is disabled + @throws YarnException if any error happens inside YARN + @throws IOException in case of other errors + */ + @Public + @Unstable + public abstract Resource getResourceProfile(String profile) + throws YarnException, IOException; + + /** + *

+ * Get available resource types supported by RM. + *

+ @return list of supported resource types with detailed information + @throws YarnException if any issue happens inside YARN + @throws IOException in case of other errors + */ + @Public + @Unstable + public abstract List getResourceTypeInfo() + throws YarnException, IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 7a21bc61ab0..a41ab6ad714 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.client.api.impl; import java.io.IOException; -import java.io.Serializable; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -59,6 +58,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -105,56 +105,56 @@ protected final Set blacklistedNodes = new HashSet(); protected final Set blacklistAdditions = new HashSet(); protected final Set blacklistRemovals = new HashSet(); + + protected Map resourceProfilesMap; static class ResourceRequestInfo { ResourceRequest remoteRequest; LinkedHashSet containerRequests; - + ResourceRequestInfo(Long allocationRequestId, Priority priority, - String resourceName, Resource capability, boolean relaxLocality) { + String resourceName, Resource capability, 
boolean relaxLocality, + String resourceProfile) { + ProfileCapability profileCapability = ProfileCapability + .newInstance(resourceProfile, capability); remoteRequest = ResourceRequest.newBuilder().priority(priority) .resourceName(resourceName).capability(capability).numContainers(0) - .allocationRequestId(allocationRequestId) - .relaxLocality(relaxLocality).build(); + .allocationRequestId(allocationRequestId).relaxLocality(relaxLocality) + .profileCapability(profileCapability).build(); containerRequests = new LinkedHashSet(); } } /** - * Class compares Resource by memory then cpu in reverse order + * Class compares Resource by memory, then cpu and then the remaining resource + * types in reverse order. */ - static class ResourceReverseMemoryThenCpuComparator implements - Comparator, Serializable { - static final long serialVersionUID = 12345L; - @Override - public int compare(Resource arg0, Resource arg1) { - long mem0 = arg0.getMemorySize(); - long mem1 = arg1.getMemorySize(); - long cpu0 = arg0.getVirtualCores(); - long cpu1 = arg1.getVirtualCores(); - if(mem0 == mem1) { - if(cpu0 == cpu1) { - return 0; - } - if(cpu0 < cpu1) { - return 1; - } - return -1; - } - if(mem0 < mem1) { - return 1; - } - return -1; - } + static class ProfileCapabilityComparator + implements Comparator { + + HashMap resourceProfilesMap; + + public ProfileCapabilityComparator( + HashMap resourceProfileMap) { + this.resourceProfilesMap = resourceProfileMap; + } + + public int compare(T arg0, T arg1) { + Resource resource0 = + ProfileCapability.toResource(arg0, resourceProfilesMap); + Resource resource1 = + ProfileCapability.toResource(arg1, resourceProfilesMap); + return resource1.compareTo(resource0); + } } - static boolean canFit(Resource arg0, Resource arg1) { - long mem0 = arg0.getMemorySize(); - long mem1 = arg1.getMemorySize(); - long cpu0 = arg0.getVirtualCores(); - long cpu1 = arg1.getVirtualCores(); - - return (mem0 <= mem1 && cpu0 <= cpu1); + boolean canFit(ProfileCapability 
arg0, ProfileCapability arg1) { + Resource resource0 = + ProfileCapability.toResource(arg0, resourceProfilesMap); + Resource resource1 = + ProfileCapability.toResource(arg1, resourceProfilesMap); + return Resources.fitsIn(resource0, resource1); + } private final Map> remoteRequests = @@ -233,6 +233,7 @@ public RegisterApplicationMasterResponse registerApplicationMaster( return registerApplicationMaster(); } + @SuppressWarnings("unchecked") private RegisterApplicationMasterResponse registerApplicationMaster() throws YarnException, IOException { RegisterApplicationMasterRequest request = @@ -245,6 +246,7 @@ private RegisterApplicationMasterResponse registerApplicationMaster() if (!response.getNMTokensFromPreviousAttempts().isEmpty()) { populateNMTokens(response.getNMTokensFromPreviousAttempts()); } + this.resourceProfilesMap = response.getResourceProfiles(); } return response; } @@ -416,13 +418,15 @@ public AllocateResponse allocate(float progressIndicator) for(ResourceRequest r : ask) { // create a copy of ResourceRequest as we might change it while the // RPC layer is using it to send info across - ResourceRequest rr = ResourceRequest.newBuilder() - .priority(r.getPriority()).resourceName(r.getResourceName()) - .capability(r.getCapability()).numContainers(r.getNumContainers()) - .relaxLocality(r.getRelaxLocality()) - .nodeLabelExpression(r.getNodeLabelExpression()) - .executionTypeRequest(r.getExecutionTypeRequest()) - .allocationRequestId(r.getAllocationRequestId()).build(); + ResourceRequest rr = + ResourceRequest.newBuilder().priority(r.getPriority()) + .resourceName(r.getResourceName()).capability(r.getCapability()) + .numContainers(r.getNumContainers()) + .relaxLocality(r.getRelaxLocality()) + .nodeLabelExpression(r.getNodeLabelExpression()) + .executionTypeRequest(r.getExecutionTypeRequest()) + .allocationRequestId(r.getAllocationRequestId()) + .profileCapability(r.getProfileCapability()).build(); askList.add(rr); } return askList; @@ -504,6 +508,8 @@ public 
void unregisterApplicationMaster(FinalApplicationStatus appStatus, public synchronized void addContainerRequest(T req) { Preconditions.checkArgument(req != null, "Resource request can not be null."); + ProfileCapability profileCapability = ProfileCapability + .newInstance(req.getResourceProfile(), req.getCapability()); Set dedupedRacks = new HashSet(); if (req.getRacks() != null) { dedupedRacks.addAll(req.getRacks()); @@ -516,6 +522,8 @@ public synchronized void addContainerRequest(T req) { Set inferredRacks = resolveRacks(req.getNodes()); inferredRacks.removeAll(dedupedRacks); + checkResourceProfile(req.getResourceProfile()); + // check that specific and non-specific requests cannot be mixed within a // priority checkLocalityRelaxationConflict(req.getAllocationRequestId(), @@ -540,26 +548,26 @@ public synchronized void addContainerRequest(T req) { } for (String node : dedupedNodes) { addResourceRequest(req.getPriority(), node, - req.getExecutionTypeRequest(), req.getCapability(), req, true, + req.getExecutionTypeRequest(), profileCapability, req, true, req.getNodeLabelExpression()); } } for (String rack : dedupedRacks) { addResourceRequest(req.getPriority(), rack, req.getExecutionTypeRequest(), - req.getCapability(), req, true, req.getNodeLabelExpression()); + profileCapability, req, true, req.getNodeLabelExpression()); } // Ensure node requests are accompanied by requests for // corresponding rack for (String rack : inferredRacks) { addResourceRequest(req.getPriority(), rack, req.getExecutionTypeRequest(), - req.getCapability(), req, req.getRelaxLocality(), + profileCapability, req, req.getRelaxLocality(), req.getNodeLabelExpression()); } // Off-switch addResourceRequest(req.getPriority(), ResourceRequest.ANY, - req.getExecutionTypeRequest(), req.getCapability(), req, + req.getExecutionTypeRequest(), profileCapability, req, req.getRelaxLocality(), req.getNodeLabelExpression()); } @@ -567,6 +575,8 @@ public synchronized void addContainerRequest(T req) { public 
synchronized void removeContainerRequest(T req) { Preconditions.checkArgument(req != null, "Resource request can not be null."); + ProfileCapability profileCapability = ProfileCapability + .newInstance(req.getResourceProfile(), req.getCapability()); Set allRacks = new HashSet(); if (req.getRacks() != null) { allRacks.addAll(req.getRacks()); @@ -577,17 +587,17 @@ public synchronized void removeContainerRequest(T req) { if (req.getNodes() != null) { for (String node : new HashSet(req.getNodes())) { decResourceRequest(req.getPriority(), node, - req.getExecutionTypeRequest(), req.getCapability(), req); + req.getExecutionTypeRequest(), profileCapability, req); } } for (String rack : allRacks) { decResourceRequest(req.getPriority(), rack, - req.getExecutionTypeRequest(), req.getCapability(), req); + req.getExecutionTypeRequest(), profileCapability, req); } decResourceRequest(req.getPriority(), ResourceRequest.ANY, - req.getExecutionTypeRequest(), req.getCapability(), req); + req.getExecutionTypeRequest(), profileCapability, req); } @Override @@ -686,6 +696,17 @@ public synchronized int getClusterNodeCount() { public synchronized List> getMatchingRequests( Priority priority, String resourceName, ExecutionType executionType, Resource capability) { + ProfileCapability profileCapability = + ProfileCapability.newInstance(capability); + return getMatchingRequests(priority, resourceName, executionType, + profileCapability); + } + + @Override + @SuppressWarnings("unchecked") + public synchronized List> getMatchingRequests( + Priority priority, String resourceName, ExecutionType executionType, + ProfileCapability capability) { Preconditions.checkArgument(capability != null, "The Resource to be requested should not be null "); Preconditions.checkArgument(priority != null, @@ -695,22 +716,22 @@ public synchronized int getClusterNodeCount() { RemoteRequestsTable remoteRequestsTable = getTable(0); if (null != remoteRequestsTable) { - List> matchingRequests = - 
remoteRequestsTable.getMatchingRequests(priority, resourceName, - executionType, capability); + List> matchingRequests = remoteRequestsTable + .getMatchingRequests(priority, resourceName, executionType, + capability); if (null != matchingRequests) { // If no exact match. Container may be larger than what was requested. // get all resources <= capability. map is reverse sorted. for (ResourceRequestInfo resReqInfo : matchingRequests) { - if (canFit(resReqInfo.remoteRequest.getCapability(), capability) && - !resReqInfo.containerRequests.isEmpty()) { + if (canFit(resReqInfo.remoteRequest.getProfileCapability(), + capability) && !resReqInfo.containerRequests.isEmpty()) { list.add(resReqInfo.containerRequests); } } } } // no match found - return list; + return list; } private Set resolveRacks(List nodes) { @@ -758,6 +779,15 @@ private void checkLocalityRelaxationConflict(Long allocationReqId, } } } + + private void checkResourceProfile(String profile) { + if (resourceProfilesMap != null && !resourceProfilesMap.isEmpty() + && !resourceProfilesMap.containsKey(profile)) { + throw new InvalidContainerRequestException( + "Invalid profile name, valid profile names are " + resourceProfilesMap + .keySet()); + } + } /** * Valid if a node label expression specified on container request is valid or @@ -845,12 +875,16 @@ private void addResourceRequestToAsk(ResourceRequest remoteRequest) { } private void addResourceRequest(Priority priority, String resourceName, - ExecutionTypeRequest execTypeReq, Resource capability, T req, + ExecutionTypeRequest execTypeReq, ProfileCapability capability, T req, boolean relaxLocality, String labelExpression) { RemoteRequestsTable remoteRequestsTable = getTable(req.getAllocationRequestId()); if (remoteRequestsTable == null) { remoteRequestsTable = new RemoteRequestsTable(); + if (this.resourceProfilesMap instanceof HashMap) { + remoteRequestsTable.setResourceComparator( + new ProfileCapabilityComparator((HashMap) resourceProfilesMap)); + } 
putTable(req.getAllocationRequestId(), remoteRequestsTable); } @SuppressWarnings("unchecked") @@ -863,6 +897,7 @@ private void addResourceRequest(Priority priority, String resourceName, addResourceRequestToAsk(resourceRequestInfo.remoteRequest); if (LOG.isDebugEnabled()) { + LOG.debug("Adding request to ask " + resourceRequestInfo.remoteRequest); LOG.debug("addResourceRequest:" + " applicationId=" + " priority=" + priority.getPriority() + " resourceName=" + resourceName + " numContainers=" @@ -872,7 +907,7 @@ private void addResourceRequest(Priority priority, String resourceName, } private void decResourceRequest(Priority priority, String resourceName, - ExecutionTypeRequest execTypeReq, Resource capability, T req) { + ExecutionTypeRequest execTypeReq, ProfileCapability capability, T req) { RemoteRequestsTable remoteRequestsTable = getTable(req.getAllocationRequestId()); if (remoteRequestsTable != null) { @@ -882,7 +917,7 @@ private void decResourceRequest(Priority priority, String resourceName, execTypeReq, capability, req); // send the ResourceRequest to RM even if is 0 because it needs to // override a previously sent value. 
If ResourceRequest was not sent - // previously then sending 0 ought to be a no-op on RM + // previously then sending 0 ought to be a no-op on RM if (resourceRequestInfo != null) { addResourceRequestToAsk(resourceRequestInfo.remoteRequest); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java index 110ca799436..135e1db2939 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/RemoteRequestsTable.java @@ -23,7 +23,7 @@ import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.Priority; -import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import java.util.Collection; import java.util.HashMap; @@ -35,43 +35,42 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl.ResourceRequestInfo; -import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl.ResourceReverseMemoryThenCpuComparator; +import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl.ProfileCapabilityComparator; class RemoteRequestsTable implements Iterable{ private static final Log LOG = LogFactory.getLog(RemoteRequestsTable.class); - static ResourceReverseMemoryThenCpuComparator resourceComparator = - new ResourceReverseMemoryThenCpuComparator(); + private ProfileCapabilityComparator resourceComparator; /** * Nested Iterator that iterates over just the ResourceRequestInfo * object. 
*/ class RequestInfoIterator implements Iterator { - private Iterator>>> iLocMap; - private Iterator>> iExecTypeMap; - private Iterator> iCapMap; + private Iterator> iCapMap; private Iterator iResReqInfo; public RequestInfoIterator(Iterator>>> + Map>>> iLocationMap) { this.iLocMap = iLocationMap; if (iLocMap.hasNext()) { iExecTypeMap = iLocMap.next().values().iterator(); } else { iExecTypeMap = - new LinkedList>>().iterator(); } if (iExecTypeMap.hasNext()) { iCapMap = iExecTypeMap.next().values().iterator(); } else { iCapMap = - new LinkedList>() + new LinkedList>() .iterator(); } if (iCapMap.hasNext()) { @@ -113,7 +112,7 @@ public void remove() { // Nest map with Primary key : // Priority -> ResourceName(String) -> ExecutionType -> Capability(Resource) // and value : ResourceRequestInfo - private Map>>> remoteRequestsTable = new HashMap<>(); @Override @@ -122,8 +121,8 @@ public void remove() { } ResourceRequestInfo get(Priority priority, String location, - ExecutionType execType, Resource capability) { - TreeMap capabilityMap = + ExecutionType execType, ProfileCapability capability) { + TreeMap capabilityMap = getCapabilityMap(priority, location, execType); if (capabilityMap == null) { return null; @@ -131,9 +130,10 @@ ResourceRequestInfo get(Priority priority, String location, return capabilityMap.get(capability); } + @SuppressWarnings("unchecked") void put(Priority priority, String resourceName, ExecutionType execType, - Resource capability, ResourceRequestInfo resReqInfo) { - Map>> locationMap = remoteRequestsTable.get(priority); if (locationMap == null) { @@ -143,8 +143,8 @@ void put(Priority priority, String resourceName, ExecutionType execType, LOG.debug("Added priority=" + priority); } } - Map> execTypeMap = - locationMap.get(resourceName); + Map> + execTypeMap = locationMap.get(resourceName); if (execTypeMap == null) { execTypeMap = new HashMap<>(); locationMap.put(resourceName, execTypeMap); @@ -152,9 +152,14 @@ void put(Priority priority, String 
resourceName, ExecutionType execType, LOG.debug("Added resourceName=" + resourceName); } } - TreeMap capabilityMap = + TreeMap capabilityMap = execTypeMap.get(execType); if (capabilityMap == null) { + // this can happen if the user doesn't register with the RM before + // calling addResourceRequest + if (resourceComparator == null) { + resourceComparator = new ProfileCapabilityComparator(new HashMap<>()); + } capabilityMap = new TreeMap<>(resourceComparator); execTypeMap.put(execType, capabilityMap); if (LOG.isDebugEnabled()) { @@ -165,9 +170,9 @@ void put(Priority priority, String resourceName, ExecutionType execType, } ResourceRequestInfo remove(Priority priority, String resourceName, - ExecutionType execType, Resource capability) { + ExecutionType execType, ProfileCapability capability) { ResourceRequestInfo retVal = null; - Map>> locationMap = remoteRequestsTable.get(priority); if (locationMap == null) { if (LOG.isDebugEnabled()) { @@ -175,7 +180,7 @@ ResourceRequestInfo remove(Priority priority, String resourceName, } return null; } - Map> + Map> execTypeMap = locationMap.get(resourceName); if (execTypeMap == null) { if (LOG.isDebugEnabled()) { @@ -183,7 +188,7 @@ ResourceRequestInfo remove(Priority priority, String resourceName, } return null; } - TreeMap capabilityMap = + TreeMap capabilityMap = execTypeMap.get(execType); if (capabilityMap == null) { if (LOG.isDebugEnabled()) { @@ -204,14 +209,14 @@ ResourceRequestInfo remove(Priority priority, String resourceName, return retVal; } - Map>> getLocationMap(Priority priority) { return remoteRequestsTable.get(priority); } - Map> + Map> getExecutionTypeMap(Priority priority, String location) { - Map>> locationMap = getLocationMap(priority); if (locationMap == null) { return null; @@ -219,10 +224,10 @@ ResourceRequestInfo remove(Priority priority, String resourceName, return locationMap.get(location); } - TreeMap getCapabilityMap(Priority + TreeMap getCapabilityMap(Priority priority, String location, ExecutionType 
execType) { - Map> + Map> executionTypeMap = getExecutionTypeMap(priority, location); if (executionTypeMap == null) { return null; @@ -236,7 +241,7 @@ ResourceRequestInfo remove(Priority priority, String resourceName, List retList = new LinkedList<>(); for (String location : locations) { for (ExecutionType eType : ExecutionType.values()) { - TreeMap capabilityMap = + TreeMap capabilityMap = getCapabilityMap(priority, location, eType); if (capabilityMap != null) { retList.addAll(capabilityMap.values()); @@ -248,9 +253,9 @@ ResourceRequestInfo remove(Priority priority, String resourceName, List getMatchingRequests( Priority priority, String resourceName, ExecutionType executionType, - Resource capability) { + ProfileCapability capability) { List list = new LinkedList<>(); - TreeMap capabilityMap = + TreeMap capabilityMap = getCapabilityMap(priority, resourceName, executionType); if (capabilityMap != null) { ResourceRequestInfo resourceRequestInfo = capabilityMap.get(capability); @@ -266,14 +271,15 @@ ResourceRequestInfo remove(Priority priority, String resourceName, @SuppressWarnings("unchecked") ResourceRequestInfo addResourceRequest(Long allocationRequestId, Priority priority, String resourceName, ExecutionTypeRequest execTypeReq, - Resource capability, T req, boolean relaxLocality, + ProfileCapability capability, T req, boolean relaxLocality, String labelExpression) { - ResourceRequestInfo resourceRequestInfo = get(priority, resourceName, - execTypeReq.getExecutionType(), capability); + ResourceRequestInfo resourceRequestInfo = + get(priority, resourceName, execTypeReq.getExecutionType(), capability); if (resourceRequestInfo == null) { resourceRequestInfo = new ResourceRequestInfo(allocationRequestId, priority, resourceName, - capability, relaxLocality); + capability.getProfileCapabilityOverride(), relaxLocality, + capability.getProfileName()); put(priority, resourceName, execTypeReq.getExecutionType(), capability, resourceRequestInfo); } @@ -288,11 +294,14 @@ 
ResourceRequestInfo addResourceRequest(Long allocationRequestId, if (ResourceRequest.ANY.equals(resourceName)) { resourceRequestInfo.remoteRequest.setNodeLabelExpression(labelExpression); } + if (LOG.isDebugEnabled()) { + LOG.debug("Adding request to ask " + resourceRequestInfo.remoteRequest); + } return resourceRequestInfo; } ResourceRequestInfo decResourceRequest(Priority priority, String resourceName, - ExecutionTypeRequest execTypeReq, Resource capability, T req) { + ExecutionTypeRequest execTypeReq, ProfileCapability capability, T req) { ResourceRequestInfo resourceRequestInfo = get(priority, resourceName, execTypeReq.getExecutionType(), capability); @@ -330,4 +339,34 @@ boolean isEmpty() { return remoteRequestsTable.isEmpty(); } + @SuppressWarnings("unchecked") + public void setResourceComparator(ProfileCapabilityComparator comparator) { + ProfileCapabilityComparator old = this.resourceComparator; + this.resourceComparator = comparator; + if (old != null) { + // we've already set a resource comparator - re-create the maps with the + // new one. this is needed in case someone adds container requests before + // registering with the RM. In such a case, the comparator won't have + // the resource profiles map. 
After registration, the map is available + // so re-create the capabilities maps + + for (Map.Entry>>> + priEntry : remoteRequestsTable.entrySet()) { + for (Map.Entry>> nameEntry : priEntry.getValue().entrySet()) { + for (Map.Entry> execEntry : nameEntry + .getValue().entrySet()) { + Map capabilityMap = + execEntry.getValue(); + TreeMap newCapabiltyMap = + new TreeMap<>(resourceComparator); + newCapabiltyMap.putAll(capabilityMap); + execEntry.setValue(newCapabiltyMap); + } + } + } + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java index 19cb10b1d11..3c30023f5b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java @@ -43,6 +43,8 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -70,6 +72,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; +import 
org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; @@ -101,6 +104,8 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -937,4 +942,28 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( throws YarnException, IOException { return rmClient.updateApplicationTimeouts(request); } + + @Override + public Map getResourceProfiles() + throws YarnException, IOException { + GetAllResourceProfilesRequest request = + GetAllResourceProfilesRequest.newInstance(); + return rmClient.getResourceProfiles(request).getResourceProfiles(); + } + + @Override + public Resource getResourceProfile(String profile) + throws YarnException, IOException { + GetResourceProfileRequest request = GetResourceProfileRequest + .newInstance(profile); + return rmClient.getResourceProfile(request).getResource(); + } + + @Override + public List getResourceTypeInfo() + throws YarnException, IOException { + GetAllResourceTypeInfoRequest request = + GetAllResourceTypeInfoRequest.newInstance(); + return rmClient.getResourceTypeInfo(request).getResourceTypeInfo(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index 
893348ac246..e9a1d3fdbec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -61,6 +61,8 @@ import com.google.common.annotations.VisibleForTesting; +import static org.apache.hadoop.yarn.util.StringHelper.getResourceSecondsString; + @Private @Unstable public class ApplicationCLI extends YarnCLI { @@ -699,24 +701,9 @@ private int printApplicationReport(String applicationId) appReportStr.println(appReport.getRpcPort()); appReportStr.print("\tAM Host : "); appReportStr.println(appReport.getHost()); - appReportStr.print("\tAggregate Resource Allocation : "); - ApplicationResourceUsageReport usageReport = appReport.getApplicationResourceUsageReport(); - if (usageReport != null) { - //completed app report in the timeline server doesn't have usage report - appReportStr.print(usageReport.getMemorySeconds() + " MB-seconds, "); - appReportStr.println(usageReport.getVcoreSeconds() + " vcore-seconds"); - appReportStr.print("\tAggregate Resource Preempted : "); - appReportStr.print(usageReport.getPreemptedMemorySeconds() + - " MB-seconds, "); - appReportStr.println(usageReport.getPreemptedVcoreSeconds() + - " vcore-seconds"); - } else { - appReportStr.println("N/A"); - appReportStr.print("\tAggregate Resource Preempted : "); - appReportStr.println("N/A"); - } + printResourceUsage(appReportStr, usageReport); appReportStr.print("\tLog Aggregation Status : "); appReportStr.println(appReport.getLogAggregationStatus() == null ? 
"N/A" : appReport.getLogAggregationStatus()); @@ -747,6 +734,22 @@ private int printApplicationReport(String applicationId) return 0; } + private void printResourceUsage(PrintWriter appReportStr, + ApplicationResourceUsageReport usageReport) { + appReportStr.print("\tAggregate Resource Allocation : "); + if (usageReport != null) { + appReportStr.println( + getResourceSecondsString(usageReport.getResourceSecondsMap())); + appReportStr.print("\tAggregate Resource Preempted : "); + appReportStr.println(getResourceSecondsString( + usageReport.getPreemptedResourceSecondsMap())); + } else { + appReportStr.println("N/A"); + appReportStr.print("\tAggregate Resource Preempted : "); + appReportStr.println("N/A"); + } + } + private String getAllValidApplicationStates() { StringBuilder sb = new StringBuilder(); sb.append("The valid application state can be" + " one of the following: "); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java index 09b12f2c485..037e40af356 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java @@ -130,11 +130,13 @@ public TestAMRMClient(String schedulerName) { @Before public void setup() throws Exception { conf = new YarnConfiguration(); - createClusterAndStartApplication(); + createClusterAndStartApplication(conf); } - private void createClusterAndStartApplication() throws Exception { + private void createClusterAndStartApplication(Configuration conf) + throws Exception { // start minicluster + this.conf = conf; conf.set(YarnConfiguration.RM_SCHEDULER, schedulerName); conf.setLong( 
YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, @@ -532,11 +534,12 @@ private void verifyMatches( List> matches, int matchSize) { assertEquals(1, matches.size()); - assertEquals(matches.get(0).size(), matchSize); + assertEquals(matchSize, matches.get(0).size()); } @Test (timeout=60000) - public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOException { + public void testAMRMClientMatchingFitInferredRack() + throws YarnException, IOException { AMRMClientImpl amClient = null; try { // start am rm client @@ -544,10 +547,10 @@ public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOExce amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); - + Resource capability = Resource.newInstance(1024, 2); - ContainerRequest storedContainer1 = + ContainerRequest storedContainer1 = new ContainerRequest(capability, nodes, null, priority); amClient.addContainerRequest(storedContainer1); @@ -564,14 +567,15 @@ public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOExce verifyMatches(matches, 1); storedRequest = matches.get(0).iterator().next(); assertEquals(storedContainer1, storedRequest); - + // inferred rack match no longer valid after request is removed amClient.removeContainerRequest(storedContainer1); matches = amClient.getMatchingRequests(priority, rack, capability); assertTrue(matches.isEmpty()); - - amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, - null, null); + + amClient + .unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, + null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { @@ -604,16 +608,19 @@ public void testAMRMClientMatchStorage() throws YarnException, IOException { amClient.addContainerRequest(storedContainer1); amClient.addContainerRequest(storedContainer2); amClient.addContainerRequest(storedContainer3); + + ProfileCapability profileCapability = + 
ProfileCapability.newInstance(capability); // test addition and storage RemoteRequestsTable remoteRequestsTable = amClient.getTable(0); int containersRequestedAny = remoteRequestsTable.get(priority, - ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + ResourceRequest.ANY, ExecutionType.GUARANTEED, profileCapability) .remoteRequest.getNumContainers(); assertEquals(2, containersRequestedAny); containersRequestedAny = remoteRequestsTable.get(priority1, - ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + ResourceRequest.ANY, ExecutionType.GUARANTEED, profileCapability) .remoteRequest.getNumContainers(); assertEquals(1, containersRequestedAny); List> matches = @@ -884,7 +891,7 @@ public void testAMRMClientWithSaslEncryption() throws Exception { teardown(); conf = new YarnConfiguration(); conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, "privacy"); - createClusterAndStartApplication(); + createClusterAndStartApplication(conf); initAMRMClientAndTest(false); } @@ -1183,9 +1190,11 @@ public void testAMRMClientWithContainerPromotion() true, null, ExecutionTypeRequest .newInstance(ExecutionType.OPPORTUNISTIC, true))); + ProfileCapability profileCapability = + ProfileCapability.newInstance(capability); int oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, - ExecutionType.OPPORTUNISTIC, capability).remoteRequest + ExecutionType.OPPORTUNISTIC, profileCapability).remoteRequest .getNumContainers(); assertEquals(1, oppContainersRequestedAny); @@ -1322,9 +1331,11 @@ public void testAMRMClientWithContainerDemotion() true, null, ExecutionTypeRequest .newInstance(ExecutionType.GUARANTEED, true))); + ProfileCapability profileCapability = + ProfileCapability.newInstance(capability); int oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, - ExecutionType.GUARANTEED, capability).remoteRequest + ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); assertEquals(1, 
oppContainersRequestedAny); @@ -1701,14 +1712,16 @@ private void assertNumContainers(AMRMClientImpl amClient, int expAsks, int expRelease) { RemoteRequestsTable remoteRequestsTable = amClient.getTable(allocationReqId); + ProfileCapability profileCapability = + ProfileCapability.newInstance(capability); int containersRequestedNode = remoteRequestsTable.get(priority, - node, ExecutionType.GUARANTEED, capability).remoteRequest + node, ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); int containersRequestedRack = remoteRequestsTable.get(priority, - rack, ExecutionType.GUARANTEED, capability).remoteRequest + rack, ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); int containersRequestedAny = remoteRequestsTable.get(priority, - ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + ResourceRequest.ANY, ExecutionType.GUARANTEED, profileCapability) .remoteRequest.getNumContainers(); assertEquals(expNode, containersRequestedNode); @@ -1906,4 +1919,106 @@ public ApplicationMasterProtocol run() { } return result; } + + @Test(timeout = 60000) + public void testGetMatchingFitWithProfiles() throws Exception { + teardown(); + conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, true); + createClusterAndStartApplication(conf); + AMRMClient amClient = null; + try { + // start am rm client + amClient = AMRMClient.createAMRMClient(); + amClient.init(conf); + amClient.start(); + amClient.registerApplicationMaster("Host", 10000, ""); + + ProfileCapability capability1 = ProfileCapability.newInstance("minimum"); + ProfileCapability capability2 = ProfileCapability.newInstance("default"); + ProfileCapability capability3 = ProfileCapability.newInstance("maximum"); + ProfileCapability capability4 = ProfileCapability + .newInstance("minimum", Resource.newInstance(2048, 1)); + ProfileCapability capability5 = ProfileCapability.newInstance("default"); + ProfileCapability capability6 = ProfileCapability + 
.newInstance("default", Resource.newInstance(2048, 1)); + // http has the same capabilities as default + ProfileCapability capability7 = ProfileCapability.newInstance("http"); + + ContainerRequest storedContainer1 = + new ContainerRequest(capability1, nodes, racks, priority); + ContainerRequest storedContainer2 = + new ContainerRequest(capability2, nodes, racks, priority); + ContainerRequest storedContainer3 = + new ContainerRequest(capability3, nodes, racks, priority); + ContainerRequest storedContainer4 = + new ContainerRequest(capability4, nodes, racks, priority); + ContainerRequest storedContainer5 = + new ContainerRequest(capability5, nodes, racks, priority2); + ContainerRequest storedContainer6 = + new ContainerRequest(capability6, nodes, racks, priority); + ContainerRequest storedContainer7 = + new ContainerRequest(capability7, nodes, racks, priority); + + + amClient.addContainerRequest(storedContainer1); + amClient.addContainerRequest(storedContainer2); + amClient.addContainerRequest(storedContainer3); + amClient.addContainerRequest(storedContainer4); + amClient.addContainerRequest(storedContainer5); + amClient.addContainerRequest(storedContainer6); + amClient.addContainerRequest(storedContainer7); + + // test matching of containers + List> matches; + ContainerRequest storedRequest; + // exact match + ProfileCapability testCapability1 = + ProfileCapability.newInstance("minimum"); + matches = amClient + .getMatchingRequests(priority, node, ExecutionType.GUARANTEED, + testCapability1); + verifyMatches(matches, 1); + storedRequest = matches.get(0).iterator().next(); + assertEquals(storedContainer1, storedRequest); + amClient.removeContainerRequest(storedContainer1); + + // exact matching with order maintained + // we should get back 3 matches - default + http because they have the + // same capability + ProfileCapability testCapability2 = + ProfileCapability.newInstance("default"); + matches = amClient + .getMatchingRequests(priority, node, 
ExecutionType.GUARANTEED, + testCapability2); + verifyMatches(matches, 2); + // must be returned in the order they were made + int i = 0; + for (ContainerRequest storedRequest1 : matches.get(0)) { + switch(i) { + case 0: + assertEquals(storedContainer2, storedRequest1); + break; + case 1: + assertEquals(storedContainer7, storedRequest1); + break; + } + i++; + } + amClient.removeContainerRequest(storedContainer5); + + // matching with larger container. all requests returned + Resource testCapability3 = Resource.newInstance(8192, 8); + matches = amClient + .getMatchingRequests(priority, node, testCapability3); + assertEquals(3, matches.size()); + + Resource testCapability4 = Resource.newInstance(2048, 1); + matches = amClient.getMatchingRequests(priority, node, testCapability4); + assertEquals(1, matches.size()); + } finally { + if (amClient != null && amClient.getServiceState() == STATE.STARTED) { + amClient.stop(); + } + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java index 96035394ec7..c87123ad38a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java @@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.client.api.AMRMClient; @@ -276,9 
+277,10 @@ private void verifyResourceRequest( AMRMClientImpl client, ContainerRequest request, String location, boolean expectedRelaxLocality, ExecutionType executionType) { - ResourceRequest ask = client.getTable(0) - .get(request.getPriority(), location, executionType, - request.getCapability()).remoteRequest; + ProfileCapability profileCapability = ProfileCapability + .newInstance(request.getResourceProfile(), request.getCapability()); + ResourceRequest ask = client.getTable(0).get(request.getPriority(), + location, executionType, profileCapability).remoteRequest; assertEquals(location, ask.getResourceName()); assertEquals(1, ask.getNumContainers()); assertEquals(expectedRelaxLocality, ask.getRelaxLocality()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java index e180f6dc29a..00f5e03a9cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java @@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.Token; @@ -387,18 +388,21 @@ public void testAMRMClient() throws Exception { RemoteRequestsTable remoteRequestsTable = amClient.getTable(0); + ProfileCapability profileCapability = + ProfileCapability.newInstance(capability); + int containersRequestedNode = 
remoteRequestsTable.get(priority, - node, ExecutionType.GUARANTEED, capability).remoteRequest + node, ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); int containersRequestedRack = remoteRequestsTable.get(priority, - rack, ExecutionType.GUARANTEED, capability).remoteRequest + rack, ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); int containersRequestedAny = remoteRequestsTable.get(priority, - ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + ResourceRequest.ANY, ExecutionType.GUARANTEED, profileCapability) .remoteRequest.getNumContainers(); int oppContainersRequestedAny = remoteRequestsTable.get(priority2, ResourceRequest.ANY, - ExecutionType.OPPORTUNISTIC, capability).remoteRequest + ExecutionType.OPPORTUNISTIC, profileCapability).remoteRequest .getNumContainers(); assertEquals(2, containersRequestedNode); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java index 9b79e2d6faf..ddabd17b835 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java @@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -255,9 +256,11 @@ public void testNMClient() racks, priority)); } + ProfileCapability profileCapability = + 
ProfileCapability.newInstance(capability); int containersRequestedAny = rmClient.getTable(0) .get(priority, ResourceRequest.ANY, ExecutionType.GUARANTEED, - capability).remoteRequest.getNumContainers(); + profileCapability).remoteRequest.getNumContainers(); // RM should allocate container within 2 calls to allocate() int allocatedContainerCount = 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java index 305d18b6525..12c32fc7dac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java @@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.Token; @@ -99,6 +100,7 @@ private static final long AM_EXPIRE_MS = 4000; private static Resource capability; + private static ProfileCapability profileCapability; private static Priority priority; private static Priority priority2; private static Priority priority3; @@ -151,6 +153,7 @@ public static void setup() throws Exception { priority3 = Priority.newInstance(3); priority4 = Priority.newInstance(4); capability = Resource.newInstance(512, 1); + profileCapability = ProfileCapability.newInstance(capability); node = nodeReports.get(0).getNodeId().getHost(); rack = nodeReports.get(0).getRackName(); 
@@ -273,7 +276,7 @@ public void testPromotionFromAcquired() throws YarnException, IOException { int oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, - ExecutionType.OPPORTUNISTIC, capability).remoteRequest + ExecutionType.OPPORTUNISTIC, profileCapability).remoteRequest .getNumContainers(); assertEquals(1, oppContainersRequestedAny); @@ -394,7 +397,7 @@ public void testDemotionFromAcquired() throws YarnException, IOException { new AMRMClient.ContainerRequest(capability, null, null, priority3)); int guarContainersRequestedAny = amClient.getTable(0).get(priority3, - ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + ResourceRequest.ANY, ExecutionType.GUARANTEED, profileCapability) .remoteRequest.getNumContainers(); assertEquals(1, guarContainersRequestedAny); @@ -512,6 +515,7 @@ public void testMixedAllocationAndRelease() throws YarnException, assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); + amClient.addContainerRequest( new AMRMClient.ContainerRequest(capability, nodes, racks, priority)); amClient.addContainerRequest( @@ -532,17 +536,17 @@ public void testMixedAllocationAndRelease() throws YarnException, ExecutionType.OPPORTUNISTIC, true))); int containersRequestedNode = amClient.getTable(0).get(priority, - node, ExecutionType.GUARANTEED, capability).remoteRequest + node, ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); int containersRequestedRack = amClient.getTable(0).get(priority, - rack, ExecutionType.GUARANTEED, capability).remoteRequest + rack, ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); int containersRequestedAny = amClient.getTable(0).get(priority, - ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + ResourceRequest.ANY, ExecutionType.GUARANTEED, profileCapability) .remoteRequest.getNumContainers(); int oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, - 
ExecutionType.OPPORTUNISTIC, capability).remoteRequest + ExecutionType.OPPORTUNISTIC, profileCapability).remoteRequest .getNumContainers(); assertEquals(4, containersRequestedNode); @@ -564,17 +568,17 @@ public void testMixedAllocationAndRelease() throws YarnException, ExecutionType.OPPORTUNISTIC, true))); containersRequestedNode = amClient.getTable(0).get(priority, - node, ExecutionType.GUARANTEED, capability).remoteRequest + node, ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); containersRequestedRack = amClient.getTable(0).get(priority, - rack, ExecutionType.GUARANTEED, capability).remoteRequest + rack, ExecutionType.GUARANTEED, profileCapability).remoteRequest .getNumContainers(); containersRequestedAny = amClient.getTable(0).get(priority, - ResourceRequest.ANY, ExecutionType.GUARANTEED, capability) + ResourceRequest.ANY, ExecutionType.GUARANTEED, profileCapability) .remoteRequest.getNumContainers(); oppContainersRequestedAny = amClient.getTable(0).get(priority2, ResourceRequest.ANY, - ExecutionType.OPPORTUNISTIC, capability).remoteRequest + ExecutionType.OPPORTUNISTIC, profileCapability).remoteRequest .getNumContainers(); assertEquals(2, containersRequestedNode); @@ -691,10 +695,9 @@ public void testOpportunisticAllocation() throws YarnException, IOException { ExecutionTypeRequest.newInstance( ExecutionType.OPPORTUNISTIC, true))); - int oppContainersRequestedAny = - amClient.getTable(0).get(priority3, ResourceRequest.ANY, - ExecutionType.OPPORTUNISTIC, capability).remoteRequest - .getNumContainers(); + int oppContainersRequestedAny = amClient.getTable(0) + .get(priority3, ResourceRequest.ANY, ExecutionType.OPPORTUNISTIC, + profileCapability).remoteRequest.getNumContainers(); assertEquals(2, oppContainersRequestedAny); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 3c35b9cd313..9870877c993 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -39,8 +39,10 @@ import java.util.Collections; import java.util.Date; import java.util.EnumSet; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.regex.Pattern; @@ -68,6 +70,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceUtilization; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; @@ -117,9 +120,18 @@ public void testGetApplicationReport() throws Exception { for (int i = 0; i < 2; ++i) { ApplicationCLI cli = createAndGetAppCLI(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); + Map resourceSecondsMap = new HashMap<>(); + Map preemptedResoureSecondsMap = new HashMap<>(); + resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), 123456L); + resourceSecondsMap.put(ResourceInformation.VCORES.getName(), 4567L); + preemptedResoureSecondsMap + .put(ResourceInformation.MEMORY_MB.getName(), 1111L); + preemptedResoureSecondsMap + .put(ResourceInformation.VCORES.getName(), 2222L); ApplicationResourceUsageReport usageReport = i == 0 ? 
null : - ApplicationResourceUsageReport.newInstance( - 2, 0, null, null, null, 123456, 4567, 0, 0, 1111, 2222); + ApplicationResourceUsageReport + .newInstance(2, 0, null, null, null, resourceSecondsMap, 0, 0, + preemptedResoureSecondsMap); ApplicationReport newApplicationReport = ApplicationReport.newInstance( applicationId, ApplicationAttemptId.newInstance(applicationId, 1), "user", "queue", "appname", "host", 124, null, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/resource-profiles.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/resource-profiles.json new file mode 100644 index 00000000000..d0f3f7268e4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/resource-profiles.json @@ -0,0 +1,18 @@ +{ + "minimum": { + "memory-mb" : 1024, + "vcores" : 1 + }, + "default" : { + "memory-mb" : 2048, + "vcores" : 2 + }, + "maximum" : { + "memory-mb": 4096, + "vcores" : 4 + }, + "http" : { + "memory-mb" : 2048, + "vcores" : 2 + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index f17cf8c500b..6b547d6464e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -209,6 +209,14 @@ true + + + ${project.basedir}/src/test/resources + + + ${project.basedir}/src/test/resources/resource-types + + org.apache.rat diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java index cef03b9b052..73c49906c37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java @@ -51,7 +51,8 @@ public synchronized InputStream getConfigurationInputStream( "Illegal argument! The parameter should not be null or empty"); } Path filePath; - if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name)) { + if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name) || + YarnConfiguration.NM_CONFIGURATION_FILES.contains(name)) { filePath = new Path(this.configDir, name); if (!fs.exists(filePath)) { LOG.info(filePath + " not found"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java index cfa194fb5b2..0cdbd1516d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java @@ -39,7 +39,8 @@ public InputStream getConfigurationInputStream(Configuration bootstrapConf, if (name == null || name.isEmpty()) { throw new YarnException( "Illegal argument! 
The parameter should not be null or empty"); - } else if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name)) { + } else if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name) || + YarnConfiguration.NM_CONFIGURATION_FILES.contains(name)) { return bootstrapConf.getConfResourceAsInputStream(name); } return new FileInputStream(name); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java index ad7cb296080..fd5096a7b37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java @@ -89,6 +89,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl; import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FailApplicationAttemptRequestPBImpl; @@ -147,6 +153,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceProfilesRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceProfilesResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetResourceProfileRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetResourceProfileResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.proto.YarnServiceProtos; @@ -619,4 +631,46 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( return null; } } + + @Override + public GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) throws YarnException, IOException { + YarnServiceProtos.GetAllResourceProfilesRequestProto requestProto = + ((GetAllResourceProfilesRequestPBImpl) request).getProto(); + try { + return new GetAllResourceProfilesResponsePBImpl( + proxy.getResourceProfiles(null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException { + YarnServiceProtos.GetResourceProfileRequestProto requestProto = + ((GetResourceProfileRequestPBImpl) request).getProto(); + try { + return new 
GetResourceProfileResponsePBImpl( + proxy.getResourceProfile(null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) throws YarnException, IOException { + YarnServiceProtos.GetAllResourceTypeInfoRequestProto requestProto = + ((GetAllResourceTypeInfoRequestPBImpl) request).getProto(); + try { + return new GetAllResourceTypeInfoResponsePBImpl( + proxy.getResourceTypeInfo(null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java index 93ce6a343c5..423287e9105 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java @@ -58,6 +58,9 @@ import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl; import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FailApplicationAttemptRequestPBImpl; @@ -116,6 +119,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.UpdateApplicationTimeoutsResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceProfilesRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceProfilesResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetResourceProfileRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetResourceProfileResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.proto.YarnServiceProtos; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FailApplicationAttemptRequestProto; @@ -169,6 +178,12 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.UpdateApplicationTimeoutsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileRequestProto; 
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileResponseProto; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -631,4 +646,52 @@ public UpdateApplicationTimeoutsResponseProto updateApplicationTimeouts( throw new ServiceException(e); } } + + @Override + public GetAllResourceProfilesResponseProto getResourceProfiles( + RpcController controller, GetAllResourceProfilesRequestProto proto) + throws ServiceException { + GetAllResourceProfilesRequestPBImpl req = + new GetAllResourceProfilesRequestPBImpl(proto); + try { + GetAllResourceProfilesResponse resp = real.getResourceProfiles(req); + return ((GetAllResourceProfilesResponsePBImpl) resp).getProto(); + } catch (YarnException ye) { + throw new ServiceException(ye); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + @Override + public GetResourceProfileResponseProto getResourceProfile( + RpcController controller, GetResourceProfileRequestProto proto) + throws ServiceException { + GetResourceProfileRequestPBImpl req = + new GetResourceProfileRequestPBImpl(proto); + try { + GetResourceProfileResponse resp = real.getResourceProfile(req); + return ((GetResourceProfileResponsePBImpl) resp).getProto(); + } catch (YarnException ye) { + throw new ServiceException(ye); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } + + @Override + public GetAllResourceTypeInfoResponseProto getResourceTypeInfo( + RpcController controller, GetAllResourceTypeInfoRequestProto proto) + throws ServiceException { + GetAllResourceTypeInfoRequestPBImpl req = new GetAllResourceTypeInfoRequestPBImpl( + proto); + try { + GetAllResourceTypeInfoResponse resp = real.getResourceTypeInfo(req); + return ((GetAllResourceTypeInfoResponsePBImpl) resp).getProto(); + } catch (YarnException ye) { + throw new ServiceException(ye); + } catch (IOException ie) { + throw new ServiceException(ie); + } + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceProfilesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceProfilesRequestPBImpl.java new file mode 100644 index 00000000000..ba06251f7af --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceProfilesRequestPBImpl.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesRequestProto; + +/** + * Protobuf implementation class for GetAllResourceProfilesRequest. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class GetAllResourceProfilesRequestPBImpl + extends GetAllResourceProfilesRequest { + + private GetAllResourceProfilesRequestProto proto = + GetAllResourceProfilesRequestProto.getDefaultInstance(); + private GetAllResourceProfilesRequestProto.Builder builder = null; + + private boolean viaProto = false; + + public GetAllResourceProfilesRequestPBImpl() { + builder = GetAllResourceProfilesRequestProto.newBuilder(); + } + + public GetAllResourceProfilesRequestPBImpl( + GetAllResourceProfilesRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetAllResourceProfilesRequestProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceProfilesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceProfilesResponsePBImpl.java new file mode 100644 index 00000000000..eaa392f3703 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceProfilesResponsePBImpl.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProfilesProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProfileEntry; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesResponseProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesResponseProto; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Protobuf implementation class for the GetAllResourceProfilesResponse. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class GetAllResourceProfilesResponsePBImpl + extends GetAllResourceProfilesResponse { + + private GetAllResourceProfilesResponseProto proto = + GetAllResourceProfilesResponseProto.getDefaultInstance(); + private GetAllResourceProfilesResponseProto.Builder builder = null; + private boolean viaProto = false; + private Map profiles; + + public GetAllResourceProfilesResponsePBImpl() { + builder = GetAllResourceProfilesResponseProto.newBuilder(); + } + + public GetAllResourceProfilesResponsePBImpl( + GetAllResourceProfilesResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetAllResourceProfilesResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetAllResourceProfilesResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + if (profiles != null) { + addProfilesToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void addProfilesToProto() { + maybeInitBuilder(); + builder.clearResourceProfiles(); + if (profiles == null) { + return; + } + ResourceProfilesProto.Builder profilesBuilder = + ResourceProfilesProto.newBuilder(); + for (Map.Entry entry : profiles.entrySet()) { + ResourceProfileEntry.Builder profileEntry = + ResourceProfileEntry.newBuilder(); + profileEntry.setName(entry.getKey()); + profileEntry.setResources(convertToProtoFormat(entry.getValue())); + profilesBuilder.addResourceProfilesMap(profileEntry); + } + builder.setResourceProfiles(profilesBuilder.build()); + } + + public void setResourceProfiles(Map resourceProfiles) { + initResourceProfiles(); + profiles.clear(); + profiles.putAll(resourceProfiles); + } + + public 
Map getResourceProfiles() { + initResourceProfiles(); + return profiles; + } + + private void initResourceProfiles() { + if (profiles != null) { + return; + } + profiles = new HashMap<>(); + GetAllResourceProfilesResponseProtoOrBuilder p = viaProto ? proto : builder; + List profilesList = + p.getResourceProfiles().getResourceProfilesMapList(); + for (ResourceProfileEntry entry : profilesList) { + profiles.put(entry.getName(), new ResourcePBImpl(entry.getResources())); + } + } + + private ResourceProto convertToProtoFormat(Resource res) { + ResourcePBImpl r = new ResourcePBImpl(); + r.setMemorySize(res.getMemorySize()); + r.setVirtualCores(res.getVirtualCores()); + return r.getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoRequestPBImpl.java new file mode 100644 index 00000000000..b3f4692412e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoRequestPBImpl.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoRequestProto; + +/** + * Protobuf implementation class for GetAllResourceTypeInfoRequest. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class GetAllResourceTypeInfoRequestPBImpl + extends GetAllResourceTypeInfoRequest { + + private GetAllResourceTypeInfoRequestProto proto = + GetAllResourceTypeInfoRequestProto.getDefaultInstance(); + private GetAllResourceTypeInfoRequestProto.Builder builder = null; + + private boolean viaProto = false; + + public GetAllResourceTypeInfoRequestPBImpl() { + builder = GetAllResourceTypeInfoRequestProto.newBuilder(); + } + + public GetAllResourceTypeInfoRequestPBImpl( + GetAllResourceTypeInfoRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetAllResourceTypeInfoRequestProto getProto() { + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoResponsePBImpl.java new file mode 100644 index 00000000000..28decebcabf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoResponsePBImpl.java @@ -0,0 +1,184 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourceTypeInfoPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypeInfoProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoResponseProtoOrBuilder; + +import com.google.protobuf.TextFormat; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +/** + * Protobuf implementation class for the GetAllResourceTypeInfoResponse. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class GetAllResourceTypeInfoResponsePBImpl + extends + GetAllResourceTypeInfoResponse { + + private GetAllResourceTypeInfoResponseProto proto = GetAllResourceTypeInfoResponseProto + .getDefaultInstance(); + private GetAllResourceTypeInfoResponseProto.Builder builder = null; + private boolean viaProto = false; + + private List resourceTypeInfo; + + public GetAllResourceTypeInfoResponsePBImpl() { + builder = GetAllResourceTypeInfoResponseProto.newBuilder(); + } + + public GetAllResourceTypeInfoResponsePBImpl( + GetAllResourceTypeInfoResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetAllResourceTypeInfoResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public void setResourceTypeInfo(List resourceTypes) { + if (resourceTypeInfo == null) { + builder.clearResourceTypeInfo(); + } + this.resourceTypeInfo = resourceTypes; + } + + @Override + public List getResourceTypeInfo() { + initResourceTypeInfosList(); + return this.resourceTypeInfo; + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } + + private void mergeLocalToBuilder() { + if (this.resourceTypeInfo != null) { + addResourceTypeInfosToProto(); + } + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetAllResourceTypeInfoResponseProto.newBuilder(proto); + } + viaProto = false; + } + + // Once this is called. containerList will never be null - until a getProto + // is called. + private void initResourceTypeInfosList() { + if (this.resourceTypeInfo != null) { + return; + } + GetAllResourceTypeInfoResponseProtoOrBuilder p = viaProto ? 
proto : builder; + List list = p.getResourceTypeInfoList(); + resourceTypeInfo = new ArrayList(); + + for (ResourceTypeInfoProto a : list) { + resourceTypeInfo.add(convertFromProtoFormat(a)); + } + } + + private void addResourceTypeInfosToProto() { + maybeInitBuilder(); + builder.clearResourceTypeInfo(); + if (resourceTypeInfo == null) { + return; + } + Iterable iterable = new Iterable() { + @Override + public Iterator iterator() { + return new Iterator() { + + Iterator iter = resourceTypeInfo.iterator(); + + @Override + public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public ResourceTypeInfoProto next() { + return convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + + } + }; + + } + }; + builder.addAllResourceTypeInfo(iterable); + } + + private ResourceTypeInfoPBImpl convertFromProtoFormat( + ResourceTypeInfoProto p) { + return new ResourceTypeInfoPBImpl(p); + } + + private ResourceTypeInfoProto convertToProtoFormat(ResourceTypeInfo t) { + return ((ResourceTypeInfoPBImpl) t).getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetResourceProfileRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetResourceProfileRequestPBImpl.java new file mode 100644 index 00000000000..89a680c9f9e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetResourceProfileRequestPBImpl.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileRequestProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileRequestProto; + +/** + * Protobuf implementation for the GetResourceProfileRequest class. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class GetResourceProfileRequestPBImpl extends GetResourceProfileRequest { + + private GetResourceProfileRequestProto proto = + GetResourceProfileRequestProto.getDefaultInstance(); + private GetResourceProfileRequestProto.Builder builder = null; + private boolean viaProto = false; + + private String profile; + + public GetResourceProfileRequestPBImpl() { + builder = GetResourceProfileRequestProto.newBuilder(); + } + + public GetResourceProfileRequestPBImpl(GetResourceProfileRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public GetResourceProfileRequestProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public void setProfileName(String profileName) { + this.profile = profileName; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void mergeLocalToBuilder() { + if (profile != null) { + builder.setProfile(profile); + } + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetResourceProfileRequestProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public String getProfileName() { + if (this.profile != null) { + return profile; + } + GetResourceProfileRequestProtoOrBuilder protoOrBuilder = + viaProto ? proto : builder; + if (protoOrBuilder.hasProfile()) { + profile = protoOrBuilder.getProfile(); + } + return profile; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetResourceProfileResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetResourceProfileResponsePBImpl.java new file mode 100644 index 00000000000..e08d077a786 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetResourceProfileResponsePBImpl.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileResponseProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileResponseProto; +import org.apache.hadoop.yarn.util.resource.Resources; + +/** + * Protobuf implementation for the GetResourceProfileResponse class. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class GetResourceProfileResponsePBImpl + extends GetResourceProfileResponse { + + private GetResourceProfileResponseProto proto = + GetResourceProfileResponseProto.getDefaultInstance(); + private GetResourceProfileResponseProto.Builder builder = null; + private boolean viaProto = false; + + private Resource resource; + + public GetResourceProfileResponsePBImpl() { + builder = GetResourceProfileResponseProto.newBuilder(); + } + + public GetResourceProfileResponsePBImpl( + GetResourceProfileResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public Resource getResource() { + if (resource != null) { + return resource; + } + GetResourceProfileResponseProtoOrBuilder p = viaProto ? 
proto : builder; + if (p.hasResources()) { + resource = Resource.newInstance(p.getResources().getMemory(), + p.getResources().getVirtualCores()); + } + return resource; + } + + public void setResource(Resource r) { + resource = Resources.clone(r); + } + + public GetResourceProfileResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void mergeLocalToBuilder() { + if (resource != null) { + builder.setResources(convertToProtoFormat(resource)); + } + } + + private ResourceProto convertToProtoFormat(Resource res) { + ResourcePBImpl r = new ResourcePBImpl(); + r.setMemorySize(res.getMemorySize()); + r.setVirtualCores(res.getVirtualCores()); + return r.getProto(); + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = GetResourceProfileResponseProto.newBuilder(proto); + } + viaProto = false; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java index 1a70933a284..032bbc36224 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java @@ -33,6 +33,8 @@ import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; import 
org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProfilesProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProfileEntry; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; @@ -59,6 +61,7 @@ private List containersFromPreviousAttempts = null; private List nmTokens = null; private EnumSet schedulerResourceTypes = null; + private Map profiles = null; public RegisterApplicationMasterResponsePBImpl() { builder = RegisterApplicationMasterResponseProto.newBuilder(); @@ -123,6 +126,9 @@ private void mergeLocalToBuilder() { if(schedulerResourceTypes != null) { addSchedulerResourceTypes(); } + if (profiles != null) { + addResourceProfiles(); + } } @@ -433,6 +439,58 @@ public void setSchedulerResourceTypes(EnumSet types) { this.schedulerResourceTypes.addAll(types); } + private void addResourceProfiles() { + maybeInitBuilder(); + builder.clearResourceProfiles(); + if (profiles == null) { + return; + } + ResourceProfilesProto.Builder profilesBuilder = + ResourceProfilesProto.newBuilder(); + for (Map.Entry entry : profiles.entrySet()) { + ResourceProfileEntry.Builder entryBuilder = + ResourceProfileEntry.newBuilder(); + entryBuilder.setName(entry.getKey()); + entryBuilder.setResources(convertToProtoFormat(entry.getValue())); + profilesBuilder.addResourceProfilesMap(entryBuilder.build()); + } + builder.setResourceProfiles(profilesBuilder.build()); + } + + private void initResourceProfiles() { + if (this.profiles != null) { + return; + } + this.profiles = new HashMap<>(); + RegisterApplicationMasterResponseProtoOrBuilder p = + viaProto ? 
proto : builder; + + if (p.hasResourceProfiles()) { + ResourceProfilesProto profilesProto = p.getResourceProfiles(); + for (ResourceProfileEntry entry : profilesProto + .getResourceProfilesMapList()) { + this.profiles + .put(entry.getName(), convertFromProtoFormat(entry.getResources())); + } + } + } + + @Override + public Map getResourceProfiles() { + initResourceProfiles(); + return this.profiles; + } + + @Override + public void setResourceProfiles(Map profilesMap) { + if (profilesMap == null) { + return; + } + initResourceProfiles(); + this.profiles.clear(); + this.profiles.putAll(profilesMap); + } + private Resource convertFromProtoFormat(ResourceProto resource) { return new ResourcePBImpl(resource); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java index 1c85e28dca8..14ede5dbf34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java @@ -22,12 +22,15 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import com.google.protobuf.TextFormat; +import java.util.Map; + @Private @Unstable public class 
ApplicationResourceUsageReportPBImpl @@ -41,6 +44,9 @@ Resource reservedResources; Resource neededResources; + private Map resourceSecondsMap; + private Map preemptedResourceSecondsMap; + public ApplicationResourceUsageReportPBImpl() { builder = ApplicationResourceUsageReportProto.newBuilder(); } @@ -49,6 +55,8 @@ public ApplicationResourceUsageReportPBImpl( ApplicationResourceUsageReportProto proto) { this.proto = proto; viaProto = true; + getResourceSecondsMap(); + getPreemptedResourceSecondsMap(); } public synchronized ApplicationResourceUsageReportProto getProto() { @@ -89,6 +97,23 @@ private void mergeLocalToBuilder() { if (this.neededResources != null) { builder.setNeededResources(convertToProtoFormat(this.neededResources)); } + builder.clearApplicationResourceUsageMap(); + builder.clearApplicationPreemptedResourceUsageMap(); + + if (preemptedResourceSecondsMap != null && !preemptedResourceSecondsMap + .isEmpty()) { + builder.addAllApplicationPreemptedResourceUsageMap(ProtoUtils + .convertMapToStringLongMapProtoList(preemptedResourceSecondsMap)); + } + if (resourceSecondsMap != null && !resourceSecondsMap.isEmpty()) { + builder.addAllApplicationResourceUsageMap( + ProtoUtils.convertMapToStringLongMapProtoList(resourceSecondsMap)); + } + + builder.setMemorySeconds(this.getMemorySeconds()); + builder.setVcoreSeconds(this.getVcoreSeconds()); + builder.setPreemptedMemorySeconds(this.getPreemptedMemorySeconds()); + builder.setPreemptedVcoreSeconds(this.getPreemptedVcoreSeconds()); } private void mergeLocalToProto() { @@ -196,54 +221,64 @@ public synchronized void setNeededResources(Resource reserved_resources) { @Override public synchronized void setMemorySeconds(long memory_seconds) { - maybeInitBuilder(); - builder.setMemorySeconds(memory_seconds); + getResourceSecondsMap() + .put(ResourceInformation.MEMORY_MB.getName(), memory_seconds); } - + @Override public synchronized long getMemorySeconds() { - ApplicationResourceUsageReportProtoOrBuilder p = viaProto ? 
proto : builder; - return p.getMemorySeconds(); + Map tmp = getResourceSecondsMap(); + if (tmp.containsKey(ResourceInformation.MEMORY_MB.getName())) { + return tmp.get(ResourceInformation.MEMORY_MB.getName()); + } + return 0; } @Override public synchronized void setVcoreSeconds(long vcore_seconds) { - maybeInitBuilder(); - builder.setVcoreSeconds(vcore_seconds); + getResourceSecondsMap() + .put(ResourceInformation.VCORES.getName(), vcore_seconds); } @Override public synchronized long getVcoreSeconds() { - ApplicationResourceUsageReportProtoOrBuilder p = viaProto ? proto : builder; - return (p.getVcoreSeconds()); + Map tmp = getResourceSecondsMap(); + if (tmp.containsKey(ResourceInformation.VCORES.getName())) { + return tmp.get(ResourceInformation.VCORES.getName()); + } + return 0; } @Override public synchronized void setPreemptedMemorySeconds( long preemptedMemorySeconds) { - maybeInitBuilder(); - builder.setPreemptedMemorySeconds(preemptedMemorySeconds); + getPreemptedResourceSecondsMap() + .put(ResourceInformation.MEMORY_MB.getName(), preemptedMemorySeconds); } @Override public synchronized long getPreemptedMemorySeconds() { - ApplicationResourceUsageReportProtoOrBuilder p = - viaProto ? proto : builder; - return p.getPreemptedMemorySeconds(); + Map tmp = getPreemptedResourceSecondsMap(); + if (tmp.containsKey(ResourceInformation.MEMORY_MB.getName())) { + return tmp.get(ResourceInformation.MEMORY_MB.getName()); + } + return 0; } @Override public synchronized void setPreemptedVcoreSeconds( long vcoreSeconds) { - maybeInitBuilder(); - builder.setPreemptedVcoreSeconds(vcoreSeconds); + getPreemptedResourceSecondsMap() + .put(ResourceInformation.VCORES.getName(), vcoreSeconds); } @Override public synchronized long getPreemptedVcoreSeconds() { - ApplicationResourceUsageReportProtoOrBuilder p = - viaProto ? 
proto : builder; - return (p.getPreemptedVcoreSeconds()); + Map tmp = getPreemptedResourceSecondsMap(); + if (tmp.containsKey(ResourceInformation.VCORES.getName())) { + return tmp.get(ResourceInformation.VCORES.getName()); + } + return 0; } private ResourcePBImpl convertFromProtoFormat(ResourceProto p) { @@ -277,4 +312,81 @@ public synchronized void setClusterUsagePercentage(float clusterUsagePerc) { maybeInitBuilder(); builder.setClusterUsagePercentage((clusterUsagePerc)); } + + @Override + public synchronized void setResourceSecondsMap( + Map resourceSecondsMap) { + this.resourceSecondsMap = resourceSecondsMap; + if (resourceSecondsMap == null) { + return; + } + if (!resourceSecondsMap + .containsKey(ResourceInformation.MEMORY_MB.getName())) { + this.setMemorySeconds(0L); + } + if (!resourceSecondsMap.containsKey(ResourceInformation.VCORES.getName())) { + this.setVcoreSeconds(0L); + } + } + + @Override + public synchronized Map getResourceSecondsMap() { + if (this.resourceSecondsMap != null) { + return this.resourceSecondsMap; + } + ApplicationResourceUsageReportProtoOrBuilder p = viaProto ? 
proto : builder; + this.resourceSecondsMap = ProtoUtils + .convertStringLongMapProtoListToMap( + p.getApplicationResourceUsageMapList()); + this.setMemorySeconds(p.getMemorySeconds()); + this.setVcoreSeconds(p.getVcoreSeconds()); + return this.resourceSecondsMap; + } + + @Override + public synchronized void setPreemptedResourceSecondsMap( + Map preemptedResourceSecondsMap) { + this.preemptedResourceSecondsMap = preemptedResourceSecondsMap; + if (preemptedResourceSecondsMap == null) { + return; + } + if (!preemptedResourceSecondsMap + .containsKey(ResourceInformation.MEMORY_MB.getName())) { + this.setPreemptedMemorySeconds(0L); + } + if (!preemptedResourceSecondsMap + .containsKey(ResourceInformation.VCORES.getName())) { + this.setPreemptedVcoreSeconds(0L); + } + } + + @Override + public synchronized Map getPreemptedResourceSecondsMap() { + if (this.preemptedResourceSecondsMap != null) { + return this.preemptedResourceSecondsMap; + } + ApplicationResourceUsageReportProtoOrBuilder p = viaProto ? 
proto : builder; + this.preemptedResourceSecondsMap = ProtoUtils + .convertStringLongMapProtoListToMap( + p.getApplicationPreemptedResourceUsageMapList()); + this.setPreemptedMemorySeconds(p.getPreemptedMemorySeconds()); + this.setPreemptedVcoreSeconds(p.getPreemptedVcoreSeconds()); + return this.preemptedResourceSecondsMap; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProfileCapabilityPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProfileCapabilityPBImpl.java new file mode 100644 index 00000000000..8c161f8a0fc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProfileCapabilityPBImpl.java @@ -0,0 +1,129 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.api.records.ProfileCapability; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.proto.YarnProtos; +import org.apache.hadoop.yarn.proto.YarnProtos.ProfileCapabilityProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ProfileCapabilityProto; +import org.apache.hadoop.yarn.util.resource.Resources; + +/** + * Protobuf implementation for the ProfileCapability class. + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class ProfileCapabilityPBImpl extends ProfileCapability { + + private ProfileCapabilityProto proto = + ProfileCapabilityProto.getDefaultInstance(); + private ProfileCapabilityProto.Builder builder; + + private boolean viaProto; + + private String profile; + private Resource profileCapabilityOverride; + + public ProfileCapabilityPBImpl() { + builder = ProfileCapabilityProto.newBuilder(); + } + + public ProfileCapabilityPBImpl(ProfileCapabilityProto proto) { + this.proto = proto; + viaProto = true; + } + + @Override + public String getProfileName() { + if (profile != null) { + return profile; + } + ProfileCapabilityProtoOrBuilder p = viaProto ? proto : builder; + if (p.hasProfile()) { + profile = p.getProfile(); + } + return profile; + } + + @Override + public Resource getProfileCapabilityOverride() { + if (profileCapabilityOverride != null) { + return profileCapabilityOverride; + } + ProfileCapabilityProtoOrBuilder p = viaProto ? 
proto : builder; + if (p.hasProfileCapabilityOverride()) { + profileCapabilityOverride = + Resources.clone(new ResourcePBImpl(p.getProfileCapabilityOverride())); + } + return profileCapabilityOverride; + } + + @Override + public void setProfileName(String profileName) { + this.profile = profileName; + } + + @Override + public void setProfileCapabilityOverride(Resource r) { + this.profileCapabilityOverride = r; + } + + public ProfileCapabilityProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void mergeLocalToBuilder() { + if (profile != null) { + builder.setProfile(profile); + } + if (profileCapabilityOverride != null) { + builder.setProfileCapabilityOverride( + convertToProtoFormat(profileCapabilityOverride)); + } + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = ProfileCapabilityProto.newBuilder(proto); + } + viaProto = false; + } + + private YarnProtos.ResourceProto convertToProtoFormat(Resource res) { + ResourcePBImpl r = new ResourcePBImpl(); + r.setMemorySize(res.getMemorySize()); + r.setVirtualCores(res.getVirtualCores()); + return r.getProto(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java index fa9c43011b3..158c2ae72da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java @@ -19,10 +19,15 @@ package org.apache.hadoop.yarn.api.records.impl.pb; 
import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.AMCommand; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; @@ -44,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.UpdateContainerError; import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; @@ -71,6 +77,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypesProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos; import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateTypeProto; import org.apache.hadoop.yarn.server.api.ContainerType; @@ -433,6 +440,45 @@ public static UpdateContainerErrorPBImpl convertFromProtoFormat( convertToProtoFormat(UpdateContainerError t) { return ((UpdateContainerErrorPBImpl) t).getProto(); } + + /* + * ResourceTypes + */ + public static ResourceTypesProto converToProtoFormat(ResourceTypes e) { + return ResourceTypesProto.valueOf(e.name()); + } + + public static ResourceTypes convertFromProtoFormat(ResourceTypesProto e) { + return 
ResourceTypes.valueOf(e.name()); + } + + public static Map convertStringLongMapProtoListToMap( + List pList) { + Resource tmp = Resource.newInstance(0, 0); + Map ret = new HashMap<>(); + for (ResourceInformation entry : tmp.getResources()) { + ret.put(entry.getName(), 0L); + } + if (pList != null) { + for (YarnProtos.StringLongMapProto p : pList) { + ret.put(p.getKey(), p.getValue()); + } + } + return ret; + } + + public static List convertMapToStringLongMapProtoList( + Map map) { + List ret = new ArrayList<>(); + for (Map.Entry entry : map.entrySet()) { + YarnProtos.StringLongMapProto.Builder tmp = + YarnProtos.StringLongMapProto.newBuilder(); + tmp.setKey(entry.getKey()); + tmp.setValue(entry.getValue()); + ret.add(tmp.build()); + } + return ret; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java index 34109bec0f6..853078256ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java @@ -18,16 +18,32 @@ package org.apache.hadoop.yarn.api.records.impl.pb; - +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.api.records.impl.BaseResource; +import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException; +import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto; +import org.apache.hadoop.yarn.util.UnitsConversionUtil; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; + +import java.util.Arrays; +import java.util.Map; + @Private @Unstable public class ResourcePBImpl extends Resource { + + private static final Log LOG = LogFactory.getLog(ResourcePBImpl.class); + ResourceProto proto = ResourceProto.getDefaultInstance(); ResourceProto.Builder builder = null; boolean viaProto = false; @@ -47,14 +63,17 @@ static ResourceProto getProto(Resource r) { public ResourcePBImpl() { builder = ResourceProto.newBuilder(); + initResources(); } public ResourcePBImpl(ResourceProto proto) { this.proto = proto; viaProto = true; + initResources(); } - + public ResourceProto getProto() { + mergeLocalToProto(); proto = viaProto ? proto : builder.build(); viaProto = true; return proto; @@ -70,13 +89,19 @@ private void maybeInitBuilder() { @Override @SuppressWarnings("deprecation") public int getMemory() { - return (int) getMemorySize(); + return (int) this.getMemorySize(); } @Override public long getMemorySize() { - ResourceProtoOrBuilder p = viaProto ? proto : builder; - return p.getMemory(); + // memory should always be present + ResourceInformation ri = resources[MEMORY_INDEX]; + + if (ri.getUnits().isEmpty()) { + return ri.getValue(); + } + return UnitsConversionUtil.convert(ri.getUnits(), + ResourceInformation.MEMORY_MB.getUnits(), ri.getValue()); } @Override @@ -88,18 +113,134 @@ public void setMemory(int memory) { @Override public void setMemorySize(long memory) { maybeInitBuilder(); - builder.setMemory(memory); + getResourceInformation(MEMORY).setValue(memory); } @Override public int getVirtualCores() { - ResourceProtoOrBuilder p = viaProto ? 
proto : builder; - return p.getVirtualCores(); + // vcores should always be present + return (int) resources[VCORES_INDEX].getValue(); } @Override public void setVirtualCores(int vCores) { maybeInitBuilder(); - builder.setVirtualCores(vCores); + getResourceInformation(VCORES).setValue(vCores); + } + + private void initResources() { + if (this.resources != null) { + return; + } + ResourceProtoOrBuilder p = viaProto ? proto : builder; + initResourcesMap(); + Map indexMap = ResourceUtils.getResourceTypeIndex(); + for (ResourceInformationProto entry : p.getResourceValueMapList()) { + ResourceTypes type = + entry.hasType() ? ProtoUtils.convertFromProtoFormat(entry.getType()) : + ResourceTypes.COUNTABLE; + + // When unit not specified in proto, use the default unit. + String units = + entry.hasUnits() ? entry.getUnits() : ResourceUtils.getDefaultUnit( + entry.getKey()); + long value = entry.hasValue() ? entry.getValue() : 0L; + ResourceInformation ri = ResourceInformation + .newInstance(entry.getKey(), units, value, type, 0L, Long.MAX_VALUE); + Integer index = indexMap.get(entry.getKey()); + if (index == null) { + LOG.warn("Got unknown resource type: " + ri.getName() + "; skipping"); + } else { + resources[index].setResourceType(ri.getResourceType()); + resources[index].setUnits(ri.getUnits()); + resources[index].setValue(value); + } + } + this.setMemorySize(p.getMemory()); + this.setVirtualCores(p.getVirtualCores()); + } + + @Override + public void setResourceInformation(String resource, + ResourceInformation resourceInformation) { + maybeInitBuilder(); + if (resource == null || resourceInformation == null) { + throw new IllegalArgumentException( + "resource and/or resourceInformation cannot be null"); + } + if (!resource.equals(resourceInformation.getName())) { + resourceInformation.setName(resource); + } + ResourceInformation storedResourceInfo = getResourceInformation(resource); + ResourceInformation.copy(resourceInformation, storedResourceInfo); + } + + @Override + 
public void setResourceValue(String resource, long value) + throws ResourceNotFoundException { + maybeInitBuilder(); + if (resource == null) { + throw new IllegalArgumentException("resource type object cannot be null"); + } + getResourceInformation(resource).setValue(value); + } + + @Override + public ResourceInformation[] getResources() { + return super.getResources(); + } + + @Override + public ResourceInformation getResourceInformation(String resource) + throws ResourceNotFoundException { + return super.getResourceInformation(resource); + } + + @Override + public long getResourceValue(String resource) + throws ResourceNotFoundException { + return super.getResourceValue(resource); + } + + private void initResourcesMap() { + if (resources == null) { + ResourceInformation[] types = ResourceUtils.getResourceTypesArray(); + if (types == null) { + throw new YarnRuntimeException( + "Got null return value from ResourceUtils.getResourceTypes()"); + } + + resources = new ResourceInformation[types.length]; + for (ResourceInformation entry : types) { + int index = ResourceUtils.getResourceTypeIndex().get(entry.getName()); + resources[index] = ResourceInformation.newInstance(entry); + } + } + } + + synchronized private void mergeLocalToBuilder() { + builder.clearResourceValueMap(); + if(resources != null && resources.length != 0) { + for (ResourceInformation resInfo : resources) { + ResourceInformationProto.Builder e = ResourceInformationProto + .newBuilder(); + e.setKey(resInfo.getName()); + e.setUnits(resInfo.getUnits()); + e.setType(ProtoUtils.converToProtoFormat(resInfo.getResourceType())); + e.setValue(resInfo.getValue()); + builder.addResourceValueMap(e); + } + } + builder.setMemory(this.getMemorySize()); + builder.setVirtualCores(this.getVirtualCores()); + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java index eba539599a8..3c2964595cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java @@ -23,8 +23,10 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.proto.YarnProtos.ProfileCapabilityProto; import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; @@ -40,6 +42,7 @@ private Priority priority = null; private Resource capability = null; private ExecutionTypeRequest executionTypeRequest = null; + private ProfileCapability profile = null; public ResourceRequestPBImpl() { @@ -52,7 +55,7 @@ public ResourceRequestPBImpl(ResourceRequestProto proto) { } public ResourceRequestProto getProto() { - mergeLocalToProto(); + mergeLocalToProto(); proto = viaProto ? 
proto : builder.build(); viaProto = true; return proto; @@ -69,6 +72,9 @@ private void mergeLocalToBuilder() { builder.setExecutionTypeRequest( ProtoUtils.convertToProtoFormat(this.executionTypeRequest)); } + if (this.profile != null) { + builder.setProfile(converToProtoFormat(this.profile)); + } } private void mergeLocalToProto() { @@ -229,7 +235,8 @@ public String toString() { + ", Location: " + getResourceName() + ", Relax Locality: " + getRelaxLocality() + ", Execution Type Request: " + getExecutionTypeRequest() - + ", Node Label Expression: " + getNodeLabelExpression() + "}"; + + ", Node Label Expression: " + getNodeLabelExpression() + + ", Resource Profile: " + getProfileCapability() + "}"; } @Override @@ -250,4 +257,34 @@ public void setNodeLabelExpression(String nodeLabelExpression) { } builder.setNodeLabelExpression(nodeLabelExpression); } + + @Override + public void setProfileCapability(ProfileCapability profileCapability) { + maybeInitBuilder(); + if (profileCapability == null) { + builder.clearProfile(); + } + this.profile = profileCapability; + } + + @Override + public ProfileCapability getProfileCapability() { + if (profile != null) { + return profile; + } + ResourceRequestProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasProfile()) { + return null; + } + return new ProfileCapabilityPBImpl(p.getProfile()); + } + + private ProfileCapabilityProto converToProtoFormat( + ProfileCapability profileCapability) { + ProfileCapabilityPBImpl tmp = new ProfileCapabilityPBImpl(); + tmp.setProfileName(profileCapability.getProfileName()); + tmp.setProfileCapabilityOverride( + profileCapability.getProfileCapabilityOverride()); + return tmp.getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceTypeInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceTypeInfoPBImpl.java new file mode 100644 index 00000000000..17230e7dfa5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceTypeInfoPBImpl.java @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.records.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; +import org.apache.hadoop.yarn.proto.YarnProtos; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypeInfoProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypesProto; + +/** + * {@code ResourceTypeInfoPBImpl} which implements the + * {@link ResourceTypeInfo} class which represents different resource types + * supported in YARN. + */ +@Private +@Unstable +public class ResourceTypeInfoPBImpl extends ResourceTypeInfo { + + ResourceTypeInfoProto proto = ResourceTypeInfoProto.getDefaultInstance(); + ResourceTypeInfoProto.Builder builder = null; + boolean viaProto = false; + + private String name = null; + private String defaultUnit = null; + private ResourceTypes resourceTypes = null; + + public ResourceTypeInfoPBImpl() { + builder = ResourceTypeInfoProto.newBuilder(); + } + + public ResourceTypeInfoPBImpl(ResourceTypeInfoProto proto) { + this.proto = proto; + viaProto = true; + } + + public ResourceTypeInfoProto getProto() { + mergeLocalToProto(); + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) { + maybeInitBuilder(); + } + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void mergeLocalToBuilder() { + if (this.name != null) { + builder.setName(this.name); + } + if (this.defaultUnit != null) { + builder.setUnits(this.defaultUnit); + } + if (this.resourceTypes != null) { + builder.setType(convertToProtoFormat(this.resourceTypes)); + } + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = YarnProtos.ResourceTypeInfoProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public String getName() { + if (this.name != 
null) { + return this.name; + } + + YarnProtos.ResourceTypeInfoProtoOrBuilder p = viaProto ? proto : builder; + return p.getName(); + } + + @Override + public void setName(String rName) { + maybeInitBuilder(); + if (rName == null) { + builder.clearName(); + } + this.name = rName; + } + + @Override + public String getDefaultUnit() { + if (this.defaultUnit != null) { + return this.defaultUnit; + } + + YarnProtos.ResourceTypeInfoProtoOrBuilder p = viaProto ? proto : builder; + return p.getUnits(); + } + + @Override + public void setDefaultUnit(String rUnits) { + maybeInitBuilder(); + if (rUnits == null) { + builder.clearUnits(); + } + this.defaultUnit = rUnits; + } + + @Override + public ResourceTypes getResourceType() { + if (this.resourceTypes != null) { + return this.resourceTypes; + } + + YarnProtos.ResourceTypeInfoProtoOrBuilder p = viaProto ? proto : builder; + return convertFromProtoFormat(p.getType()); + } + + @Override + public void setResourceType(ResourceTypes type) { + maybeInitBuilder(); + if (type == null) { + builder.clearType(); + } + this.resourceTypes = type; + } + + public static ResourceTypesProto convertToProtoFormat(ResourceTypes e) { + return ResourceTypesProto.valueOf(e.name()); + } + + public static ResourceTypes convertFromProtoFormat(ResourceTypesProto e) { + return ResourceTypes.valueOf(e.name()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java index 0240fbcd59f..331be308e2e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java @@ -20,9 +20,15 @@ import com.google.common.base.Joiner; import com.google.common.base.Splitter; + +import java.util.ArrayList; +import 
java.util.List; +import java.util.Map; import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; /** * Common string manipulation helpers @@ -174,4 +180,34 @@ private static void uappend(StringBuilder sb, String part) { } sb.append(part); } + + public static String getResourceSecondsString(Map targetMap) { + List strings = new ArrayList<>(targetMap.size()); + //completed app report in the timeline server doesn't have usage report + Long memorySeconds = 0L; + Long vcoreSeconds = 0L; + if (targetMap.containsKey(ResourceInformation.MEMORY_MB.getName())) { + memorySeconds = targetMap.get(ResourceInformation.MEMORY_MB.getName()); + } + if (targetMap.containsKey(ResourceInformation.VCORES.getName())) { + vcoreSeconds = targetMap.get(ResourceInformation.VCORES.getName()); + } + strings.add(memorySeconds + " MB-seconds"); + strings.add(vcoreSeconds + " vcore-seconds"); + Map tmp = ResourceUtils.getResourceTypes(); + if (targetMap.size() > 2) { + for (Map.Entry entry : targetMap.entrySet()) { + if (!entry.getKey().equals(ResourceInformation.MEMORY_MB.getName()) + && !entry.getKey().equals(ResourceInformation.VCORES.getName())) { + String units = ""; + if (tmp.containsKey(entry.getKey())) { + units = tmp.get(entry.getKey()).getUnits(); + } + strings.add(entry.getValue() + " " + entry.getKey() + "-" + units + + "seconds"); + } + } + } + return String.join(", ", strings); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index 7697e1dfc33..949fd9ebf5f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -22,136 +22,397 @@ import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.util.UnitsConversionUtil; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.Arrays; /** - * A {@link ResourceCalculator} which uses the concept of + * A {@link ResourceCalculator} which uses the concept of * dominant resource to compare multi-dimensional resources. * - * Essentially the idea is that the in a multi-resource environment, - * the resource allocation should be determined by the dominant share - * of an entity (user or queue), which is the maximum share that the - * entity has been allocated of any resource. - * - * In a nutshell, it seeks to maximize the minimum dominant share across - * all entities. - * + * Essentially the idea is that the in a multi-resource environment, + * the resource allocation should be determined by the dominant share + * of an entity (user or queue), which is the maximum share that the + * entity has been allocated of any resource. + * + * In a nutshell, it seeks to maximize the minimum dominant share across + * all entities. + * * For example, if user A runs CPU-heavy tasks and user B runs - * memory-heavy tasks, it attempts to equalize CPU share of user A - * with Memory-share of user B. - * + * memory-heavy tasks, it attempts to equalize CPU share of user A + * with Memory-share of user B. 
+ * * In the single resource case, it reduces to max-min fairness for that resource. - * + * * See the Dominant Resource Fairness paper for more details: * www.cs.berkeley.edu/~matei/papers/2011/nsdi_drf.pdf */ @Private @Unstable public class DominantResourceCalculator extends ResourceCalculator { - private static final Log LOG = - LogFactory.getLog(DominantResourceCalculator.class); + static final Log LOG = LogFactory.getLog(DominantResourceCalculator.class); + private static final int maxLength = + ResourceUtils.getNumberOfKnownResourceTypes(); + + public DominantResourceCalculator() { + } + + /** + * Compare two resources - if the value for every resource type for the lhs + * is greater than that of the rhs, return 1. If the value for every resource + * type in the lhs is less than the rhs, return -1. Otherwise, return 0 + * + * @param lhs resource to be compared + * @param rhs resource to be compared + * @return 0, 1, or -1 + */ + private int compare(Resource lhs, Resource rhs) { + boolean lhsGreater = false; + boolean rhsGreater = false; + int ret = 0; + + for (int i = 0; i < maxLength; i++) { + ResourceInformation lhsResourceInformation = lhs + .getResourceInformation(i); + ResourceInformation rhsResourceInformation = rhs + .getResourceInformation(i); + int diff = lhsResourceInformation.compareTo(rhsResourceInformation); + if (diff >= 1) { + lhsGreater = true; + } else if (diff <= -1) { + rhsGreater = true; + } + } + if (lhsGreater && rhsGreater) { + ret = 0; + } else if (lhsGreater) { + ret = 1; + } else if (rhsGreater) { + ret = -1; + } + return ret; + } @Override public int compare(Resource clusterResource, Resource lhs, Resource rhs, boolean singleType) { - if (lhs.equals(rhs)) { return 0; } - + if (isInvalidDivisor(clusterResource)) { - if ((lhs.getMemorySize() < rhs.getMemorySize() && - lhs.getVirtualCores() > rhs.getVirtualCores()) || - (lhs.getMemorySize() > rhs.getMemorySize() && - lhs.getVirtualCores() < rhs.getVirtualCores())) { - return 0; - } 
else if (lhs.getMemorySize() > rhs.getMemorySize() - || lhs.getVirtualCores() > rhs.getVirtualCores()) { - return 1; - } else if (lhs.getMemorySize() < rhs.getMemorySize() - || lhs.getVirtualCores() < rhs.getVirtualCores()) { - return -1; + return this.compare(lhs, rhs); + } + + // We have to calculate the shares for all resource types for both + // resources and then look for which resource has the biggest + // share overall. + ResourceInformation[] clusterRes = clusterResource.getResources(); + // If array creation shows up as a time sink, these arrays could be cached + // because they're always the same length. + double[] lhsShares = new double[maxLength]; + double[] rhsShares = new double[maxLength]; + double diff; + + try { + if (singleType) { + double[] max = new double[2]; + + calculateShares(clusterRes, lhs, rhs, lhsShares, rhsShares, max); + + diff = max[0] - max[1]; + } else if (maxLength == 2) { + // Special case to handle the common scenario of only CPU and memory + // so that we can optimize for performance + diff = calculateSharesForMandatoryResources(clusterRes, lhs, rhs, + lhsShares, rhsShares); + } else { + calculateShares(clusterRes, lhs, rhs, lhsShares, rhsShares); + + Arrays.sort(lhsShares); + Arrays.sort(rhsShares); + + diff = compareShares(lhsShares, rhsShares); } + } catch (ArrayIndexOutOfBoundsException ex) { + StringWriter out = new StringWriter(); // No need to close a StringWriter + ex.printStackTrace(new PrintWriter(out)); + + LOG.error("A problem was encountered while calculating resource " + + "availability that should not occur under normal circumstances. 
" + + "Please report this error to the Hadoop community by opening a " + + "JIRA ticket at http://issues.apache.org/jira and including the " + + "following information:\n* Exception encountered: " + out + "* " + + "Cluster resources: " + Arrays.toString(clusterRes) + "\n* " + + "LHS resource: " + Arrays.toString(lhs.getResources()) + "\n* " + + "RHS resource: " + Arrays.toString(rhs.getResources())); + LOG.error("The resource manager is in an inconsistent state. It is safe " + + "for the resource manager to be restarted as the error encountered " + + "should be transitive. If high availability is enabled, failing " + + "over to a standby resource manager is also safe."); + throw new YarnRuntimeException("A problem was encountered while " + + "calculating resource availability that should not occur under " + + "normal circumstances. Please see the log for more information.", + ex); } - float l = getResourceAsValue(clusterResource, lhs, true); - float r = getResourceAsValue(clusterResource, rhs, true); - - if (l < r) { + return (int) Math.signum(diff); + } + + /** + * Calculate the shares for {@code first} and {@code second} according to + * {@code clusterRes}, and store the results in {@code firstShares} and + * {@code secondShares}, respectively. All parameters must be non-null. 
+ * @param clusterRes the array of ResourceInformation instances that + * represents the cluster's maximum resources + * @param first the first resource to compare + * @param second the second resource to compare + * @param firstShares an array to store the shares for the first resource + * @param secondShares an array to store the shares for the second resource + * @return -1.0, 0.0, or 1.0, depending on whether the max share of the first + * resource is less than, equal to, or greater than the max share of the + * second resource, respectively + * @throws NullPointerException if any parameter is null + */ + private void calculateShares(ResourceInformation[] clusterRes, Resource first, + Resource second, double[] firstShares, double[] secondShares) { + ResourceInformation[] firstRes = first.getResources(); + ResourceInformation[] secondRes = second.getResources(); + + for (int i = 0; i < maxLength; i++) { + firstShares[i] = calculateShare(clusterRes[i], firstRes[i]); + secondShares[i] = calculateShare(clusterRes[i], secondRes[i]); + } + } + + /** + * Calculate the shares for {@code first} and {@code second} according to + * {@code clusterRes}, and store the results in {@code firstShares} and + * {@code secondShares}, respectively. All parameters must be non-null. + * This method assumes that the length of {@code clusterRes} is exactly 2 and + * makes performance optimizations based on that assumption. 
+ * @param clusterRes the array of ResourceInformation instances that + * represents the cluster's maximum resources + * @param first the first resource to compare + * @param second the second resource to compare + * @param firstShares an array to store the shares for the first resource + * @param secondShares an array to store the shares for the second resource + * @return -1.0, 0.0, or 1.0, depending on whether the max share of the first + * resource is less than, equal to, or greater than the max share of the + * second resource, respectively + * @throws NullPointerException if any parameter is null + */ + private int calculateSharesForMandatoryResources( + ResourceInformation[] clusterRes, Resource first, Resource second, + double[] firstShares, double[] secondShares) { + ResourceInformation[] firstRes = first.getResources(); + ResourceInformation[] secondRes = second.getResources(); + int firstDom = 0; + int secondDom = 0; + int firstSub = 0; + int secondSub = 0; + + for (int i = 0; i < maxLength; i++) { + firstShares[i] = calculateShare(clusterRes[i], firstRes[i]); + secondShares[i] = calculateShare(clusterRes[i], secondRes[i]); + + if (firstShares[i] > firstShares[firstDom]) { + firstDom = i; + } + + if (firstShares[i] < firstShares[firstSub]) { + firstSub = i; + } + + if (secondShares[i] > secondShares[secondDom]) { + secondDom = i; + } + + if (secondShares[i] < secondShares[secondSub]) { + secondSub = i; + } + } + + if (firstShares[firstDom] > secondShares[secondDom]) { + return 1; + } else if (firstShares[firstDom] < secondShares[secondDom]) { return -1; - } else if (l > r) { + } else if (firstShares[firstSub] > secondShares[secondSub]) { return 1; - } else if (!singleType) { - l = getResourceAsValue(clusterResource, lhs, false); - r = getResourceAsValue(clusterResource, rhs, false); - if (l < r) { - return -1; - } else if (l > r) { - return 1; + } else if (firstShares[firstSub] < secondShares[secondSub]) { + return -1; + } else { + return 0; + } + } + + 
/** + * Calculate the shares for {@code first} and {@code second} according to + * {@code clusterRes}, and store the results in {@code firstShares} and + * {@code secondShares}, respectively. {@code max} will be populated with + * the max shares from {@code firstShare} and {@code secondShare} in the + * first and second indices, respectively. All parameters must be non-null, + * and {@code max} must have a length of at least 2. + * @param clusterRes the array of ResourceInformation instances that + * represents the cluster's maximum resources + * @param first the first resource to compare + * @param second the second resource to compare + * @param firstShares an array to store the shares for the first resource + * @param secondShares an array to store the shares for the second resource + * @param max an array to store the max shares of the first and second + * resources + * @return -1.0, 0.0, or 1.0, depending on whether the max share of the first + * resource is less than, equal to, or greater than the max share of the + * second resource, respectively + * @throws NullPointerException if any parameter is null + * @throws ArrayIndexOutOfBoundsException if the length of {@code max} is + * less than 2 + */ + private void calculateShares(ResourceInformation[] clusterRes, Resource first, + Resource second, double[] firstShares, double[] secondShares, + double[] max) { + ResourceInformation[] firstRes = first.getResources(); + ResourceInformation[] secondRes = second.getResources(); + + max[0] = 0.0; + max[1] = 0.0; + + for (int i = 0; i < maxLength; i++) { + firstShares[i] = calculateShare(clusterRes[i], firstRes[i]); + secondShares[i] = calculateShare(clusterRes[i], secondRes[i]); + + if (firstShares[i] > max[0]) { + max[0] = firstShares[i]; + } + + if (secondShares[i] > max[1]) { + max[1] = secondShares[i]; } } - - return 0; } /** - * Use 'dominant' for now since we only have 2 resources - gives us a slight - * performance boost. 
- * - * Once we add more resources, we'll need a more complicated (and slightly - * less performant algorithm). + * Calculate the share for a resource type. + * @param clusterRes the resource type for the cluster maximum + * @param res the resource type for which to calculate the share + * @return the share + */ + private double calculateShare(ResourceInformation clusterRes, + ResourceInformation res) { + // Convert the resources' units into the cluster resource's units + long value = UnitsConversionUtil.convert(res.getUnits(), + clusterRes.getUnits(), res.getValue()); + + return (double) value / clusterRes.getValue(); + } + + /** + * Compare the two shares arrays by comparing the largest elements, then the + * next largest if the previous were equal, etc. The share arrays must be + * sorted in ascending order. + * @param lhsShares the first share array to compare + * @param rhsShares the second share array to compare + * @return a number that is less than 0 if the first array is less than the + * second, equal to 0 if the arrays are equal, and greater than 0 if the + * first array is greater than the second */ - protected float getResourceAsValue( - Resource clusterResource, Resource resource, boolean dominant) { - // Just use 'dominant' resource - return (dominant) ? - Math.max( - (float)resource.getMemorySize() / clusterResource.getMemorySize(), - (float)resource.getVirtualCores() / clusterResource.getVirtualCores() - ) - : - Math.min( - (float)resource.getMemorySize() / clusterResource.getMemorySize(), - (float)resource.getVirtualCores() / clusterResource.getVirtualCores() - ); - } - + private double compareShares(double[] lhsShares, double[] rhsShares) { + double diff = 0.0; + + // lhsShares and rhsShares must necessarily have the same length, because + // everyone uses the same master resource list. 
+ for (int i = lhsShares.length - 1; i >= 0; i--) { + diff = lhsShares[i] - rhsShares[i]; + + if (diff != 0.0) { + break; + } + } + + return diff; + } + @Override - public long computeAvailableContainers(Resource available, Resource required) { - return Math.min( - available.getMemorySize() / required.getMemorySize(), - available.getVirtualCores() / required.getVirtualCores()); + public long computeAvailableContainers(Resource available, + Resource required) { + long min = Long.MAX_VALUE; + for (int i = 0; i < maxLength; i++) { + ResourceInformation availableResource = available + .getResourceInformation(i); + ResourceInformation requiredResource = required.getResourceInformation(i); + long requiredResourceValue = UnitsConversionUtil.convert( + requiredResource.getUnits(), availableResource.getUnits(), + requiredResource.getValue()); + if (requiredResourceValue != 0) { + long tmp = availableResource.getValue() / requiredResourceValue; + min = min < tmp ? min : tmp; + } + } + return min > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) min; } @Override - public float divide(Resource clusterResource, + public float divide(Resource clusterResource, Resource numerator, Resource denominator) { - return - getResourceAsValue(clusterResource, numerator, true) / - getResourceAsValue(clusterResource, denominator, true); + int nKnownResourceTypes = ResourceUtils.getNumberOfKnownResourceTypes(); + ResourceInformation[] clusterRes = clusterResource.getResources(); + // We have to provide the calculateShares() method with somewhere to store + // the shares. We don't actually need these shares afterwards. + double[] numeratorShares = new double[nKnownResourceTypes]; + double[] denominatorShares = new double[nKnownResourceTypes]; + // We also have to provide a place for calculateShares() to store the max + // shares so that we can use them. 
+ double[] max = new double[2]; + + calculateShares(clusterRes, numerator, denominator, numeratorShares, + denominatorShares, max); + + return (float) (max[0] / max[1]); } - + @Override public boolean isInvalidDivisor(Resource r) { - if (r.getMemorySize() == 0.0f || r.getVirtualCores() == 0.0f) { - return true; + for (ResourceInformation res : r.getResources()) { + if (res.getValue() == 0L) { + return true; + } } return false; } @Override public float ratio(Resource a, Resource b) { - return Math.max( - (float)a.getMemorySize()/b.getMemorySize(), - (float)a.getVirtualCores()/b.getVirtualCores() - ); + float ratio = 0.0f; + for (int i = 0; i < maxLength; i++) { + ResourceInformation aResourceInformation = a.getResourceInformation(i); + ResourceInformation bResourceInformation = b.getResourceInformation(i); + long bResourceValue = UnitsConversionUtil.convert( + bResourceInformation.getUnits(), aResourceInformation.getUnits(), + bResourceInformation.getValue()); + float tmp = (float) aResourceInformation.getValue() + / (float) bResourceValue; + ratio = ratio > tmp ? 
ratio : tmp; + } + return ratio; } @Override public Resource divideAndCeil(Resource numerator, int denominator) { - return Resources.createResource( - divideAndCeil(numerator.getMemorySize(), denominator), - divideAndCeil(numerator.getVirtualCores(), denominator) - ); + return divideAndCeil(numerator, (long) denominator); + } + + public Resource divideAndCeil(Resource numerator, long denominator) { + Resource ret = Resource.newInstance(numerator); + for (int i = 0; i < maxLength; i++) { + ResourceInformation resourceInformation = ret.getResourceInformation(i); + resourceInformation + .setValue(divideAndCeil(resourceInformation.getValue(), denominator)); + } + return ret; } @Override @@ -164,80 +425,132 @@ public Resource divideAndCeil(Resource numerator, float denominator) { @Override public Resource normalize(Resource r, Resource minimumResource, - Resource maximumResource, Resource stepFactor) { - if (stepFactor.getMemorySize() == 0 || stepFactor.getVirtualCores() == 0) { - Resource step = Resources.clone(stepFactor); - if (stepFactor.getMemorySize() == 0) { - LOG.error("Memory cannot be allocated in increments of zero. Assuming " - + minimumResource.getMemorySize() + "MB increment size. " - + "Please ensure the scheduler configuration is correct."); - step.setMemorySize(minimumResource.getMemorySize()); - } + Resource maximumResource, Resource stepFactor) { + Resource ret = Resource.newInstance(r); + for (int i = 0; i < maxLength; i++) { + ResourceInformation rResourceInformation = r.getResourceInformation(i); + ResourceInformation minimumResourceInformation = minimumResource + .getResourceInformation(i); + ResourceInformation maximumResourceInformation = maximumResource + .getResourceInformation(i); + ResourceInformation stepFactorResourceInformation = stepFactor + .getResourceInformation(i); + ResourceInformation tmp = ret.getResourceInformation(i); - if (stepFactor.getVirtualCores() == 0) { - LOG.error("VCore cannot be allocated in increments of zero. 
Assuming " - + minimumResource.getVirtualCores() + "VCores increment size. " - + "Please ensure the scheduler configuration is correct."); - step.setVirtualCores(minimumResource.getVirtualCores()); + long rValue = rResourceInformation.getValue(); + long minimumValue = UnitsConversionUtil.convert( + minimumResourceInformation.getUnits(), + rResourceInformation.getUnits(), + minimumResourceInformation.getValue()); + long maximumValue = UnitsConversionUtil.convert( + maximumResourceInformation.getUnits(), + rResourceInformation.getUnits(), + maximumResourceInformation.getValue()); + long stepFactorValue = UnitsConversionUtil.convert( + stepFactorResourceInformation.getUnits(), + rResourceInformation.getUnits(), + stepFactorResourceInformation.getValue()); + long value = Math.max(rValue, minimumValue); + if (stepFactorValue != 0) { + value = roundUp(value, stepFactorValue); } - - stepFactor = step; + tmp.setValue(Math.min(value, maximumValue)); + ret.setResourceInformation(i, tmp); } - - long normalizedMemory = Math.min( - roundUp( - Math.max(r.getMemorySize(), minimumResource.getMemorySize()), - stepFactor.getMemorySize()), - maximumResource.getMemorySize()); - int normalizedCores = Math.min( - roundUp( - Math.max(r.getVirtualCores(), minimumResource.getVirtualCores()), - stepFactor.getVirtualCores()), - maximumResource.getVirtualCores()); - return Resources.createResource(normalizedMemory, - normalizedCores); + return ret; } @Override public Resource roundUp(Resource r, Resource stepFactor) { - return Resources.createResource( - roundUp(r.getMemorySize(), stepFactor.getMemorySize()), - roundUp(r.getVirtualCores(), stepFactor.getVirtualCores()) - ); + return this.rounding(r, stepFactor, true); } @Override public Resource roundDown(Resource r, Resource stepFactor) { - return Resources.createResource( - roundDown(r.getMemorySize(), stepFactor.getMemorySize()), - roundDown(r.getVirtualCores(), stepFactor.getVirtualCores()) - ); + return this.rounding(r, stepFactor, 
false); + } + + private Resource rounding(Resource r, Resource stepFactor, boolean roundUp) { + Resource ret = Resource.newInstance(r); + for (int i = 0; i < maxLength; i++) { + ResourceInformation rResourceInformation = r.getResourceInformation(i); + ResourceInformation stepFactorResourceInformation = stepFactor + .getResourceInformation(i); + + long rValue = rResourceInformation.getValue(); + long stepFactorValue = UnitsConversionUtil.convert( + stepFactorResourceInformation.getUnits(), + rResourceInformation.getUnits(), + stepFactorResourceInformation.getValue()); + long value = rValue; + if (stepFactorValue != 0) { + value = roundUp + ? roundUp(rValue, stepFactorValue) + : roundDown(rValue, stepFactorValue); + } + ResourceInformation.copy(rResourceInformation, + ret.getResourceInformation(i)); + ret.getResourceInformation(i).setValue(value); + } + return ret; } @Override public Resource multiplyAndNormalizeUp(Resource r, double by, Resource stepFactor) { - return Resources.createResource( - roundUp((long) Math.ceil((float) (r.getMemorySize() * by)), - stepFactor.getMemorySize()), - roundUp((int) Math.ceil((float) (r.getVirtualCores() * by)), - stepFactor.getVirtualCores())); + return this.multiplyAndNormalize(r, by, stepFactor, true); } @Override public Resource multiplyAndNormalizeDown(Resource r, double by, Resource stepFactor) { - return Resources.createResource( - roundDown((long) (r.getMemorySize() * by), stepFactor.getMemorySize()), - roundDown((int) (r.getVirtualCores() * by), - stepFactor.getVirtualCores())); + return this.multiplyAndNormalize(r, by, stepFactor, false); + } + + private Resource multiplyAndNormalize(Resource r, double by, + Resource stepFactor, boolean roundUp) { + Resource ret = Resource.newInstance(r); + for (int i = 0; i < maxLength; i++) { + ResourceInformation rResourceInformation = r.getResourceInformation(i); + ResourceInformation stepFactorResourceInformation = stepFactor + .getResourceInformation(i); + ResourceInformation tmp = 
ret.getResourceInformation(i); + + long rValue = rResourceInformation.getValue(); + long stepFactorValue = UnitsConversionUtil.convert( + stepFactorResourceInformation.getUnits(), + rResourceInformation.getUnits(), + stepFactorResourceInformation.getValue()); + long value; + if (stepFactorValue != 0) { + value = roundUp + ? roundUp((long) Math.ceil((float) (rValue * by)), stepFactorValue) + : roundDown((long) (rValue * by), stepFactorValue); + } else { + value = roundUp + ? (long) Math.ceil((float) (rValue * by)) + : (long) (rValue * by); + } + tmp.setValue(value); + } + return ret; } @Override - public boolean fitsIn(Resource cluster, - Resource smaller, Resource bigger) { - return smaller.getMemorySize() <= bigger.getMemorySize() - && smaller.getVirtualCores() <= bigger.getVirtualCores(); + public boolean fitsIn(Resource cluster, Resource smaller, Resource bigger) { + for (int i = 0; i < maxLength; i++) { + ResourceInformation sResourceInformation = smaller + .getResourceInformation(i); + ResourceInformation bResourceInformation = bigger + .getResourceInformation(i); + long sResourceValue = UnitsConversionUtil.convert( + sResourceInformation.getUnits(), bResourceInformation.getUnits(), + sResourceInformation.getValue()); + if (sResourceValue > bResourceInformation.getValue()) { + return false; + } + } + return true; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java index a1d14fdce73..874fd8e2665 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java @@ -18,104 +18,122 @@ package org.apache.hadoop.yarn.util.resource; +import org.apache.commons.logging.Log; 
+import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.api.records.impl.BaseResource; +import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException; +import org.apache.hadoop.yarn.util.UnitsConversionUtil; -@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) +import java.util.Arrays; + +/** + * Resources is a computation class which provides a set of apis to do + * mathematical operations on Resource object. + */ +@InterfaceAudience.LimitedPrivate({ "YARN", "MapReduce" }) @Unstable public class Resources { - - // Java doesn't have const :( - private static final Resource NONE = new Resource() { - @Override - @SuppressWarnings("deprecation") - public int getMemory() { - return 0; - } - - @Override - public long getMemorySize() { - return 0; - } - - @Override - public void setMemorySize(long memory) { - throw new RuntimeException("NONE cannot be modified!"); - } + private static final Log LOG = + LogFactory.getLog(Resources.class); - @Override - @SuppressWarnings("deprecation") - public void setMemory(int memory) { - throw new RuntimeException("NONE cannot be modified!"); - } - @Override - public int getVirtualCores() { - return 0; - } + // Cache the #known-resource-types so that we don't have to access it everytime. + private final static int maxLength = ResourceUtils.getNumberOfKnownResourceTypes(); - @Override - public void setVirtualCores(int cores) { - throw new RuntimeException("NONE cannot be modified!"); + /** + * Helper class to create a resource with a fixed value for all resource + * types. For example, a NONE resource which returns 0 for any resource type. 
+ */ + static class FixedValueResource extends BaseResource { + + private long resourceValue; + private String name; + + /** + * Constructor for a fixed value resource. + * @param rName the name of the resource + * @param value the fixed value to be returned for all resource types + */ + FixedValueResource(String rName, long value) { + this.resourceValue = value; + this.name = rName; + initResourceMap(); } - @Override - public int compareTo(Resource o) { - long diff = 0 - o.getMemorySize(); - if (diff == 0) { - diff = 0 - o.getVirtualCores(); + private int resourceValueToInt() { + if(this.resourceValue > Integer.MAX_VALUE) { + return Integer.MAX_VALUE; } - return Long.signum(diff); + return Long.valueOf(this.resourceValue).intValue(); } - - }; - - private static final Resource UNBOUNDED = new Resource() { @Override @SuppressWarnings("deprecation") public int getMemory() { - return Integer.MAX_VALUE; + return resourceValueToInt(); } @Override public long getMemorySize() { - return Long.MAX_VALUE; + return this.resourceValue; } @Override @SuppressWarnings("deprecation") public void setMemory(int memory) { - throw new RuntimeException("UNBOUNDED cannot be modified!"); + throw new RuntimeException(name + " cannot be modified!"); } @Override public void setMemorySize(long memory) { - throw new RuntimeException("UNBOUNDED cannot be modified!"); + throw new RuntimeException(name + " cannot be modified!"); } @Override public int getVirtualCores() { - return Integer.MAX_VALUE; + return resourceValueToInt(); + } + + @Override + public void setVirtualCores(int virtualCores) { + throw new RuntimeException(name + " cannot be modified!"); } @Override - public void setVirtualCores(int cores) { - throw new RuntimeException("UNBOUNDED cannot be modified!"); + public void setResourceInformation(String resource, + ResourceInformation resourceInformation) + throws ResourceNotFoundException { + throw new RuntimeException(name + " cannot be modified!"); } @Override - public int 
compareTo(Resource o) { - long diff = Long.MAX_VALUE - o.getMemorySize(); - if (diff == 0) { - diff = Integer.MAX_VALUE - o.getVirtualCores(); + public void setResourceValue(String resource, long value) + throws ResourceNotFoundException { + throw new RuntimeException(name + " cannot be modified!"); + } + + private void initResourceMap() { + ResourceInformation[] types = ResourceUtils.getResourceTypesArray(); + if (types != null) { + resources = new ResourceInformation[types.length]; + for (int index = 0; index < types.length; index++) { + resources[index] = ResourceInformation.newInstance(types[index]); + resources[index].setValue(resourceValue); + + // this is a fix for getVirtualCores returning an int + if (resourceValue > Integer.MAX_VALUE && ResourceInformation.VCORES + .getName().equals(resources[index].getName())) { + resources[index].setValue((long) Integer.MAX_VALUE); + } + } } - return Long.signum(diff); } - - }; + } public static Resource createResource(int memory) { return createResource(memory, (memory > 0) ? 1 : 0); @@ -125,6 +143,11 @@ public static Resource createResource(int memory, int cores) { return Resource.newInstance(memory, cores); } + private static final Resource UNBOUNDED = + new FixedValueResource("UNBOUNDED", Long.MAX_VALUE); + + private static final Resource NONE = new FixedValueResource("NONE", 0L); + public static Resource createResource(long memory) { return createResource(memory, (memory > 0) ? 
1 : 0); } @@ -152,12 +175,25 @@ public static Resource unbounded() { } public static Resource clone(Resource res) { - return createResource(res.getMemorySize(), res.getVirtualCores()); + return Resource.newInstance(res); } public static Resource addTo(Resource lhs, Resource rhs) { - lhs.setMemorySize(lhs.getMemorySize() + rhs.getMemorySize()); - lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores()); + for (int i = 0; i < maxLength; i++) { + try { + ResourceInformation rhsValue = rhs.getResourceInformation(i); + ResourceInformation lhsValue = lhs.getResourceInformation(i); + + long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits())) + ? rhsValue.getValue() + : UnitsConversionUtil.convert(rhsValue.getUnits(), + lhsValue.getUnits(), rhsValue.getValue()); + lhs.setResourceValue(i, lhsValue.getValue() + convertedRhs); + } catch (ResourceNotFoundException ye) { + LOG.warn("Resource is missing:" + ye.getMessage()); + continue; + } + } return lhs; } @@ -166,8 +202,21 @@ public static Resource add(Resource lhs, Resource rhs) { } public static Resource subtractFrom(Resource lhs, Resource rhs) { - lhs.setMemorySize(lhs.getMemorySize() - rhs.getMemorySize()); - lhs.setVirtualCores(lhs.getVirtualCores() - rhs.getVirtualCores()); + for (int i = 0; i < maxLength; i++) { + try { + ResourceInformation rhsValue = rhs.getResourceInformation(i); + ResourceInformation lhsValue = lhs.getResourceInformation(i); + + long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits())) + ? 
rhsValue.getValue() + : UnitsConversionUtil.convert(rhsValue.getUnits(), + lhsValue.getUnits(), rhsValue.getValue()); + lhs.setResourceValue(i, lhsValue.getValue() - convertedRhs); + } catch (ResourceNotFoundException ye) { + LOG.warn("Resource is missing:" + ye.getMessage()); + continue; + } + } return lhs; } @@ -198,8 +247,15 @@ public static Resource negate(Resource resource) { } public static Resource multiplyTo(Resource lhs, double by) { - lhs.setMemorySize((long)(lhs.getMemorySize() * by)); - lhs.setVirtualCores((int)(lhs.getVirtualCores() * by)); + for (int i = 0; i < maxLength; i++) { + try { + ResourceInformation lhsValue = lhs.getResourceInformation(i); + lhs.setResourceValue(i, (long) (lhsValue.getValue() * by)); + } catch (ResourceNotFoundException ye) { + LOG.warn("Resource is missing:" + ye.getMessage()); + continue; + } + } return lhs; } @@ -213,9 +269,23 @@ public static Resource multiply(Resource lhs, double by) { */ public static Resource multiplyAndAddTo( Resource lhs, Resource rhs, double by) { - lhs.setMemorySize(lhs.getMemorySize() + (long)(rhs.getMemorySize() * by)); - lhs.setVirtualCores(lhs.getVirtualCores() - + (int)(rhs.getVirtualCores() * by)); + for (int i = 0; i < maxLength; i++) { + try { + ResourceInformation rhsValue = rhs.getResourceInformation(i); + ResourceInformation lhsValue = lhs.getResourceInformation(i); + + long convertedRhs = (long) (((rhsValue.getUnits() + .equals(lhsValue.getUnits())) + ? 
rhsValue.getValue() + : UnitsConversionUtil.convert(rhsValue.getUnits(), + lhsValue.getUnits(), rhsValue.getValue())) + * by); + lhs.setResourceValue(i, lhsValue.getValue() + convertedRhs); + } catch (ResourceNotFoundException ye) { + LOG.warn("Resource is missing:" + ye.getMessage()); + continue; + } + } return lhs; } @@ -231,8 +301,15 @@ public static Resource multiplyAndNormalizeDown( public static Resource multiplyAndRoundDown(Resource lhs, double by) { Resource out = clone(lhs); - out.setMemorySize((long)(lhs.getMemorySize() * by)); - out.setVirtualCores((int)(lhs.getVirtualCores() * by)); + for (int i = 0; i < maxLength; i++) { + try { + ResourceInformation lhsValue = lhs.getResourceInformation(i); + out.setResourceValue(i, (long) (lhsValue.getValue() * by)); + } catch (ResourceNotFoundException ye) { + LOG.warn("Resource is missing:" + ye.getMessage()); + continue; + } + } return out; } @@ -332,8 +409,24 @@ public static Resource max( } public static boolean fitsIn(Resource smaller, Resource bigger) { - return smaller.getMemorySize() <= bigger.getMemorySize() && - smaller.getVirtualCores() <= bigger.getVirtualCores(); + for (int i = 0; i < maxLength; i++) { + try { + ResourceInformation rhsValue = bigger.getResourceInformation(i); + ResourceInformation lhsValue = smaller.getResourceInformation(i); + + long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits())) + ? 
rhsValue.getValue() + : UnitsConversionUtil.convert(rhsValue.getUnits(), + lhsValue.getUnits(), rhsValue.getValue()); + if (lhsValue.getValue() > convertedRhs) { + return false; + } + } catch (ResourceNotFoundException ye) { + LOG.warn("Resource is missing:" + ye.getMessage()); + continue; + } + } + return true; } public static boolean fitsIn(ResourceCalculator rc, Resource cluster, @@ -342,13 +435,49 @@ public static boolean fitsIn(ResourceCalculator rc, Resource cluster, } public static Resource componentwiseMin(Resource lhs, Resource rhs) { - return createResource(Math.min(lhs.getMemorySize(), rhs.getMemorySize()), - Math.min(lhs.getVirtualCores(), rhs.getVirtualCores())); + Resource ret = createResource(0); + for (int i = 0; i < maxLength; i++) { + try { + ResourceInformation rhsValue = rhs.getResourceInformation(i); + ResourceInformation lhsValue = lhs.getResourceInformation(i); + + long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits())) + ? rhsValue.getValue() + : UnitsConversionUtil.convert(rhsValue.getUnits(), + lhsValue.getUnits(), rhsValue.getValue()); + ResourceInformation outInfo = lhsValue.getValue() < convertedRhs + ? lhsValue + : rhsValue; + ret.setResourceInformation(i, outInfo); + } catch (ResourceNotFoundException ye) { + LOG.warn("Resource is missing:" + ye.getMessage()); + continue; + } + } + return ret; } public static Resource componentwiseMax(Resource lhs, Resource rhs) { - return createResource(Math.max(lhs.getMemorySize(), rhs.getMemorySize()), - Math.max(lhs.getVirtualCores(), rhs.getVirtualCores())); + Resource ret = createResource(0); + for (int i = 0; i < maxLength; i++) { + try { + ResourceInformation rhsValue = rhs.getResourceInformation(i); + ResourceInformation lhsValue = lhs.getResourceInformation(i); + + long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits())) + ? 
rhsValue.getValue() + : UnitsConversionUtil.convert(rhsValue.getUnits(), + lhsValue.getUnits(), rhsValue.getValue()); + ResourceInformation outInfo = lhsValue.getValue() > convertedRhs + ? lhsValue + : rhsValue; + ret.setResourceInformation(i, outInfo); + } catch (ResourceNotFoundException ye) { + LOG.warn("Resource is missing:" + ye.getMessage()); + continue; + } + } + return ret; } public static boolean isAnyMajorResourceZero(ResourceCalculator rc, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 17d04965dda..15d6ebc9078 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -944,6 +944,22 @@ 600000 + + + Flag to enable/disable resource profiles + + yarn.resourcemanager.resource-profiles.enabled + false + + + + + If resource profiles is enabled, source file for the profiles + + yarn.resourcemanager.resource-profiles.source-file + resource-profiles.json + + @@ -3289,4 +3305,14 @@ false + + + + yarn.resource-types + + + The resource types to be used for scheduling. Use resource-types.xml + to specify details about the individual resource types. 
+ + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java index 82170b31342..86946518db3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java @@ -37,6 +37,9 @@ @SuppressWarnings("checkstyle:visibilitymodifier") protected static HashMap typeValueCache = new HashMap(); + @SuppressWarnings("checkstyle:visibilitymodifier") + protected static HashMap> excludedPropertiesMap = + new HashMap<>(); private static Random rand = new Random(); private static byte [] bytes = new byte[] {'1', '2', '3', '4'}; @@ -167,6 +170,10 @@ public String toString() { private Map getGetSetPairs(Class recordClass) throws Exception { Map ret = new HashMap(); + List excluded = null; + if (excludedPropertiesMap.containsKey(recordClass.getClass())) { + excluded = excludedPropertiesMap.get(recordClass.getClass()); + } Method [] methods = recordClass.getDeclaredMethods(); // get all get methods for (int i = 0; i < methods.length; i++) { @@ -224,6 +231,11 @@ public String toString() { (gsp.setMethod == null)) { LOG.info(String.format("Exclude potential property: %s\n", gsp.propertyName)); itr.remove(); + } else if ((excluded != null && excluded.contains(gsp.propertyName))) { + LOG.info(String.format( + "Excluding potential property(present in exclusion list): %s\n", + gsp.propertyName)); + itr.remove(); } else { LOG.info(String.format("New property: %s type: %s", gsp.toString(), gsp.type)); gsp.testValue = genTypeValue(gsp.type); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index a3f5491cdc0..c5585c28b21 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -42,6 +42,9 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceProfilesResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl; @@ -74,6 +77,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueInfoResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetQueueUserAclsInfoResponsePBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetResourceProfileRequestPBImpl; +import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetResourceProfileResponsePBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.IncreaseContainersResourceResponsePBImpl; import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.KillApplicationRequestPBImpl; @@ -127,6 +132,7 @@ import org.apache.hadoop.yarn.api.records.PreemptionMessage; import org.apache.hadoop.yarn.api.records.PreemptionResourceRequest; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.QueueConfigurations; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueState; @@ -139,9 +145,11 @@ import org.apache.hadoop.yarn.api.records.ReservationRequests; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; import org.apache.hadoop.yarn.api.records.ResourceUtilization; import org.apache.hadoop.yarn.api.records.SerializedException; import org.apache.hadoop.yarn.api.records.StrictPreemptionContract; @@ -174,18 +182,21 @@ import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionMessagePBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.PreemptionResourceRequestPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.PriorityPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ProfileCapabilityPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.QueueUserACLInfoPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ResourceBlacklistRequestPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl; +import 
org.apache.hadoop.yarn.api.records.impl.pb.ResourceTypeInfoPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.UpdateContainerRequestPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.YarnClusterMetricsPBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; @@ -301,6 +312,10 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceProfilesResponseProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileRequestProto; +import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetResourceProfileResponseProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ProfileCapabilityProto; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl; @@ -332,6 +347,7 @@ import org.junit.Test; import com.google.common.collect.ImmutableSet; +import java.util.Arrays; /** * Test class for YARN API protocol records. 
@@ -346,6 +362,8 @@ public static void setup() throws Exception { typeValueCache.put(SerializedException.class, SerializedException.newInstance(new IOException("exception for test"))); generateByNewInstance(ExecutionTypeRequest.class); + typeValueCache.put(ResourceInformation.class, ResourceInformation + .newInstance("localhost.test/sample", 1l)); generateByNewInstance(LogAggregationContext.class); generateByNewInstance(ApplicationId.class); generateByNewInstance(ApplicationAttemptId.class); @@ -359,6 +377,7 @@ public static void setup() throws Exception { generateByNewInstance(NodeReport.class); generateByNewInstance(Token.class); generateByNewInstance(NMToken.class); + generateByNewInstance(ProfileCapability.class); generateByNewInstance(ResourceRequest.class); generateByNewInstance(ApplicationAttemptReport.class); generateByNewInstance(ApplicationResourceUsageReport.class); @@ -408,6 +427,7 @@ public static void setup() throws Exception { generateByNewInstance(ApplicationTimeout.class); generateByNewInstance(QueueConfigurations.class); generateByNewInstance(CollectorInfo.class); + generateByNewInstance(ResourceTypeInfo.class); } @Test @@ -731,6 +751,8 @@ public void testApplicationReportPBImpl() throws Exception { @Test public void testApplicationResourceUsageReportPBImpl() throws Exception { + excludedPropertiesMap.put(ApplicationResourceUsageReportPBImpl.class.getClass(), + Arrays.asList("PreemptedResourceSecondsMap", "ResourceSecondsMap")); validatePBImplRecord(ApplicationResourceUsageReportPBImpl.class, ApplicationResourceUsageReportProto.class); } @@ -1153,4 +1175,46 @@ public void testExecutionTypeRequestPBImpl() throws Exception { validatePBImplRecord(ExecutionTypeRequestPBImpl.class, ExecutionTypeRequestProto.class); } + + @Test + public void testGetAllResourceProfilesResponsePBImpl() throws Exception { + validatePBImplRecord(GetAllResourceProfilesResponsePBImpl.class, + GetAllResourceProfilesResponseProto.class); + } + + @Test + public void 
testGetResourceProfileRequestPBImpl() throws Exception { + validatePBImplRecord(GetResourceProfileRequestPBImpl.class, + GetResourceProfileRequestProto.class); + } + + @Test + public void testGetResourceProfileResponsePBImpl() throws Exception { + validatePBImplRecord(GetResourceProfileResponsePBImpl.class, + GetResourceProfileResponseProto.class); + } + + @Test + public void testProfileCapabilityPBImpl() throws Exception { + validatePBImplRecord(ProfileCapabilityPBImpl.class, + ProfileCapabilityProto.class); + } + + @Test + public void testResourceTypesInfoPBImpl() throws Exception { + validatePBImplRecord(ResourceTypeInfoPBImpl.class, + YarnProtos.ResourceTypeInfoProto.class); + } + + @Test + public void testGetAllResourceTypesInfoRequestPBImpl() throws Exception { + validatePBImplRecord(GetAllResourceTypeInfoRequestPBImpl.class, + YarnServiceProtos.GetAllResourceTypeInfoRequestProto.class); + } + + @Test + public void testGetAllResourceTypesInfoResponsePBImpl() throws Exception { + validatePBImplRecord(GetAllResourceTypeInfoResponsePBImpl.class, + YarnServiceProtos.GetAllResourceTypeInfoResponseProto.class); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourcePBImpl.java new file mode 100644 index 00000000000..569a7b74f8b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourcePBImpl.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api; + +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl; +import org.apache.hadoop.yarn.proto.YarnProtos; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test class to handle various proto related tests for resources. + */ +public class TestResourcePBImpl { + @Test + public void testEmptyResourcePBInit() throws Exception { + Resource res = new ResourcePBImpl(); + // Assert to check it sets resource value and unit to default. + Assert.assertEquals(0, res.getMemorySize()); + Assert.assertEquals(ResourceInformation.MEMORY_MB.getUnits(), + res.getResourceInformation(ResourceInformation.MEMORY_MB.getName()) + .getUnits()); + Assert.assertEquals(ResourceInformation.VCORES.getUnits(), + res.getResourceInformation(ResourceInformation.VCORES.getName()) + .getUnits()); + } + + @Test + public void testResourcePBInitFromOldPB() throws Exception { + YarnProtos.ResourceProto proto = + YarnProtos.ResourceProto.newBuilder().setMemory(1024).setVirtualCores(3) + .build(); + // Assert to check it sets resource value and unit to default. 
+ Resource res = new ResourcePBImpl(proto); + Assert.assertEquals(1024, res.getMemorySize()); + Assert.assertEquals(3, res.getVirtualCores()); + Assert.assertEquals(ResourceInformation.MEMORY_MB.getUnits(), + res.getResourceInformation(ResourceInformation.MEMORY_MB.getName()) + .getUnits()); + Assert.assertEquals(ResourceInformation.VCORES.getUnits(), + res.getResourceInformation(ResourceInformation.VCORES.getName()) + .getUnits()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java index b123b0520d4..5b4155cf845 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java @@ -21,15 +21,21 @@ import java.util.Arrays; import java.util.Collection; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; -import org.junit.Assert; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + @RunWith(Parameterized.class) public class TestResourceCalculator { - private ResourceCalculator resourceCalculator; + private final ResourceCalculator resourceCalculator; @Parameterized.Parameters public static Collection getParameters() { @@ -38,6 +44,20 @@ { new DominantResourceCalculator() } }); } + @Before + public void setupNoExtraResource() { + // This has to run before each test because we don't know when + // setupExtraResource() might be called 
+ ResourceUtils.resetResourceTypes(new Configuration()); + } + + private static void setupExtraResource() { + Configuration conf = new Configuration(); + + conf.set(YarnConfiguration.RESOURCE_TYPES, "test"); + ResourceUtils.resetResourceTypes(conf); + } + public TestResourceCalculator(ResourceCalculator rs) { this.resourceCalculator = rs; } @@ -47,32 +67,177 @@ public void testFitsIn() { Resource cluster = Resource.newInstance(1024, 1); if (resourceCalculator instanceof DefaultResourceCalculator) { - Assert.assertTrue(resourceCalculator.fitsIn(cluster, + assertTrue(resourceCalculator.fitsIn(cluster, Resource.newInstance(1, 2), Resource.newInstance(2, 1))); - Assert.assertTrue(resourceCalculator.fitsIn(cluster, + assertTrue(resourceCalculator.fitsIn(cluster, Resource.newInstance(1, 2), Resource.newInstance(2, 2))); - Assert.assertTrue(resourceCalculator.fitsIn(cluster, + assertTrue(resourceCalculator.fitsIn(cluster, Resource.newInstance(1, 2), Resource.newInstance(1, 2))); - Assert.assertTrue(resourceCalculator.fitsIn(cluster, + assertTrue(resourceCalculator.fitsIn(cluster, Resource.newInstance(1, 2), Resource.newInstance(1, 1))); - Assert.assertFalse(resourceCalculator.fitsIn(cluster, + assertFalse(resourceCalculator.fitsIn(cluster, Resource.newInstance(2, 1), Resource.newInstance(1, 2))); } else if (resourceCalculator instanceof DominantResourceCalculator) { - Assert.assertFalse(resourceCalculator.fitsIn(cluster, + assertFalse(resourceCalculator.fitsIn(cluster, Resource.newInstance(1, 2), Resource.newInstance(2, 1))); - Assert.assertTrue(resourceCalculator.fitsIn(cluster, + assertTrue(resourceCalculator.fitsIn(cluster, Resource.newInstance(1, 2), Resource.newInstance(2, 2))); - Assert.assertTrue(resourceCalculator.fitsIn(cluster, + assertTrue(resourceCalculator.fitsIn(cluster, Resource.newInstance(1, 2), Resource.newInstance(1, 2))); - Assert.assertFalse(resourceCalculator.fitsIn(cluster, + assertFalse(resourceCalculator.fitsIn(cluster, Resource.newInstance(1, 2), 
Resource.newInstance(1, 1))); - Assert.assertFalse(resourceCalculator.fitsIn(cluster, + assertFalse(resourceCalculator.fitsIn(cluster, Resource.newInstance(2, 1), Resource.newInstance(1, 2))); } } + private Resource newResource(long memory, int cpu) { + Resource res = Resource.newInstance(memory, cpu); + + return res; + } + + private Resource newResource(long memory, int cpu, int test) { + Resource res = newResource(memory, cpu); + + res.setResourceValue("test", test); + + return res; + } + + /** + * Test that the compare() method returns the expected result (0, -1, or 1). + * If the expected result is not 0, this method will also test the resources + * in the opposite order and check for the negative of the expected result. + * + * @param cluster the cluster resource + * @param res1 the LHS resource + * @param res2 the RHS resource + * @param expected the expected result + */ + private void assertComparison(Resource cluster, Resource res1, Resource res2, + int expected) { + int actual = resourceCalculator.compare(cluster, res1, res2); + + assertEquals(String.format("Resource comparison did not give the expected " + + "result for %s v/s %s", res1.toString(), res2.toString()), + expected, actual); + + if (expected != 0) { + // Try again with args in the opposite order and the negative of the + // expected result. + actual = resourceCalculator.compare(cluster, res2, res1); + assertEquals(String.format("Resource comparison did not give the " + + "expected result for %s v/s %s", res2.toString(), res1.toString()), + expected * -1, actual); + } + } + + @Test + public void testCompareWithOnlyMandatory() { + // This test is necessary because there are optimizations that are only + // triggered when only the mandatory resources are configured. 
+ + // Keep cluster resources even so that the numbers are easy to understand + Resource cluster = newResource(4, 4); + + assertComparison(cluster, newResource(1, 1), newResource(1, 1), 0); + assertComparison(cluster, newResource(0, 0), newResource(0, 0), 0); + assertComparison(cluster, newResource(2, 2), newResource(1, 1), 1); + assertComparison(cluster, newResource(2, 2), newResource(0, 0), 1); + + if (resourceCalculator instanceof DefaultResourceCalculator) { + testCompareDefaultWithOnlyMandatory(cluster); + } else if (resourceCalculator instanceof DominantResourceCalculator) { + testCompareDominantWithOnlyMandatory(cluster); + } + } + + private void testCompareDefaultWithOnlyMandatory(Resource cluster) { + assertComparison(cluster, newResource(1, 1), newResource(1, 1), 0); + assertComparison(cluster, newResource(1, 2), newResource(1, 1), 0); + assertComparison(cluster, newResource(1, 1), newResource(1, 0), 0); + assertComparison(cluster, newResource(2, 1), newResource(1, 1), 1); + assertComparison(cluster, newResource(2, 1), newResource(1, 2), 1); + assertComparison(cluster, newResource(2, 1), newResource(1, 0), 1); + } + + private void testCompareDominantWithOnlyMandatory(Resource cluster) { + assertComparison(cluster, newResource(2, 1), newResource(2, 1), 0); + assertComparison(cluster, newResource(2, 1), newResource(1, 2), 0); + assertComparison(cluster, newResource(2, 1), newResource(1, 1), 1); + assertComparison(cluster, newResource(2, 2), newResource(2, 1), 1); + assertComparison(cluster, newResource(2, 2), newResource(1, 2), 1); + assertComparison(cluster, newResource(3, 1), newResource(3, 0), 1); + } + + @Test + public void testCompare() { + // Test with 3 resources + setupExtraResource(); + + // Keep cluster resources even so that the numbers are easy to understand + Resource cluster = newResource(4L, 4, 4); + + assertComparison(cluster, newResource(1, 1, 1), newResource(1, 1, 1), 0); + assertComparison(cluster, newResource(0, 0, 0), newResource(0, 0, 
0), 0); + assertComparison(cluster, newResource(2, 2, 2), newResource(1, 1, 1), 1); + assertComparison(cluster, newResource(2, 2, 2), newResource(0, 0, 0), 1); + + if (resourceCalculator instanceof DefaultResourceCalculator) { + testCompareDefault(cluster); + } else if (resourceCalculator instanceof DominantResourceCalculator) { + testCompareDominant(cluster); + } + } + + private void testCompareDefault(Resource cluster) { + assertComparison(cluster, newResource(1, 1, 2), newResource(1, 1, 1), 0); + assertComparison(cluster, newResource(1, 2, 1), newResource(1, 1, 1), 0); + assertComparison(cluster, newResource(1, 2, 2), newResource(1, 1, 1), 0); + assertComparison(cluster, newResource(1, 2, 2), newResource(1, 0, 0), 0); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 1), 1); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 1), 1); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 2), 1); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 2), 1); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 0, 0), 1); + } + + private void testCompareDominant(Resource cluster) { + assertComparison(cluster, newResource(2, 1, 1), newResource(2, 1, 1), 0); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 1), 0); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 2), 0); + assertComparison(cluster, newResource(2, 1, 0), newResource(0, 1, 2), 0); + assertComparison(cluster, newResource(2, 2, 1), newResource(1, 2, 2), 0); + assertComparison(cluster, newResource(2, 2, 1), newResource(2, 1, 2), 0); + assertComparison(cluster, newResource(2, 2, 1), newResource(2, 2, 1), 0); + assertComparison(cluster, newResource(2, 2, 0), newResource(2, 0, 2), 0); + assertComparison(cluster, newResource(3, 2, 1), newResource(3, 2, 1), 0); + assertComparison(cluster, newResource(3, 2, 1), newResource(3, 1, 2), 0); + assertComparison(cluster, newResource(3, 2, 1), newResource(1, 2, 
3), 0); + assertComparison(cluster, newResource(3, 2, 1), newResource(1, 3, 2), 0); + assertComparison(cluster, newResource(3, 2, 1), newResource(2, 1, 3), 0); + assertComparison(cluster, newResource(3, 2, 1), newResource(2, 3, 1), 0); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 1), 1); + assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 0), 1); + assertComparison(cluster, newResource(2, 2, 1), newResource(2, 1, 1), 1); + assertComparison(cluster, newResource(2, 2, 1), newResource(1, 2, 1), 1); + assertComparison(cluster, newResource(2, 2, 1), newResource(1, 1, 2), 1); + assertComparison(cluster, newResource(2, 2, 1), newResource(0, 2, 2), 1); + assertComparison(cluster, newResource(2, 2, 2), newResource(2, 1, 1), 1); + assertComparison(cluster, newResource(2, 2, 2), newResource(1, 2, 1), 1); + assertComparison(cluster, newResource(2, 2, 2), newResource(1, 1, 2), 1); + assertComparison(cluster, newResource(2, 2, 2), newResource(2, 2, 1), 1); + assertComparison(cluster, newResource(2, 2, 2), newResource(2, 1, 2), 1); + assertComparison(cluster, newResource(2, 2, 2), newResource(1, 2, 2), 1); + assertComparison(cluster, newResource(3, 2, 1), newResource(2, 2, 2), 1); + assertComparison(cluster, newResource(3, 1, 1), newResource(2, 2, 2), 1); + assertComparison(cluster, newResource(3, 1, 1), newResource(3, 1, 0), 1); + assertComparison(cluster, newResource(3, 1, 1), newResource(3, 0, 0), 1); + } + @Test(timeout = 10000) - public void testResourceCalculatorCompareMethod() { + public void testCompareWithEmptyCluster() { Resource clusterResource = Resource.newInstance(0, 0); // For lhs == rhs @@ -126,27 +291,27 @@ private void assertResourcesOperations(Resource clusterResource, boolean greaterThan, boolean greaterThanOrEqual, Resource max, Resource min) { - Assert.assertEquals("Less Than operation is wrongly calculated.", lessThan, + assertEquals("Less Than operation is wrongly calculated.", lessThan, 
Resources.lessThan(resourceCalculator, clusterResource, lhs, rhs)); - Assert.assertEquals( + assertEquals( "Less Than Or Equal To operation is wrongly calculated.", lessThanOrEqual, Resources.lessThanOrEqual(resourceCalculator, clusterResource, lhs, rhs)); - Assert.assertEquals("Greater Than operation is wrongly calculated.", + assertEquals("Greater Than operation is wrongly calculated.", greaterThan, Resources.greaterThan(resourceCalculator, clusterResource, lhs, rhs)); - Assert.assertEquals( + assertEquals( "Greater Than Or Equal To operation is wrongly calculated.", greaterThanOrEqual, Resources.greaterThanOrEqual(resourceCalculator, clusterResource, lhs, rhs)); - Assert.assertEquals("Max(value) Operation wrongly calculated.", max, + assertEquals("Max(value) Operation wrongly calculated.", max, Resources.max(resourceCalculator, clusterResource, lhs, rhs)); - Assert.assertEquals("Min(value) operation is wrongly calculated.", min, + assertEquals("Min(value) operation is wrongly calculated.", min, Resources.min(resourceCalculator, clusterResource, lhs, rhs)); } @@ -164,13 +329,13 @@ public void testNormalize() { Resource result = Resources.normalize(resourceCalculator, ask, min, max, increment); - Assert.assertEquals(2 * 1024, result.getMemorySize()); + assertEquals(2 * 1024, result.getMemorySize()); } else if (resourceCalculator instanceof DominantResourceCalculator) { Resource result = Resources.normalize(resourceCalculator, ask, min, max, increment); - Assert.assertEquals(2 * 1024, result.getMemorySize()); - Assert.assertEquals(4, result.getVirtualCores()); + assertEquals(2 * 1024, result.getMemorySize()); + assertEquals(4, result.getVirtualCores()); } // if resources asked are less than minimum resource, then normalize it to @@ -183,13 +348,13 @@ public void testNormalize() { Resource result = Resources.normalize(resourceCalculator, ask, min, max, increment); - Assert.assertEquals(2 * 1024, result.getMemorySize()); + assertEquals(2 * 1024, 
result.getMemorySize()); } else if (resourceCalculator instanceof DominantResourceCalculator) { Resource result = Resources.normalize(resourceCalculator, ask, min, max, increment); - Assert.assertEquals(2 * 1024, result.getMemorySize()); - Assert.assertEquals(2, result.getVirtualCores()); + assertEquals(2 * 1024, result.getMemorySize()); + assertEquals(2, result.getVirtualCores()); } // if resources asked are larger than maximum resource, then normalize it to @@ -202,13 +367,13 @@ public void testNormalize() { Resource result = Resources.normalize(resourceCalculator, ask, min, max, increment); - Assert.assertEquals(8 * 1024, result.getMemorySize()); + assertEquals(8 * 1024, result.getMemorySize()); } else if (resourceCalculator instanceof DominantResourceCalculator) { Resource result = Resources.normalize(resourceCalculator, ask, min, max, increment); - Assert.assertEquals(8 * 1024, result.getMemorySize()); - Assert.assertEquals(8, result.getVirtualCores()); + assertEquals(8 * 1024, result.getMemorySize()); + assertEquals(8, result.getVirtualCores()); } // if increment is 0, use minimum resource as the increment resource. 
@@ -220,13 +385,13 @@ public void testNormalize() { Resource result = Resources.normalize(resourceCalculator, ask, min, max, increment); - Assert.assertEquals(2 * 1024, result.getMemorySize()); + assertEquals(2 * 1024, result.getMemorySize()); } else if (resourceCalculator instanceof DominantResourceCalculator) { Resource result = Resources.normalize(resourceCalculator, ask, min, max, increment); - Assert.assertEquals(2 * 1024, result.getMemorySize()); - Assert.assertEquals(2, result.getVirtualCores()); + assertEquals(2 * 1024, result.getMemorySize()); + assertEquals(2, result.getVirtualCores()); } } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java new file mode 100644 index 00000000000..a5550a70f22 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java @@ -0,0 +1,306 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.util.resource; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; + +/** + * Test class to verify all resource utility methods. + */ +public class TestResourceUtils { + + static class ResourceFileInformation { + String filename; + int resourceCount; + Map resourceNameUnitsMap; + + public ResourceFileInformation(String name, int count) { + filename = name; + resourceCount = count; + resourceNameUnitsMap = new HashMap<>(); + } + } + + @Before + public void setup() { + ResourceUtils.resetResourceTypes(); + } + + @After + public void teardown() { + Configuration conf = new YarnConfiguration(); + File source = new File( + conf.getClassLoader().getResource("resource-types-1.xml").getFile()); + File dest = new File(source.getParent(), "resource-types.xml"); + if (dest.exists()) { + dest.delete(); + } + } + + private void testMemoryAndVcores(Map res) { + String memory = ResourceInformation.MEMORY_MB.getName(); + String vcores = ResourceInformation.VCORES.getName(); + Assert.assertTrue("Resource 'memory' missing", res.containsKey(memory)); + Assert.assertEquals("'memory' units incorrect", + ResourceInformation.MEMORY_MB.getUnits(), res.get(memory).getUnits()); + Assert.assertEquals("'memory' types incorrect", + ResourceInformation.MEMORY_MB.getResourceType(), + res.get(memory).getResourceType()); + Assert.assertTrue("Resource 'vcores' missing", res.containsKey(vcores)); + Assert.assertEquals("'vcores' units incorrect", + ResourceInformation.VCORES.getUnits(), res.get(vcores).getUnits()); + 
Assert.assertEquals("'vcores' type incorrect", + ResourceInformation.VCORES.getResourceType(), + res.get(vcores).getResourceType()); + } + + @Test + public void testGetResourceTypes() throws Exception { + + Map res = ResourceUtils.getResourceTypes(); + Assert.assertEquals(2, res.size()); + testMemoryAndVcores(res); + } + + @Test + public void testGetResourceTypesConfigs() throws Exception { + + Configuration conf = new YarnConfiguration(); + + ResourceFileInformation testFile1 = + new ResourceFileInformation("resource-types-1.xml", 2); + ResourceFileInformation testFile2 = + new ResourceFileInformation("resource-types-2.xml", 3); + testFile2.resourceNameUnitsMap.put("resource1", "G"); + ResourceFileInformation testFile3 = + new ResourceFileInformation("resource-types-3.xml", 3); + testFile3.resourceNameUnitsMap.put("resource2", ""); + ResourceFileInformation testFile4 = + new ResourceFileInformation("resource-types-4.xml", 4); + testFile4.resourceNameUnitsMap.put("resource1", "G"); + testFile4.resourceNameUnitsMap.put("resource2", "m"); + + ResourceFileInformation[] tests = {testFile1, testFile2, testFile3, + testFile4}; + Map res; + for (ResourceFileInformation testInformation : tests) { + ResourceUtils.resetResourceTypes(); + File source = new File( + conf.getClassLoader().getResource(testInformation.filename) + .getFile()); + File dest = new File(source.getParent(), "resource-types.xml"); + FileUtils.copyFile(source, dest); + res = ResourceUtils.getResourceTypes(); + testMemoryAndVcores(res); + Assert.assertEquals(testInformation.resourceCount, res.size()); + for (Map.Entry entry : testInformation.resourceNameUnitsMap + .entrySet()) { + String resourceName = entry.getKey(); + Assert.assertTrue("Missing key " + resourceName, + res.containsKey(resourceName)); + Assert.assertEquals(entry.getValue(), res.get(resourceName).getUnits()); + } + dest.delete(); + } + } + + @Test + public void testGetResourceTypesConfigErrors() throws Exception { + Configuration conf = new 
YarnConfiguration(); + + String[] resourceFiles = {"resource-types-error-1.xml", + "resource-types-error-2.xml", "resource-types-error-3.xml", + "resource-types-error-4.xml"}; + for (String resourceFile : resourceFiles) { + ResourceUtils.resetResourceTypes(); + File dest = null; + try { + File source = + new File(conf.getClassLoader().getResource(resourceFile).getFile()); + dest = new File(source.getParent(), "resource-types.xml"); + FileUtils.copyFile(source, dest); + ResourceUtils.getResourceTypes(); + Assert.fail("Expected error with file " + resourceFile); + } catch (NullPointerException ne) { + throw ne; + } catch (Exception e) { + if (dest != null) { + dest.delete(); + } + } + } + } + + @Test + public void testInitializeResourcesMap() throws Exception { + String[] empty = {"", ""}; + String[] res1 = {"resource1", "m"}; + String[] res2 = {"resource2", "G"}; + String[][] test1 = {empty}; + String[][] test2 = {res1}; + String[][] test3 = {res2}; + String[][] test4 = {res1, res2}; + + String[][][] allTests = {test1, test2, test3, test4}; + + for (String[][] test : allTests) { + + Configuration conf = new YarnConfiguration(); + String resSt = ""; + for (String[] resources : test) { + resSt += (resources[0] + ","); + } + resSt = resSt.substring(0, resSt.length() - 1); + conf.set(YarnConfiguration.RESOURCE_TYPES, resSt); + for (String[] resources : test) { + String name = + YarnConfiguration.RESOURCE_TYPES + "." 
+ resources[0] + ".units"; + conf.set(name, resources[1]); + } + Map ret = + ResourceUtils.resetResourceTypes(conf); + + // for test1, 4 - length will be 1, 4 + // for the others, len will be 3 + int len = 3; + if (test == test1) { + len = 2; + } else if (test == test4) { + len = 4; + } + + Assert.assertEquals(len, ret.size()); + for (String[] resources : test) { + if (resources[0].length() == 0) { + continue; + } + Assert.assertTrue(ret.containsKey(resources[0])); + ResourceInformation resInfo = ret.get(resources[0]); + Assert.assertEquals(resources[1], resInfo.getUnits()); + Assert.assertEquals(ResourceTypes.COUNTABLE, resInfo.getResourceType()); + } + // we must always have memory and vcores with their fixed units + Assert.assertTrue(ret.containsKey("memory-mb")); + ResourceInformation memInfo = ret.get("memory-mb"); + Assert.assertEquals("Mi", memInfo.getUnits()); + Assert.assertEquals(ResourceTypes.COUNTABLE, memInfo.getResourceType()); + Assert.assertTrue(ret.containsKey("vcores")); + ResourceInformation vcoresInfo = ret.get("vcores"); + Assert.assertEquals("", vcoresInfo.getUnits()); + Assert + .assertEquals(ResourceTypes.COUNTABLE, vcoresInfo.getResourceType()); + } + } + + @Test + public void testInitializeResourcesMapErrors() throws Exception { + + String[] mem1 = {"memory-mb", ""}; + String[] vcores1 = {"vcores", "M"}; + + String[] mem2 = {"memory-mb", "m"}; + String[] vcores2 = {"vcores", "G"}; + + String[] mem3 = {"memory", ""}; + + String[][] test1 = {mem1, vcores1}; + String[][] test2 = {mem2, vcores2}; + String[][] test3 = {mem3}; + + String[][][] allTests = {test1, test2, test3}; + + for (String[][] test : allTests) { + + Configuration conf = new YarnConfiguration(); + String resSt = ""; + for (String[] resources : test) { + resSt += (resources[0] + ","); + } + resSt = resSt.substring(0, resSt.length() - 1); + conf.set(YarnConfiguration.RESOURCE_TYPES, resSt); + for (String[] resources : test) { + String name = + YarnConfiguration.RESOURCE_TYPES + 
"." + resources[0] + ".units"; + conf.set(name, resources[1]); + } + try { + ResourceUtils.initializeResourcesMap(conf); + Assert.fail("resource map initialization should fail"); + } catch (Exception e) { + // do nothing + } + } + } + + @Test + public void testGetResourceInformation() throws Exception { + + Configuration conf = new YarnConfiguration(); + Map testRun = new HashMap<>(); + setupResourceTypes(conf, "resource-types-4.xml"); + // testRun.put("node-resources-1.xml", Resource.newInstance(1024, 1)); + Resource test3Resources = Resource.newInstance(1024, 1); + test3Resources.setResourceInformation("resource1", + ResourceInformation.newInstance("resource1", "Gi", 5L)); + test3Resources.setResourceInformation("resource2", + ResourceInformation.newInstance("resource2", "m", 2L)); + testRun.put("node-resources-2.xml", test3Resources); + + for (Map.Entry entry : testRun.entrySet()) { + String resourceFile = entry.getKey(); + ResourceUtils.resetNodeResources(); + File dest; + File source = new File( + conf.getClassLoader().getResource(resourceFile).getFile()); + dest = new File(source.getParent(), "node-resources.xml"); + FileUtils.copyFile(source, dest); + Map actual = ResourceUtils + .getNodeResourceInformation(conf); + Assert.assertEquals(actual.size(), + entry.getValue().getResources().length); + for (ResourceInformation resInfo : entry.getValue().getResources()) { + Assert.assertEquals(resInfo, actual.get(resInfo.getName())); + } + dest.delete(); + } + } + + public static String setupResourceTypes(Configuration conf, String filename) + throws Exception { + File source = new File( + conf.getClassLoader().getResource(filename).getFile()); + File dest = new File(source.getParent(), "resource-types.xml"); + FileUtils.copyFile(source, dest); + ResourceUtils.getResourceTypes(); + return dest.getAbsolutePath(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java index d79179ac0d9..a8404fbaee7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java @@ -18,35 +18,102 @@ package org.apache.hadoop.yarn.util.resource; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.junit.After; +import org.junit.Before; import org.junit.Test; +import java.io.File; + +import static org.apache.hadoop.yarn.util.resource.Resources.componentwiseMin; +import static org.apache.hadoop.yarn.util.resource.Resources.componentwiseMax; +import static org.apache.hadoop.yarn.util.resource.Resources.add; +import static org.apache.hadoop.yarn.util.resource.Resources.subtract; +import static org.apache.hadoop.yarn.util.resource.Resources.multiply; +import static org.apache.hadoop.yarn.util.resource.Resources.multiplyAndAddTo; +import static org.apache.hadoop.yarn.util.resource.Resources.multiplyAndRoundDown; +import static org.apache.hadoop.yarn.util.resource.Resources.fitsIn; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; public class TestResources { - + + static class ExtendedResources extends Resources { + public static Resource unbounded() { + return new FixedValueResource("UNBOUNDED", Long.MAX_VALUE); + } + + public static Resource none() { + return new FixedValueResource("NONE", 0L); + } + } + + private static final String EXTRA_RESOURCE_TYPE = "resource2"; + private String resourceTypesFile; + + private void setupExtraResourceType() throws Exception { + Configuration conf 
= new YarnConfiguration(); + resourceTypesFile = + TestResourceUtils.setupResourceTypes(conf, "resource-types-3.xml"); + } + + private void unsetExtraResourceType() { + deleteResourceTypesFile(); + ResourceUtils.resetResourceTypes(); + } + + private void deleteResourceTypesFile() { + if (resourceTypesFile != null && !resourceTypesFile.isEmpty()) { + File resourceFile = new File(resourceTypesFile); + resourceFile.delete(); + } + } + + @Before + public void setup() throws Exception { + setupExtraResourceType(); + } + + @After + public void teardown() { + deleteResourceTypesFile(); + } + public Resource createResource(long memory, int vCores) { return Resource.newInstance(memory, vCores); } - @Test(timeout=10000) + public Resource createResource(long memory, int vCores, long resource2) { + Resource ret = Resource.newInstance(memory, vCores); + ret.setResourceInformation(EXTRA_RESOURCE_TYPE, + ResourceInformation.newInstance(EXTRA_RESOURCE_TYPE, resource2)); + return ret; + } + + @Test(timeout = 10000) public void testCompareToWithUnboundedResource() { - assertTrue(Resources.unbounded().compareTo( - createResource(Long.MAX_VALUE, Integer.MAX_VALUE)) == 0); - assertTrue(Resources.unbounded().compareTo( - createResource(Long.MAX_VALUE, 0)) > 0); - assertTrue(Resources.unbounded().compareTo( - createResource(0, Integer.MAX_VALUE)) > 0); + unsetExtraResourceType(); + Resource unboundedClone = Resources.clone(ExtendedResources.unbounded()); + assertTrue(unboundedClone + .compareTo(createResource(Long.MAX_VALUE, Integer.MAX_VALUE)) == 0); + assertTrue(unboundedClone.compareTo(createResource(Long.MAX_VALUE, 0)) > 0); + assertTrue( + unboundedClone.compareTo(createResource(0, Integer.MAX_VALUE)) > 0); } - @Test(timeout=10000) + @Test(timeout = 10000) public void testCompareToWithNoneResource() { assertTrue(Resources.none().compareTo(createResource(0, 0)) == 0); - assertTrue(Resources.none().compareTo( - createResource(1, 0)) < 0); - assertTrue(Resources.none().compareTo( - 
createResource(0, 1)) < 0); + assertTrue(Resources.none().compareTo(createResource(1, 0)) < 0); + assertTrue(Resources.none().compareTo(createResource(0, 1)) < 0); + assertTrue(Resources.none().compareTo(createResource(0, 0, 0)) == 0); + assertTrue(Resources.none().compareTo(createResource(1, 0, 0)) < 0); + assertTrue(Resources.none().compareTo(createResource(0, 1, 0)) < 0); + assertTrue(Resources.none().compareTo(createResource(0, 0, 1)) < 0); } @Test(timeout=10000) @@ -69,4 +136,131 @@ public void testMultipleRoundUp() { assertEquals(memoryErrorMsg, result.getMemorySize(), 0); assertEquals(vcoreErrorMsg, result.getVirtualCores(), 0); } + + @Test(timeout = 1000) + public void testFitsIn() { + assertTrue(fitsIn(createResource(1, 1), createResource(2, 2))); + assertTrue(fitsIn(createResource(2, 2), createResource(2, 2))); + assertFalse(fitsIn(createResource(2, 2), createResource(1, 1))); + assertFalse(fitsIn(createResource(1, 2), createResource(2, 1))); + assertFalse(fitsIn(createResource(2, 1), createResource(1, 2))); + assertTrue(fitsIn(createResource(1, 1, 1), createResource(2, 2, 2))); + assertTrue(fitsIn(createResource(1, 1, 0), createResource(2, 2, 0))); + assertTrue(fitsIn(createResource(1, 1, 1), createResource(2, 2, 2))); + } + + @Test(timeout = 1000) + public void testComponentwiseMin() { + assertEquals(createResource(1, 1), + componentwiseMin(createResource(1, 1), createResource(2, 2))); + assertEquals(createResource(1, 1), + componentwiseMin(createResource(2, 2), createResource(1, 1))); + assertEquals(createResource(1, 1), + componentwiseMin(createResource(1, 2), createResource(2, 1))); + assertEquals(createResource(1, 1, 1), + componentwiseMin(createResource(1, 1, 1), createResource(2, 2, 2))); + assertEquals(createResource(1, 1, 0), + componentwiseMin(createResource(2, 2, 2), createResource(1, 1))); + assertEquals(createResource(1, 1, 2), + componentwiseMin(createResource(1, 2, 2), createResource(2, 1, 3))); + } + + @Test + public void 
testComponentwiseMax() { + assertEquals(createResource(2, 2), + componentwiseMax(createResource(1, 1), createResource(2, 2))); + assertEquals(createResource(2, 2), + componentwiseMax(createResource(2, 2), createResource(1, 1))); + assertEquals(createResource(2, 2), + componentwiseMax(createResource(1, 2), createResource(2, 1))); + assertEquals(createResource(2, 2, 2), + componentwiseMax(createResource(1, 1, 1), createResource(2, 2, 2))); + assertEquals(createResource(2, 2, 2), + componentwiseMax(createResource(2, 2, 2), createResource(1, 1))); + assertEquals(createResource(2, 2, 3), + componentwiseMax(createResource(1, 2, 2), createResource(2, 1, 3))); + assertEquals(createResource(2, 2, 1), + componentwiseMax(createResource(2, 2, 0), createResource(2, 1, 1))); + } + + @Test + public void testAdd() { + assertEquals(createResource(2, 3), + add(createResource(1, 1), createResource(1, 2))); + assertEquals(createResource(3, 2), + add(createResource(1, 1), createResource(2, 1))); + assertEquals(createResource(2, 2, 0), + add(createResource(1, 1, 0), createResource(1, 1, 0))); + assertEquals(createResource(2, 2, 3), + add(createResource(1, 1, 1), createResource(1, 1, 2))); + } + + @Test + public void testSubtract() { + assertEquals(createResource(1, 0), + subtract(createResource(2, 1), createResource(1, 1))); + assertEquals(createResource(0, 1), + subtract(createResource(1, 2), createResource(1, 1))); + assertEquals(createResource(2, 2, 0), + subtract(createResource(3, 3, 0), createResource(1, 1, 0))); + assertEquals(createResource(1, 1, 2), + subtract(createResource(2, 2, 3), createResource(1, 1, 1))); + } + + @Test + public void testClone() { + assertEquals(createResource(1, 1), Resources.clone(createResource(1, 1))); + assertEquals(createResource(1, 1, 0), + Resources.clone(createResource(1, 1))); + assertEquals(createResource(1, 1), + Resources.clone(createResource(1, 1, 0))); + assertEquals(createResource(1, 1, 2), + Resources.clone(createResource(1, 1, 2))); + } + 
+ @Test + public void testMultiply() { + assertEquals(createResource(4, 2), multiply(createResource(2, 1), 2)); + assertEquals(createResource(4, 2, 0), multiply(createResource(2, 1), 2)); + assertEquals(createResource(2, 4), multiply(createResource(1, 2), 2)); + assertEquals(createResource(2, 4, 0), multiply(createResource(1, 2), 2)); + assertEquals(createResource(6, 6, 0), multiply(createResource(3, 3, 0), 2)); + assertEquals(createResource(4, 4, 6), multiply(createResource(2, 2, 3), 2)); + } + + @Test + public void testMultiplyAndRoundDown() { + assertEquals(createResource(4, 1), + multiplyAndRoundDown(createResource(3, 1), 1.5)); + assertEquals(createResource(4, 1, 0), + multiplyAndRoundDown(createResource(3, 1), 1.5)); + assertEquals(createResource(1, 4), + multiplyAndRoundDown(createResource(1, 3), 1.5)); + assertEquals(createResource(1, 4, 0), + multiplyAndRoundDown(createResource(1, 3), 1.5)); + assertEquals(createResource(7, 7, 0), + multiplyAndRoundDown(createResource(3, 3, 0), 2.5)); + assertEquals(createResource(2, 2, 7), + multiplyAndRoundDown(createResource(1, 1, 3), 2.5)); + } + + @Test + public void testMultiplyAndAddTo() throws Exception { + unsetExtraResourceType(); + setupExtraResourceType(); + assertEquals(createResource(6, 4), + multiplyAndAddTo(createResource(3, 1), createResource(2, 2), 1.5)); + assertEquals(createResource(6, 4, 0), + multiplyAndAddTo(createResource(3, 1), createResource(2, 2), 1.5)); + assertEquals(createResource(4, 7), + multiplyAndAddTo(createResource(1, 1), createResource(2, 4), 1.5)); + assertEquals(createResource(4, 7, 0), + multiplyAndAddTo(createResource(1, 1), createResource(2, 4), 1.5)); + assertEquals(createResource(6, 4, 0), + multiplyAndAddTo(createResource(3, 1, 0), createResource(2, 2, 0), + 1.5)); + assertEquals(createResource(6, 4, 6), + multiplyAndAddTo(createResource(3, 1, 2), createResource(2, 2, 3), + 1.5)); + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-1.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-1.xml new file mode 100644 index 00000000000..f00573e3077 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-1.xml @@ -0,0 +1,29 @@ + + + + + + + + yarn.nodemanager.resource.memory-mb + 1024 + + + + yarn.nodemanager.resource.vcores + 1 + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml new file mode 100644 index 00000000000..9d9b3dc65c8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml @@ -0,0 +1,39 @@ + + + + + + + + yarn.nodemanager.resource-type.memory-mb + 1024Mi + + + + yarn.nodemanager.resource-type.vcores + 1 + + + + yarn.nodemanager.resource-type.resource1 + 5Gi + + + + yarn.nodemanager.resource-type.resource2 + 2m + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-1.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-1.xml new file mode 100644 index 00000000000..3ec106dfbb2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-1.xml @@ -0,0 +1,18 @@ + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-2.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-2.xml new file mode 100644 index 00000000000..6e5885ed7d7 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-2.xml @@ -0,0 +1,29 @@ + + + + + + + + yarn.resource-types + resource1 + + + + yarn.resource-types.resource1.units + G + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-3.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-3.xml new file mode 100644 index 00000000000..8fd6fefa8f1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-3.xml @@ -0,0 +1,24 @@ + + + + + + + + yarn.resource-types + resource2 + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-4.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-4.xml new file mode 100644 index 00000000000..c84316a536e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-4.xml @@ -0,0 +1,34 @@ + + + + + + + + yarn.resource-types + resource1,resource2 + + + + yarn.resource-types.resource1.units + G + + + + yarn.resource-types.resource2.units + m + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-1.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-1.xml new file mode 100644 index 00000000000..d1942f2c97f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-1.xml @@ -0,0 +1,29 @@ + + + + + + + + yarn.resource-types + memory-mb,resource1 + + + + yarn.resource-types.resource1.calculator-units + G + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-2.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-2.xml new file mode 100644 index 00000000000..ca428ebfb32 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-2.xml @@ -0,0 +1,29 @@ + + + + + + + + yarn.resource-types + vcores,resource1 + + + + yarn.resource-types.resource1.calculator-units + G + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-3.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-3.xml new file mode 100644 index 00000000000..08b8a6dc0e9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-3.xml @@ -0,0 +1,29 @@ + + + + + + + + yarn.resource-types + vcores,resource1 + + + + yarn.resource-types.resource1.calculator-units + A + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-4.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-4.xml new file mode 100644 index 00000000000..c8eb7662097 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-4.xml @@ -0,0 +1,24 @@ + + + + + + + + yarn.resource-types + memory,resource1 + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java index 9240ed872e0..0b57717c29f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java @@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; @@ -338,9 +339,20 @@ private static ApplicationReportExt convertToApplicationReport( ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS); long preemptedVcoreSeconds = parseLong(entityInfo, ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS); - appResources = ApplicationResourceUsageReport.newInstance(0, 0, null, - null, null, memorySeconds, vcoreSeconds, 0, 0, - preemptedMemorySeconds, preemptedVcoreSeconds); + Map resourceSecondsMap = new HashMap<>(); + Map preemptedResoureSecondsMap = new HashMap<>(); + resourceSecondsMap + .put(ResourceInformation.MEMORY_MB.getName(), memorySeconds); + resourceSecondsMap + .put(ResourceInformation.VCORES.getName(), vcoreSeconds); + preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), + preemptedMemorySeconds); + preemptedResoureSecondsMap + .put(ResourceInformation.VCORES.getName(), preemptedVcoreSeconds); + + appResources = ApplicationResourceUsageReport + .newInstance(0, 0, null, null, null, resourceSecondsMap, 0, 0, + preemptedResoureSecondsMap); } if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index e7f47af2647..3b37abdee03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -65,8 +65,6 @@ import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.api.records.YarnApplicationState; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; @@ -447,12 +445,12 @@ public static ApplicationSubmissionContext newApplicationSubmissionContext( queue, priority, amContainer, isUnmanagedAM, cancelTokensWhenComplete, maxAppAttempts, resource, null); } - + public static ApplicationResourceUsageReport newApplicationResourceUsageReport( int numUsedContainers, int numReservedContainers, Resource usedResources, - Resource reservedResources, Resource neededResources, long memorySeconds, - long vcoreSeconds, long preemptedMemorySeconds, - long preemptedVcoreSeconds) { + Resource reservedResources, Resource neededResources, + Map resourceSecondsMap, + Map preemptedResourceSecondsMap) { ApplicationResourceUsageReport report = recordFactory.newRecordInstance(ApplicationResourceUsageReport.class); report.setNumUsedContainers(numUsedContainers); @@ -460,10 +458,8 @@ public static ApplicationResourceUsageReport 
newApplicationResourceUsageReport( report.setUsedResources(usedResources); report.setReservedResources(reservedResources); report.setNeededResources(neededResources); - report.setMemorySeconds(memorySeconds); - report.setVcoreSeconds(vcoreSeconds); - report.setPreemptedMemorySeconds(preemptedMemorySeconds); - report.setPreemptedVcoreSeconds(preemptedVcoreSeconds); + report.setResourceSecondsMap(resourceSecondsMap); + report.setPreemptedResourceSecondsMap(preemptedResourceSecondsMap); return report; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java index e33d7e19774..769296b74b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java @@ -41,6 +41,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -73,6 +77,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; @@ -852,4 +858,22 @@ public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority( return new String[0]; } + + @Override + public GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) throws YarnException, IOException { + return null; + } + + @Override + public GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException { + return null; + } + + @Override + public GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) throws YarnException, IOException { + return null; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 35b7cb0e5f1..3efe0bc1e18 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -174,27 +174,28 @@ public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher, @Override protected void serviceInit(Configuration conf) throws Exception { - int memoryMb = NodeManagerHardwareUtils.getContainerMemoryMB(conf); + this.totalResource = NodeManagerHardwareUtils.getNodeResources(conf); + long memoryMb = totalResource.getMemorySize(); float vMemToPMem = conf.getFloat( YarnConfiguration.NM_VMEM_PMEM_RATIO, YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO); - int virtualMemoryMb = (int)Math.ceil(memoryMb * vMemToPMem); + long virtualMemoryMb = (long)Math.ceil(memoryMb * vMemToPMem); - int virtualCores = NodeManagerHardwareUtils.getVCores(conf); + int virtualCores = totalResource.getVirtualCores(); LOG.info("Nodemanager resources: memory set to " + memoryMb + "MB."); LOG.info("Nodemanager resources: vcores set to " + virtualCores + "."); + LOG.info("Nodemanager resources: " + totalResource); - this.totalResource = Resource.newInstance(memoryMb, virtualCores); metrics.addResource(totalResource); // Get actual node physical resources - int physicalMemoryMb = memoryMb; + long physicalMemoryMb = memoryMb; int physicalCores = virtualCores; ResourceCalculatorPlugin rcp = ResourceCalculatorPlugin.getNodeResourceMonitorPlugin(conf); if (rcp != null) { - physicalMemoryMb = (int) (rcp.getPhysicalMemorySize() / (1024 * 1024)); + physicalMemoryMb = rcp.getPhysicalMemorySize() / (1024 * 1024); physicalCores = rcp.getNumProcessors(); } this.physicalResource = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java index 32f73c85a0c..6fe5bbe73fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java @@ -21,10 +21,16 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; + +import java.util.Map; /** * Helper class to determine hardware related characteristics such as the @@ -240,8 +246,8 @@ private static int getVCoresInternal(ResourceCalculatorPlugin plugin, return cores; } - private static int getConfiguredMemoryMB(Configuration conf) { - int memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, + private static long getConfiguredMemoryMB(Configuration conf) { + long memoryMb = conf.getLong(YarnConfiguration.NM_PMEM_MB, YarnConfiguration.DEFAULT_NM_PMEM_MB); if (memoryMb == -1) { memoryMb = YarnConfiguration.DEFAULT_NM_PMEM_MB; @@ -264,7 +270,7 @@ private static int getConfiguredMemoryMB(Configuration conf) { * - the configuration for the NodeManager * @return the amount of memory that will be used for YARN containers in MB. 
*/ - public static int getContainerMemoryMB(Configuration conf) { + public static long getContainerMemoryMB(Configuration conf) { if (!isHardwareDetectionEnabled(conf)) { return getConfiguredMemoryMB(conf); } @@ -293,7 +299,7 @@ public static int getContainerMemoryMB(Configuration conf) { * - the configuration for the NodeManager * @return the amount of memory that will be used for YARN containers in MB. */ - public static int getContainerMemoryMB(ResourceCalculatorPlugin plugin, + public static long getContainerMemoryMB(ResourceCalculatorPlugin plugin, Configuration conf) { if (!isHardwareDetectionEnabled(conf) || plugin == null) { return getConfiguredMemoryMB(conf); @@ -301,26 +307,24 @@ public static int getContainerMemoryMB(ResourceCalculatorPlugin plugin, return getContainerMemoryMBInternal(plugin, conf); } - private static int getContainerMemoryMBInternal(ResourceCalculatorPlugin plugin, + private static long getContainerMemoryMBInternal(ResourceCalculatorPlugin plugin, Configuration conf) { - int memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, -1); + long memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, -1); if (memoryMb == -1) { - int physicalMemoryMB = - (int) (plugin.getPhysicalMemorySize() / (1024 * 1024)); - int hadoopHeapSizeMB = - (int) (Runtime.getRuntime().maxMemory() / (1024 * 1024)); - int containerPhysicalMemoryMB = - (int) (0.8f * (physicalMemoryMB - (2 * hadoopHeapSizeMB))); - int reservedMemoryMB = - conf.getInt(YarnConfiguration.NM_SYSTEM_RESERVED_PMEM_MB, -1); + long physicalMemoryMB = (plugin.getPhysicalMemorySize() / (1024 * 1024)); + long hadoopHeapSizeMB = (Runtime.getRuntime().maxMemory() + / (1024 * 1024)); + long containerPhysicalMemoryMB = (long) (0.8f + * (physicalMemoryMB - (2 * hadoopHeapSizeMB))); + long reservedMemoryMB = conf + .getInt(YarnConfiguration.NM_SYSTEM_RESERVED_PMEM_MB, -1); if (reservedMemoryMB != -1) { containerPhysicalMemoryMB = physicalMemoryMB - reservedMemoryMB; } - if(containerPhysicalMemoryMB <= 0) { 
+ if (containerPhysicalMemoryMB <= 0) { LOG.error("Calculated memory for YARN containers is too low." + " Node memory is " + physicalMemoryMB - + " MB, system reserved memory is " - + reservedMemoryMB + " MB."); + + " MB, system reserved memory is " + reservedMemoryMB + " MB."); } containerPhysicalMemoryMB = Math.max(containerPhysicalMemoryMB, 0); memoryMb = containerPhysicalMemoryMB; @@ -332,4 +336,50 @@ private static int getContainerMemoryMBInternal(ResourceCalculatorPlugin plugin, } return memoryMb; } + + /** + * Get the resources for the node. + * @param configuration configuration file + * @return the resources for the node + */ + public static Resource getNodeResources(Configuration configuration) { + Configuration conf = new Configuration(configuration); + String memory = ResourceInformation.MEMORY_MB.getName(); + String vcores = ResourceInformation.VCORES.getName(); + + Resource ret = Resource.newInstance(0, 0); + Map resourceInformation = + ResourceUtils.getNodeResourceInformation(conf); + for (Map.Entry entry : resourceInformation + .entrySet()) { + ret.setResourceInformation(entry.getKey(), entry.getValue()); + LOG.debug("Setting key " + entry.getKey() + " to " + entry.getValue()); + } + if (resourceInformation.containsKey(memory)) { + Long value = resourceInformation.get(memory).getValue(); + if (value > Integer.MAX_VALUE) { + throw new YarnRuntimeException("Value '" + value + + "' for resource memory is more than the maximum for an integer."); + } + ResourceInformation memResInfo = resourceInformation.get(memory); + if(memResInfo.getValue() == 0) { + ret.setMemorySize(getContainerMemoryMB(conf)); + LOG.debug("Set memory to " + ret.getMemorySize()); + } + } + if (resourceInformation.containsKey(vcores)) { + Long value = resourceInformation.get(vcores).getValue(); + if (value > Integer.MAX_VALUE) { + throw new YarnRuntimeException("Value '" + value + + "' for resource vcores is more than the maximum for an integer."); + } + ResourceInformation 
vcoresResInfo = resourceInformation.get(vcores); + if(vcoresResInfo.getValue() == 0) { + ret.setVirtualCores(getVCores(conf)); + LOG.debug("Set vcores to " + ret.getVirtualCores()); + } + } + LOG.debug("Node resource information map is " + ret); + return ret; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java index 4add586bbf1..767c308aeb6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java @@ -172,7 +172,7 @@ public void testGetContainerMemoryMB() throws Exception { YarnConfiguration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION, true); - int mem = NodeManagerHardwareUtils.getContainerMemoryMB(null, conf); + long mem = NodeManagerHardwareUtils.getContainerMemoryMB(null, conf); Assert.assertEquals(YarnConfiguration.DEFAULT_NM_PMEM_MB, mem); mem = NodeManagerHardwareUtils.getContainerMemoryMB(plugin, conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index 6c016fec3c5..98a9d6c1de4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -351,6 +351,11 @@ 
src/test/resources/delete-reservation.json src/test/resources/update-reservation.json src/test/resources/invariants.txt + src/test/resources/profiles/sample-profiles-1.json + src/test/resources/profiles/sample-profiles-2.json + src/test/resources/profiles/illegal-profiles-1.json + src/test/resources/profiles/illegal-profiles-2.json + src/test/resources/profiles/illegal-profiles-3.json diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java index 931b1c8b7d5..7ae23e7bb63 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java @@ -82,7 +82,7 @@ public synchronized void addProcessor( public void registerApplicationMaster( ApplicationAttemptId applicationAttemptId, RegisterApplicationMasterRequest request, - RegisterApplicationMasterResponse resp) throws IOException { + RegisterApplicationMasterResponse resp) throws IOException, YarnException { this.head.registerApplicationMaster(applicationAttemptId, request, resp); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index e6c25adefac..8aa5dc17f15 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -112,6 +112,12 @@ import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; @@ -145,6 +151,7 @@ import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManager; import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationAllocation; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationInputValidator; @@ -175,6 +182,7 @@ import org.apache.hadoop.yarn.util.UTCClock; import 
com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; @@ -210,6 +218,8 @@ private static final EnumSet ACTIVE_APP_STATES = EnumSet.of( RMAppState.ACCEPTED, RMAppState.RUNNING); + private ResourceProfilesManager resourceProfilesManager; + public ClientRMService(RMContext rmContext, YarnScheduler scheduler, RMAppManager rmAppManager, ApplicationACLsManager applicationACLsManager, QueueACLsManager queueACLsManager, @@ -232,6 +242,7 @@ public ClientRMService(RMContext rmContext, YarnScheduler scheduler, this.reservationSystem = rmContext.getReservationSystem(); this.clock = clock; this.rValidator = new ReservationInputValidator(clock); + resourceProfilesManager = rmContext.getResourceProfilesManager(); } @Override @@ -1763,4 +1774,31 @@ private RMApp verifyUserAccessForRMApp(ApplicationId applicationId, return application; } + @Override + public GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) throws YarnException, IOException { + GetAllResourceProfilesResponse response = + GetAllResourceProfilesResponse.newInstance(); + response.setResourceProfiles(resourceProfilesManager.getResourceProfiles()); + return response; + } + + @Override + public GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException { + GetResourceProfileResponse response = + GetResourceProfileResponse.newInstance(); + response.setResource( + resourceProfilesManager.getProfile(request.getProfileName())); + return response; + } + + @Override + public GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) throws YarnException, IOException { + GetAllResourceTypeInfoResponse response = + GetAllResourceTypeInfoResponse.newInstance(); + response.setResourceTypeInfo(ResourceUtils.getResourcesTypeInfo()); + return response; + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java index a993d694e28..5632efecaa0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java @@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt @@ -100,18 +101,21 @@ RecordFactoryProvider.getRecordFactory(null); private RMContext rmContext; + private ResourceProfilesManager resourceProfilesManager; @Override public void init(ApplicationMasterServiceContext amsContext, ApplicationMasterServiceProcessor nextProcessor) { this.rmContext = (RMContext)amsContext; + this.resourceProfilesManager = rmContext.getResourceProfilesManager(); } @Override public void registerApplicationMaster( ApplicationAttemptId applicationAttemptId, RegisterApplicationMasterRequest request, - RegisterApplicationMasterResponse response) throws IOException { + RegisterApplicationMasterResponse response) + throws IOException, YarnException { RMApp app = getRmContext().getRMApps().get( 
applicationAttemptId.getApplicationId()); @@ -171,6 +175,12 @@ public void registerApplicationMaster( response.setSchedulerResourceTypes(getScheduler() .getSchedulingResourceTypes()); + if (getRmContext().getYarnConfiguration().getBoolean( + YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, + YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_ENABLED)) { + response.setResourceProfiles( + resourceProfilesManager.getResourceProfiles()); + } } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java index 4fc2916fe0c..c3ed7d51925 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java @@ -127,7 +127,8 @@ public void init(ApplicationMasterServiceContext amsContext, public void registerApplicationMaster( ApplicationAttemptId applicationAttemptId, RegisterApplicationMasterRequest request, - RegisterApplicationMasterResponse response) throws IOException { + RegisterApplicationMasterResponse response) + throws IOException, YarnException { SchedulerApplicationAttempt appAttempt = ((AbstractYarnScheduler) getScheduler()).getApplicationAttempt(applicationAttemptId); if (appAttempt.getOpportunisticContainerContext() == null) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index bcd1a9c92d7..2dc8c62b710 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -69,6 +69,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.SettableFuture; +import org.apache.hadoop.yarn.util.StringHelper; /** * This class manages the list of applications for the resource manager. @@ -188,7 +189,12 @@ public static SummaryBuilder createAppSummary(RMApp app) { .add("preemptedAMContainers", metrics.getNumAMContainersPreempted()) .add("preemptedNonAMContainers", metrics.getNumNonAMContainersPreempted()) .add("preemptedResources", metrics.getResourcePreempted()) - .add("applicationType", app.getApplicationType()); + .add("applicationType", app.getApplicationType()) + .add("resourceSeconds", StringHelper + .getResourceSecondsString(metrics.getResourceSecondsMap())) + .add("preemptedResourceSeconds", StringHelper + .getResourceSecondsString( + metrics.getPreemptedResourceSecondsMap())); return summary; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index 0ea9516567d..6df31354d3e 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetimeMonitor; @@ -159,4 +160,8 @@ void setRMDelegatedNodeLabelsUpdater( String getHAZookeeperConnectionState(); ResourceManager getResourceManager(); + + ResourceProfilesManager getResourceProfilesManager(); + + void setResourceProfilesManager(ResourceProfilesManager mgr); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index db2c585c031..921e5d5fe77 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -37,6 +37,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetimeMonitor; @@ -82,6 +83,8 @@ */ private RMActiveServiceContext activeServiceContext; + private ResourceProfilesManager resourceProfilesManager; + /** * Default constructor. To be used in conjunction with setter methods for * individual fields. @@ -551,5 +554,14 @@ public RMAppLifetimeMonitor getRMAppLifetimeMonitor() { return this.activeServiceContext.getRMAppLifetimeMonitor(); } + @Override + public ResourceProfilesManager getResourceProfilesManager() { + return this.resourceProfilesManager; + } + + @Override + public void setResourceProfilesManager(ResourceProfilesManager mgr) { + this.resourceProfilesManager = mgr; + } // Note: Read java doc before adding any services over here. 
} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index 35b0c983fac..2aae3a5c55e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -32,6 +32,7 @@ import com.google.common.collect.Sets; import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -46,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.ProfileCapability; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; @@ -65,6 +67,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt .RMAppAttemptState; @@ -88,6 +91,8 @@ */ public class RMServerUtils { + private static 
final Log LOG_HANDLE = LogFactory.getLog(RMServerUtils.class); + public static final String UPDATE_OUTSTANDING_ERROR = "UPDATE_OUTSTANDING_ERROR"; private static final String INCORRECT_CONTAINER_VERSION_ERROR = @@ -478,7 +483,7 @@ public static YarnApplicationAttemptState createApplicationAttemptState( DUMMY_APPLICATION_RESOURCE_USAGE_REPORT = BuilderUtils.newApplicationResourceUsageReport(-1, -1, Resources.createResource(-1, -1), Resources.createResource(-1, -1), - Resources.createResource(-1, -1), 0, 0, 0, 0); + Resources.createResource(-1, -1), new HashMap<>(), new HashMap<>()); /** @@ -622,4 +627,43 @@ public static int getApplicableNodeCountForAM(RMContext rmContext, return labelsToNodes.get(label); } } + + public static void convertProfileToResourceCapability(ResourceRequest ask, + Configuration conf, ResourceProfilesManager resourceProfilesManager) + throws YarnException { + + if (LOG_HANDLE.isDebugEnabled()) { + LOG_HANDLE + .debug("Converting profile to resource capability for ask " + ask); + } + + boolean profilesEnabled = + conf.getBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, + YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_ENABLED); + if (!profilesEnabled) { + if (ask.getProfileCapability() != null && !ask.getProfileCapability() + .getProfileCapabilityOverride().equals(Resources.none())) { + ask.setCapability( + ask.getProfileCapability().getProfileCapabilityOverride()); + } + } else { + if (ask.getProfileCapability() != null) { + ask.setCapability(ProfileCapability + .toResource(ask.getProfileCapability(), + resourceProfilesManager.getResourceProfiles())); + } + } + if (LOG_HANDLE.isDebugEnabled()) { + LOG_HANDLE + .debug("Converted profile to resource capability for ask " + ask); + } + } + + public static Long getOrDefault(Map map, String key, + Long defaultValue) { + if (map.containsKey(key)) { + return map.get(key); + } + return defaultValue; + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 5333f254328..e53a42c245c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -78,6 +78,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; import org.apache.hadoop.yarn.server.resourcemanager.reservation.AbstractReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManager; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceProfilesManagerImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; @@ -231,6 +233,13 @@ protected void serviceInit(Configuration conf) throws Exception { this.rmContext = new RMContextImpl(); rmContext.setResourceManager(this); + + // add resource profiles here because it's used by AbstractYarnScheduler + ResourceProfilesManager resourceProfilesManager = + new ResourceProfilesManagerImpl(); + resourceProfilesManager.init(conf); + rmContext.setResourceProfilesManager(resourceProfilesManager); + this.configurationProvider = ConfigurationProviderFactory.getConfigurationProvider(conf); this.configurationProvider.init(this.conf); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index cc47e02cb19..a42d0533c52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -376,10 +376,11 @@ public RegisterNodeManagerResponse registerNodeManager( // Check if this node has minimum allocations if (capability.getMemorySize() < minAllocMb || capability.getVirtualCores() < minAllocVcores) { - String message = - "NodeManager from " + host - + " doesn't satisfy minimum allocations, Sending SHUTDOWN" - + " signal to the NodeManager."; + String message = "NodeManager from " + host + + " doesn't satisfy minimum allocations, Sending SHUTDOWN" + + " signal to the NodeManager. 
Node capabilities are " + capability + + "; minimums are " + minAllocMb + "mb and " + minAllocVcores + + " vcores"; LOG.info(message); response.setDiagnosticsMessage(message); response.setNodeAction(NodeAction.SHUTDOWN); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index 35340e62a22..00ef39fdd98 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -851,11 +851,8 @@ public void storeNewApplicationAttempt(RMAppAttempt appAttempt) { appAttempt.getAppAttemptId(), appAttempt.getMasterContainer(), credentials, appAttempt.getStartTime(), - resUsage.getMemorySeconds(), - resUsage.getVcoreSeconds(), - attempMetrics.getPreemptedMemory(), - attempMetrics.getPreemptedVcore() - ); + resUsage.getResourceUsageSecondsMap(), + attempMetrics.getPreemptedResourceSecondsMap()); getRMStateStoreEventHandler().handle( new RMStateStoreAppAttemptEvent(attemptState)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java index 67aaf947127..2de071ad2ec 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java @@ -25,23 +25,28 @@ import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProto; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.util.Records; +import java.util.Map; + /* * Contains the state data that needs to be persisted for an ApplicationAttempt */ @Public @Unstable public abstract class ApplicationAttemptStateData { + public static ApplicationAttemptStateData newInstance( ApplicationAttemptId attemptId, Container container, Credentials attemptTokens, long startTime, RMAppAttemptState finalState, String finalTrackingUrl, String diagnostics, FinalApplicationStatus amUnregisteredFinalStatus, int exitStatus, - long finishTime, long memorySeconds, long vcoreSeconds, - long preemptedMemorySeconds, long preemptedVcoreSeconds) { + long finishTime, Map resourceSecondsMap, + Map preemptedResourceSecondsMap) { ApplicationAttemptStateData attemptStateData = Records.newRecord(ApplicationAttemptStateData.class); attemptStateData.setAttemptId(attemptId); @@ -54,23 +59,33 @@ public static ApplicationAttemptStateData newInstance( attemptStateData.setFinalApplicationStatus(amUnregisteredFinalStatus); attemptStateData.setAMContainerExitStatus(exitStatus); 
attemptStateData.setFinishTime(finishTime); - attemptStateData.setMemorySeconds(memorySeconds); - attemptStateData.setVcoreSeconds(vcoreSeconds); - attemptStateData.setPreemptedMemorySeconds(preemptedMemorySeconds); - attemptStateData.setPreemptedVcoreSeconds(preemptedVcoreSeconds); + attemptStateData.setMemorySeconds(RMServerUtils + .getOrDefault(resourceSecondsMap, + ResourceInformation.MEMORY_MB.getName(), 0L)); + attemptStateData.setVcoreSeconds(RMServerUtils + .getOrDefault(resourceSecondsMap, ResourceInformation.VCORES.getName(), + 0L)); + attemptStateData.setPreemptedMemorySeconds(RMServerUtils + .getOrDefault(preemptedResourceSecondsMap, + ResourceInformation.MEMORY_MB.getName(), 0L)); + attemptStateData.setPreemptedVcoreSeconds(RMServerUtils + .getOrDefault(preemptedResourceSecondsMap, + ResourceInformation.VCORES.getName(), 0L)); + attemptStateData.setResourceSecondsMap(resourceSecondsMap); + attemptStateData + .setPreemptedResourceSecondsMap(preemptedResourceSecondsMap); return attemptStateData; } public static ApplicationAttemptStateData newInstance( ApplicationAttemptId attemptId, Container masterContainer, - Credentials attemptTokens, long startTime, long memorySeconds, - long vcoreSeconds, long preemptedMemorySeconds, - long preemptedVcoreSeconds) { - return newInstance(attemptId, masterContainer, attemptTokens, - startTime, null, "N/A", "", null, ContainerExitStatus.INVALID, 0, - memorySeconds, vcoreSeconds, - preemptedMemorySeconds, preemptedVcoreSeconds); - } + Credentials attemptTokens, long startTime, + Map resourceSeondsMap, + Map preemptedResourceSecondsMap) { + return newInstance(attemptId, masterContainer, attemptTokens, startTime, + null, "N/A", "", null, ContainerExitStatus.INVALID, 0, + resourceSeondsMap, preemptedResourceSecondsMap); + } public abstract ApplicationAttemptStateDataProto getProto(); @@ -215,4 +230,50 @@ public abstract void setFinalApplicationStatus( @Public @Unstable public abstract void setPreemptedVcoreSeconds(long 
vcoreSeconds); + + /** + * Get the aggregated number of resources preempted that the application has + * allocated times the number of seconds the application has been running. + * + * @return map containing the resource name and aggregated preempted + * resource-seconds + */ + @Public + @Unstable + public abstract Map getResourceSecondsMap(); + + /** + * Set the aggregated number of resources that the application has + * allocated times the number of seconds the application has been running. + * + * @param resourceSecondsMap map containing the resource name and aggregated + * resource-seconds + */ + @Public + @Unstable + public abstract void setResourceSecondsMap( + Map resourceSecondsMap); + + /** + * Get the aggregated number of resources preempted that the application has + * allocated times the number of seconds the application has been running. + * + * @return map containing the resource name and aggregated preempted + * resource-seconds + */ + @Public + @Unstable + public abstract Map getPreemptedResourceSecondsMap(); + + /** + * Set the aggregated number of resources preempted that the application has + * allocated times the number of seconds the application has been running. 
+ * + * @param preemptedResourceSecondsMap map containing the resource name and + * aggregated preempted resource-seconds + */ + @Public + @Unstable + public abstract void setPreemptedResourceSecondsMap( + Map preemptedResourceSecondsMap); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java index e89726f91ad..ed71ea2f016 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -55,6 +56,9 @@ private Container masterContainer = null; private ByteBuffer appAttemptTokens = null; + private Map resourceSecondsMap; + private Map preemptedResourceSecondsMap; + public ApplicationAttemptStateDataPBImpl() { builder = ApplicationAttemptStateDataProto.newBuilder(); } @@ -404,4 +408,50 @@ private static Credentials convertCredentialsFromByteBuffer( IOUtils.closeStream(dibb); } } + + @Override + public Map getResourceSecondsMap() { + if (this.resourceSecondsMap != null) { + return this.resourceSecondsMap; + } + ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? 
proto : builder; + this.resourceSecondsMap = ProtoUtils.convertStringLongMapProtoListToMap( + p.getApplicationResourceUsageMapList()); + return this.resourceSecondsMap; + } + + @Override + public void setResourceSecondsMap(Map resourceSecondsMap) { + maybeInitBuilder(); + builder.clearApplicationResourceUsageMap(); + this.resourceSecondsMap = resourceSecondsMap; + if (resourceSecondsMap != null) { + builder.addAllApplicationResourceUsageMap( + ProtoUtils.convertMapToStringLongMapProtoList(resourceSecondsMap)); + } + } + + @Override + public Map getPreemptedResourceSecondsMap() { + if (this.preemptedResourceSecondsMap != null) { + return this.preemptedResourceSecondsMap; + } + ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder; + this.preemptedResourceSecondsMap = ProtoUtils + .convertStringLongMapProtoListToMap( + p.getApplicationResourceUsageMapList()); + return this.preemptedResourceSecondsMap; + } + + @Override + public void setPreemptedResourceSecondsMap( + Map preemptedResourceSecondsMap) { + maybeInitBuilder(); + builder.clearPreemptedResourceUsageMap(); + this.preemptedResourceSecondsMap = preemptedResourceSecondsMap; + if (preemptedResourceSecondsMap != null) { + builder.addAllPreemptedResourceUsageMap(ProtoUtils + .convertMapToStringLongMapProtoList(preemptedResourceSecondsMap)); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java new file mode 100644 index 00000000000..700f0ef3aa5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java @@ -0,0 +1,86 @@ 
+/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.resource; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException; +import org.apache.hadoop.yarn.exceptions.YarnException; + +import java.io.IOException; +import java.util.Map; + +/** + * Interface for the resource profiles manager. Provides an interface to get + * the list of available profiles and some helper functions. + */ +public interface ResourceProfilesManager { + + /** + * Method to handle all initialization steps for ResourceProfilesManager. + * @param config Configuration object + * @throws IOException when invalid resource profile names are loaded + */ + void init(Configuration config) throws IOException; + + /** + * Get the resource capability associated with given profile name. + * @param profile name of resource profile + * @return resource capability for given profile + * + * @throws YarnException when any invalid profile name or feature is disabled + */ + Resource getProfile(String profile) throws YarnException; + + /** + * Get all supported resource profiles. 
+ * @return a map of resource objects associated with each profile + * + * @throws YARNFeatureNotEnabledException when feature is disabled + */ + Map getResourceProfiles() throws + YARNFeatureNotEnabledException; + + /** + * Reload profiles based on updated configuration. + * @throws IOException when invalid resource profile names are loaded + */ + void reloadProfiles() throws IOException; + + /** + * Get default supported resource profile. + * @return resource object which is default + * @throws YarnException when any invalid profile name or feature is disabled + */ + Resource getDefaultProfile() throws YarnException; + + /** + * Get minimum supported resource profile. + * @return resource object which is minimum + * @throws YarnException when any invalid profile name or feature is disabled + */ + Resource getMinimumProfile() throws YarnException; + + /** + * Get maximum supported resource profile. + * @return resource object which is maximum + * @throws YarnException when any invalid profile name or feature is disabled + */ + Resource getMaximumProfile() throws YarnException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java new file mode 100644 index 00000000000..dfd14e0d2c6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java @@ -0,0 +1,257 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.resource; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.codehaus.jackson.map.ObjectMapper; + +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** + * PBImpl class to handle all proto related implementation for + * ResourceProfilesManager. 
+ */ +public class ResourceProfilesManagerImpl implements ResourceProfilesManager { + + private static final Log LOG = + LogFactory.getLog(ResourceProfilesManagerImpl.class); + + private final Map profiles = new ConcurrentHashMap<>(); + private List resourceTypeInfo = + new ArrayList(); + private Configuration conf; + private boolean profileEnabled = false; + + private static final String MEMORY = ResourceInformation.MEMORY_MB.getName(); + private static final String VCORES = ResourceInformation.VCORES.getName(); + + public static final String DEFAULT_PROFILE = "default"; + public static final String MINIMUM_PROFILE = "minimum"; + public static final String MAXIMUM_PROFILE = "maximum"; + + protected final ReentrantReadWriteLock.ReadLock readLock; + protected final ReentrantReadWriteLock.WriteLock writeLock; + + private static final String[] MANDATORY_PROFILES = {DEFAULT_PROFILE, + MINIMUM_PROFILE, MAXIMUM_PROFILE}; + private static final String FEATURE_NOT_ENABLED_MSG = + "Resource profile is not enabled, please " + + "enable resource profile feature before using its functions." + + " (by setting " + YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED + + " to true)"; + + public ResourceProfilesManagerImpl() { + ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + readLock = lock.readLock(); + writeLock = lock.writeLock(); + } + + public void init(Configuration config) throws IOException { + conf = config; + loadProfiles(); + + // Load resource types, this should be done even if resource profile is + // disabled, since we have mandatory resource types like vcores/memory. 
+ loadResourceTypes(); + } + + private void loadResourceTypes() { + // Add all resource types + try { + writeLock.lock(); + Collection resourcesInfo = ResourceUtils + .getResourceTypes().values(); + for (ResourceInformation resourceInfo : resourcesInfo) { + resourceTypeInfo + .add(ResourceTypeInfo.newInstance(resourceInfo.getName(), + resourceInfo.getUnits(), resourceInfo.getResourceType())); + } + } finally { + writeLock.unlock(); + } + } + + private void loadProfiles() throws IOException { + profileEnabled = + conf.getBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, + YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_ENABLED); + if (!profileEnabled) { + return; + } + String sourceFile = + conf.get(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE, + YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE); + String resourcesFile = sourceFile; + ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); + if (classLoader == null) { + classLoader = ResourceProfilesManagerImpl.class.getClassLoader(); + } + if (classLoader != null) { + URL tmp = classLoader.getResource(sourceFile); + if (tmp != null) { + resourcesFile = tmp.getPath(); + } + } + ObjectMapper mapper = new ObjectMapper(); + Map data = mapper.readValue(new File(resourcesFile), Map.class); + Iterator iterator = data.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = (Map.Entry) iterator.next(); + String profileName = entry.getKey().toString(); + if (profileName.isEmpty()) { + throw new IOException( + "Name of resource profile cannot be an empty string"); + } + if (entry.getValue() instanceof Map) { + Map profileInfo = (Map) entry.getValue(); + // ensure memory and vcores are specified + if (!profileInfo.containsKey(MEMORY) + || !profileInfo.containsKey(VCORES)) { + throw new IOException( + "Illegal resource profile definition; profile '" + profileName + + "' must contain '" + MEMORY + "' and '" + VCORES + "'"); + } + Resource resource = 
parseResource(profileInfo); + profiles.put(profileName, resource); + LOG.info( + "Added profile '" + profileName + "' with resources: " + resource); + } + } + // check to make sure mandatory profiles are present + for (String profile : MANDATORY_PROFILES) { + if (!profiles.containsKey(profile)) { + throw new IOException( + "Mandatory profile missing '" + profile + "' missing. " + + Arrays.toString(MANDATORY_PROFILES) + " must be present"); + } + } + LOG.info("Loaded profiles: " + profiles.keySet()); + } + + private Resource parseResource(Map profileInfo) throws IOException { + Resource resource = Resource.newInstance(0, 0); + Iterator iterator = profileInfo.entrySet().iterator(); + Map resourceTypes = ResourceUtils + .getResourceTypes(); + while (iterator.hasNext()) { + Map.Entry resourceEntry = (Map.Entry) iterator.next(); + String resourceName = resourceEntry.getKey().toString(); + ResourceInformation resourceValue = fromString(resourceName, + resourceEntry.getValue().toString()); + if (resourceName.equals(MEMORY)) { + resource.setMemorySize(resourceValue.getValue()); + continue; + } + if (resourceName.equals(VCORES)) { + resource + .setVirtualCores(Long.valueOf(resourceValue.getValue()).intValue()); + continue; + } + if (resourceTypes.containsKey(resourceName)) { + resource.setResourceInformation(resourceName, resourceValue); + } else { + throw new IOException("Unrecognized resource type '" + resourceName + + "'. 
Recognized resource types are '" + resourceTypes.keySet() + + "'"); + } + } + return resource; + } + + private void checkAndThrowExceptionWhenFeatureDisabled() + throws YARNFeatureNotEnabledException { + if (!profileEnabled) { + throw new YARNFeatureNotEnabledException(FEATURE_NOT_ENABLED_MSG); + } + } + + @Override + public Resource getProfile(String profile) throws YarnException{ + checkAndThrowExceptionWhenFeatureDisabled(); + + if (profile == null) { + throw new YarnException("Profile name cannot be null"); + } + + Resource profileRes = profiles.get(profile); + if (profileRes == null) { + throw new YarnException( + "Resource profile '" + profile + "' not found"); + } + return Resources.clone(profileRes); + } + + @Override + public Map getResourceProfiles() + throws YARNFeatureNotEnabledException { + checkAndThrowExceptionWhenFeatureDisabled(); + return Collections.unmodifiableMap(profiles); + } + + @Override + @VisibleForTesting + public void reloadProfiles() throws IOException { + profiles.clear(); + loadProfiles(); + } + + @Override + public Resource getDefaultProfile() throws YarnException { + return getProfile(DEFAULT_PROFILE); + } + + @Override + public Resource getMinimumProfile() throws YarnException { + return getProfile(MINIMUM_PROFILE); + } + + @Override + public Resource getMaximumProfile() throws YarnException { + return getProfile(MAXIMUM_PROFILE); + } + + private ResourceInformation fromString(String name, String value) { + String units = ResourceUtils.getUnits(value); + Long resourceValue = + Long.valueOf(value.substring(0, value.length() - units.length())); + return ResourceInformation.newInstance(name, units, resourceValue); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 13b079286fc..728a99fd54e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -748,14 +748,10 @@ public ApplicationReport createAndGetApplicationReport(String clientUserName, } RMAppMetrics rmAppMetrics = getRMAppMetrics(); - appUsageReport.setMemorySeconds(rmAppMetrics.getMemorySeconds()); - appUsageReport.setVcoreSeconds(rmAppMetrics.getVcoreSeconds()); - appUsageReport. - setPreemptedMemorySeconds(rmAppMetrics. - getPreemptedMemorySeconds()); - appUsageReport. - setPreemptedVcoreSeconds(rmAppMetrics. - getPreemptedVcoreSeconds()); + appUsageReport + .setResourceSecondsMap(rmAppMetrics.getResourceSecondsMap()); + appUsageReport.setPreemptedResourceSecondsMap( + rmAppMetrics.getPreemptedResourceSecondsMap()); } if (currentApplicationAttemptId == null) { @@ -1628,10 +1624,9 @@ public RMAppMetrics getRMAppMetrics() { Resource resourcePreempted = Resource.newInstance(0, 0); int numAMContainerPreempted = 0; int numNonAMContainerPreempted = 0; - long memorySeconds = 0; - long vcoreSeconds = 0; - long preemptedMemorySeconds = 0; - long preemptedVcoreSeconds = 0; + Map resourceSecondsMap = new HashMap<>(); + Map preemptedSecondsMap = new HashMap<>(); + for (RMAppAttempt attempt : attempts.values()) { if (null != attempt) { RMAppAttemptMetrics attemptMetrics = @@ -1645,17 +1640,25 @@ public RMAppMetrics getRMAppMetrics() { // for both running and finished containers. 
AggregateAppResourceUsage resUsage = attempt.getRMAppAttemptMetrics().getAggregateAppResourceUsage(); - memorySeconds += resUsage.getMemorySeconds(); - vcoreSeconds += resUsage.getVcoreSeconds(); - preemptedMemorySeconds += attemptMetrics.getPreemptedMemory(); - preemptedVcoreSeconds += attemptMetrics.getPreemptedVcore(); + for (Map.Entry entry : resUsage + .getResourceUsageSecondsMap().entrySet()) { + long value = RMServerUtils + .getOrDefault(resourceSecondsMap, entry.getKey(), 0L); + value += entry.getValue(); + resourceSecondsMap.put(entry.getKey(), value); + } + for (Map.Entry entry : attemptMetrics + .getPreemptedResourceSecondsMap().entrySet()) { + long value = RMServerUtils + .getOrDefault(preemptedSecondsMap, entry.getKey(), 0L); + value += entry.getValue(); + preemptedSecondsMap.put(entry.getKey(), value); + } } } - return new RMAppMetrics(resourcePreempted, - numNonAMContainerPreempted, numAMContainerPreempted, - memorySeconds, vcoreSeconds, - preemptedMemorySeconds, preemptedVcoreSeconds); + return new RMAppMetrics(resourcePreempted, numNonAMContainerPreempted, + numAMContainerPreempted, resourceSecondsMap, preemptedSecondsMap); } @Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppMetrics.java index fa068ea2d88..2bb7fd1ae10 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppMetrics.java @@ -19,27 +19,27 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp; 
import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; + +import java.util.Map; public class RMAppMetrics { final Resource resourcePreempted; final int numNonAMContainersPreempted; final int numAMContainersPreempted; - final long memorySeconds; - final long vcoreSeconds; - private final long preemptedMemorySeconds; - private final long preemptedVcoreSeconds; + private final Map resourceSecondsMap; + private final Map preemptedResourceSecondsMap; public RMAppMetrics(Resource resourcePreempted, int numNonAMContainersPreempted, int numAMContainersPreempted, - long memorySeconds, long vcoreSeconds, long preemptedMemorySeconds, - long preemptedVcoreSeconds) { + Map resourceSecondsMap, + Map preemptedResourceSecondsMap) { this.resourcePreempted = resourcePreempted; this.numNonAMContainersPreempted = numNonAMContainersPreempted; this.numAMContainersPreempted = numAMContainersPreempted; - this.memorySeconds = memorySeconds; - this.vcoreSeconds = vcoreSeconds; - this.preemptedMemorySeconds = preemptedMemorySeconds; - this.preemptedVcoreSeconds = preemptedVcoreSeconds; + this.resourceSecondsMap = resourceSecondsMap; + this.preemptedResourceSecondsMap = preemptedResourceSecondsMap; } public Resource getResourcePreempted() { @@ -55,19 +55,32 @@ public int getNumAMContainersPreempted() { } public long getMemorySeconds() { - return memorySeconds; + return RMServerUtils.getOrDefault(resourceSecondsMap, + ResourceInformation.MEMORY_MB.getName(), 0L); } public long getVcoreSeconds() { - return vcoreSeconds; + return RMServerUtils + .getOrDefault(resourceSecondsMap, ResourceInformation.VCORES.getName(), + 0L); } public long getPreemptedMemorySeconds() { - return preemptedMemorySeconds; + return RMServerUtils.getOrDefault(preemptedResourceSecondsMap, + ResourceInformation.MEMORY_MB.getName(), 0L); } public long getPreemptedVcoreSeconds() { - return 
preemptedVcoreSeconds; + return RMServerUtils.getOrDefault(preemptedResourceSecondsMap, + ResourceInformation.VCORES.getName(), 0L); + } + + public Map getResourceSecondsMap() { + return resourceSecondsMap; + } + + public Map getPreemptedResourceSecondsMap() { + return preemptedResourceSecondsMap; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AggregateAppResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AggregateAppResourceUsage.java index f0c2b348c32..b858712f7d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AggregateAppResourceUsage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AggregateAppResourceUsage.java @@ -19,42 +19,38 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt; import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.yarn.api.records.ResourceInformation; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; + +import java.util.HashMap; +import java.util.Map; @Private public class AggregateAppResourceUsage { - long memorySeconds; - long vcoreSeconds; + private Map resourceSecondsMap = new HashMap<>(); - public AggregateAppResourceUsage(long memorySeconds, long vcoreSeconds) { - this.memorySeconds = memorySeconds; - this.vcoreSeconds = vcoreSeconds; + public AggregateAppResourceUsage(Map resourceSecondsMap) { + this.resourceSecondsMap.putAll(resourceSecondsMap); } /** * @return the memorySeconds */ public long getMemorySeconds() { - return memorySeconds; - } - - /** - * @param 
memorySeconds the memorySeconds to set - */ - public void setMemorySeconds(long memorySeconds) { - this.memorySeconds = memorySeconds; + return RMServerUtils.getOrDefault(resourceSecondsMap, + ResourceInformation.MEMORY_MB.getName(), 0L); } /** * @return the vcoreSeconds */ public long getVcoreSeconds() { - return vcoreSeconds; + return RMServerUtils + .getOrDefault(resourceSecondsMap, ResourceInformation.VCORES.getName(), + 0L); } - /** - * @param vcoreSeconds the vcoreSeconds to set - */ - public void setVcoreSeconds(long vcoreSeconds) { - this.vcoreSeconds = vcoreSeconds; + public Map getResourceUsageSecondsMap() { + return resourceSecondsMap; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index d748860d874..9778c44d72d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -953,12 +953,9 @@ public ApplicationResourceUsageReport getApplicationResourceUsageReport() { } AggregateAppResourceUsage resUsage = this.attemptMetrics.getAggregateAppResourceUsage(); - report.setMemorySeconds(resUsage.getMemorySeconds()); - report.setVcoreSeconds(resUsage.getVcoreSeconds()); - report.setPreemptedMemorySeconds( - this.attemptMetrics.getPreemptedMemory()); - report.setPreemptedVcoreSeconds( - this.attemptMetrics.getPreemptedVcore()); + report.setResourceSecondsMap(resUsage.getResourceUsageSecondsMap()); + 
report.setPreemptedResourceSecondsMap( + this.attemptMetrics.getPreemptedResourceSecondsMap()); return report; } finally { this.readLock.unlock(); @@ -995,11 +992,10 @@ public void recover(RMState state) { this.finalStatus = attemptState.getFinalApplicationStatus(); this.startTime = attemptState.getStartTime(); this.finishTime = attemptState.getFinishTime(); - this.attemptMetrics.updateAggregateAppResourceUsage( - attemptState.getMemorySeconds(), attemptState.getVcoreSeconds()); + this.attemptMetrics + .updateAggregateAppResourceUsage(attemptState.getResourceSecondsMap()); this.attemptMetrics.updateAggregatePreemptedAppResourceUsage( - attemptState.getPreemptedMemorySeconds(), - attemptState.getPreemptedVcoreSeconds()); + attemptState.getPreemptedResourceSecondsMap()); } public void transferStateFromAttempt(RMAppAttempt attempt) { @@ -1375,16 +1371,12 @@ private void rememberTargetTransitionsAndStoreState(RMAppAttemptEvent event, RMStateStore rmStore = rmContext.getStateStore(); setFinishTime(System.currentTimeMillis()); - ApplicationAttemptStateData attemptState = - ApplicationAttemptStateData.newInstance( - applicationAttemptId, getMasterContainer(), - rmStore.getCredentialsFromAppAttempt(this), - startTime, stateToBeStored, finalTrackingUrl, diags.toString(), - finalStatus, exitStatus, - getFinishTime(), resUsage.getMemorySeconds(), - resUsage.getVcoreSeconds(), - this.attemptMetrics.getPreemptedMemory(), - this.attemptMetrics.getPreemptedVcore()); + ApplicationAttemptStateData attemptState = ApplicationAttemptStateData + .newInstance(applicationAttemptId, getMasterContainer(), + rmStore.getCredentialsFromAppAttempt(this), startTime, + stateToBeStored, finalTrackingUrl, diags.toString(), finalStatus, exitStatus, + getFinishTime(), resUsage.getResourceUsageSecondsMap(), + this.attemptMetrics.getPreemptedResourceSecondsMap()); LOG.info("Updating application attempt " + applicationAttemptId + " with final state: " + targetedFinalState + ", and exit status: " + 
exitStatus); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java index 0655609a893..0982ef93434 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -25,11 +27,13 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; +import org.apache.commons.lang.time.DateUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -49,10 +53,8 @@ private ReadLock readLock; private WriteLock writeLock; - private AtomicLong finishedMemorySeconds = new AtomicLong(0); - 
private AtomicLong finishedVcoreSeconds = new AtomicLong(0); - private AtomicLong preemptedMemorySeconds = new AtomicLong(0); - private AtomicLong preemptedVcoreSeconds = new AtomicLong(0); + private Map resourceUsageMap = new HashMap<>(); + private Map preemptedResourceMap = new HashMap<>(); private RMContext rmContext; private int[][] localityStatistics = @@ -102,11 +104,16 @@ public Resource getResourcePreempted() { } public long getPreemptedMemory() { - return preemptedMemorySeconds.get(); + return preemptedResourceMap.get(ResourceInformation.MEMORY_MB.getName()) + .get(); } public long getPreemptedVcore() { - return preemptedVcoreSeconds.get(); + return preemptedResourceMap.get(ResourceInformation.VCORES.getName()).get(); + } + + public Map getPreemptedResourceSecondsMap() { + return convertAtomicLongMaptoLongMap(preemptedResourceMap); } public int getNumNonAMContainersPreempted() { @@ -122,35 +129,89 @@ public boolean getIsPreempted() { } public AggregateAppResourceUsage getAggregateAppResourceUsage() { - long memorySeconds = finishedMemorySeconds.get(); - long vcoreSeconds = finishedVcoreSeconds.get(); + Map resourcesUsed = + convertAtomicLongMaptoLongMap(resourceUsageMap); // Only add in the running containers if this is the active attempt. 
RMApp rmApp = rmContext.getRMApps().get(attemptId.getApplicationId()); - if (null != rmApp) { - RMAppAttempt currentAttempt = rmApp.getCurrentAppAttempt(); + if (rmApp != null) { + RMAppAttempt currentAttempt = rmContext.getRMApps().get(attemptId.getApplicationId()).getCurrentAppAttempt(); if (currentAttempt.getAppAttemptId().equals(attemptId)) { - ApplicationResourceUsageReport appResUsageReport = rmContext - .getScheduler().getAppResourceUsageReport(attemptId); + ApplicationResourceUsageReport appResUsageReport = + rmContext.getScheduler().getAppResourceUsageReport(attemptId); if (appResUsageReport != null) { - memorySeconds += appResUsageReport.getMemorySeconds(); - vcoreSeconds += appResUsageReport.getVcoreSeconds(); + Map tmp = appResUsageReport.getResourceSecondsMap(); + for (Map.Entry entry : tmp.entrySet()) { + if (resourcesUsed.containsKey(entry.getKey())) { + Long value = resourcesUsed.get(entry.getKey()); + value += entry.getValue(); + resourcesUsed.put(entry.getKey(), value); + } else{ + resourcesUsed.put(entry.getKey(), entry.getValue()); + } + } } } } - return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds); + return new AggregateAppResourceUsage(resourcesUsed); + } + + public void updateAggregateAppResourceUsage(Resource allocated, + long deltaUsedMillis) { + updateUsageMap(allocated, deltaUsedMillis, resourceUsageMap); + } + + public void updateAggregatePreemptedAppResourceUsage(Resource allocated, + long deltaUsedMillis) { + updateUsageMap(allocated, deltaUsedMillis, preemptedResourceMap); } - public void updateAggregateAppResourceUsage(long finishedMemorySeconds, - long finishedVcoreSeconds) { - this.finishedMemorySeconds.addAndGet(finishedMemorySeconds); - this.finishedVcoreSeconds.addAndGet(finishedVcoreSeconds); + public void updateAggregateAppResourceUsage( + Map resourceSecondsMap) { + updateUsageMap(resourceSecondsMap, resourceUsageMap); } public void updateAggregatePreemptedAppResourceUsage( - long preemptedMemorySeconds, long 
preemptedVcoreSeconds) { - this.preemptedMemorySeconds.addAndGet(preemptedMemorySeconds); - this.preemptedVcoreSeconds.addAndGet(preemptedVcoreSeconds); + Map preemptedResourceSecondsMap) { + updateUsageMap(preemptedResourceSecondsMap, preemptedResourceMap); + } + + private void updateUsageMap(Resource allocated, long deltaUsedMillis, + Map targetMap) { + for (ResourceInformation entry : allocated.getResources()) { + AtomicLong resourceUsed; + if (!targetMap.containsKey(entry.getName())) { + resourceUsed = new AtomicLong(0); + targetMap.put(entry.getName(), resourceUsed); + + } + resourceUsed = targetMap.get(entry.getName()); + resourceUsed.addAndGet((entry.getValue() * deltaUsedMillis) + / DateUtils.MILLIS_PER_SECOND); + } + } + + private void updateUsageMap(Map sourceMap, + Map targetMap) { + for (Map.Entry entry : sourceMap.entrySet()) { + AtomicLong resourceUsed; + if (!targetMap.containsKey(entry.getKey())) { + resourceUsed = new AtomicLong(0); + targetMap.put(entry.getKey(), resourceUsed); + + } + resourceUsed = targetMap.get(entry.getKey()); + resourceUsed.set(entry.getValue()); + } + } + + private Map convertAtomicLongMaptoLongMap( + Map source) { + Map ret = new HashMap<>(); + for (Map.Entry entry : source.entrySet()) { + ret.put(entry.getKey(), entry.getValue().get()); + } + return ret; } public void incNumAllocatedContainers(NodeType containerType, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java index f49db7e761b..2e0f202a45e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java @@ -25,7 +25,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; -import org.apache.commons.lang.time.DateUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -713,20 +712,15 @@ private static void updateAttemptMetrics(RMContainerImpl container) { if (rmAttempt != null) { long usedMillis = container.finishTime - container.creationTime; - long memorySeconds = resource.getMemorySize() - * usedMillis / DateUtils.MILLIS_PER_SECOND; - long vcoreSeconds = resource.getVirtualCores() - * usedMillis / DateUtils.MILLIS_PER_SECOND; rmAttempt.getRMAppAttemptMetrics() - .updateAggregateAppResourceUsage(memorySeconds,vcoreSeconds); + .updateAggregateAppResourceUsage(resource, usedMillis); // If this is a preempted container, update preemption metrics if (ContainerExitStatus.PREEMPTED == container.finishedStatus - .getExitStatus()) { - rmAttempt.getRMAppAttemptMetrics().updatePreemptionInfo(resource, - container); + .getExitStatus()) { rmAttempt.getRMAppAttemptMetrics() - .updateAggregatePreemptedAppResourceUsage(memorySeconds, - vcoreSeconds); + .updatePreemptionInfo(resource, container); + rmAttempt.getRMAppAttemptMetrics() + .updateAggregatePreemptedAppResourceUsage(resource, usedMillis); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index 2c270174845..a83f28d2c11 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent; @@ -95,6 +96,7 @@ import org.apache.hadoop.yarn.server.utils.Lock; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; import com.google.common.annotations.VisibleForTesting; @@ -1280,7 +1282,63 @@ protected void rollbackContainerUpdate( * @param container Container. */ public void asyncContainerRelease(RMContainer container) { - this.rmContext.getDispatcher().getEventHandler() - .handle(new ReleaseContainerEvent(container)); + this.rmContext.getDispatcher().getEventHandler().handle( + new ReleaseContainerEvent(container)); + } + + /* + * Get a Resource object with for the minimum allocation possible. If resource + * profiles are enabled then the 'minimum' resource profile will be used. If + * they are not enabled, use the minimums specified in the config files. 
+ * + * @return a Resource object with the minimum allocation for the scheduler + */ + public Resource getMinimumAllocation() { + boolean profilesEnabled = getConfig() + .getBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, + YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_ENABLED); + Resource ret; + if (!profilesEnabled) { + ret = ResourceUtils.getResourceTypesMinimumAllocation(); + } else { + try { + ret = rmContext.getResourceProfilesManager().getMinimumProfile(); + } catch (YarnException e) { + LOG.error( + "Exception while getting minimum profile from profile manager:", e); + throw new YarnRuntimeException(e); + } + } + LOG.info("Minimum allocation = " + ret); + return ret; + } + + /** + * Get a Resource object with for the maximum allocation possible. If resource + * profiles are enabled then the 'maximum' resource profile will be used. If + * they are not enabled, use the maximums specified in the config files. + * + * @return a Resource object with the maximum allocation for the scheduler + */ + + public Resource getMaximumAllocation() { + boolean profilesEnabled = getConfig() + .getBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, + YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_ENABLED); + Resource ret; + if (!profilesEnabled) { + ret = ResourceUtils.getResourceTypesMaximumAllocation(); + } else { + try { + ret = rmContext.getResourceProfilesManager().getMaximumProfile(); + } catch (YarnException e) { + LOG.error( + "Exception while getting maximum profile from ResourceProfileManager:", + e); + throw new YarnRuntimeException(e); + } + } + LOG.info("Maximum allocation = " + ret); + return ret; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java index 010e64506b6..ccec6bc6a89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java @@ -219,10 +219,15 @@ public Resource getMaxAllowedAllocation() { return configuredMaxAllocation; } - return Resources.createResource( - Math.min(configuredMaxAllocation.getMemorySize(), maxNodeMemory), - Math.min(configuredMaxAllocation.getVirtualCores(), maxNodeVCores) - ); + Resource ret = Resources.clone(configuredMaxAllocation); + if (ret.getMemorySize() > maxNodeMemory) { + ret.setMemorySize(maxNodeMemory); + } + if (ret.getVirtualCores() > maxNodeVCores) { + ret.setVirtualCores(maxNodeVCores); + } + + return ret; } finally { readLock.unlock(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index f9a72191a67..dc4c0bc8721 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -55,11 +55,13 @@ import 
org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.UpdateContainerError; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.api.ContainerType; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage; @@ -107,9 +109,7 @@ private static final long MEM_AGGREGATE_ALLOCATION_CACHE_MSECS = 3000; protected long lastMemoryAggregateAllocationUpdateTime = 0; - private long lastMemorySeconds = 0; - private long lastVcoreSeconds = 0; - + private Map lastResourceSecondsMap = new HashMap<>(); protected final AppSchedulingInfo appSchedulingInfo; protected ApplicationAttemptId attemptId; protected Map liveContainers = @@ -1001,22 +1001,23 @@ private AggregateAppResourceUsage getRunningAggregateAppResourceUsage() { // recently. 
if ((currentTimeMillis - lastMemoryAggregateAllocationUpdateTime) > MEM_AGGREGATE_ALLOCATION_CACHE_MSECS) { - long memorySeconds = 0; - long vcoreSeconds = 0; + Map resourceSecondsMap = new HashMap<>(); for (RMContainer rmContainer : this.liveContainers.values()) { long usedMillis = currentTimeMillis - rmContainer.getCreationTime(); Resource resource = rmContainer.getContainer().getResource(); - memorySeconds += resource.getMemorySize() * usedMillis / - DateUtils.MILLIS_PER_SECOND; - vcoreSeconds += resource.getVirtualCores() * usedMillis - / DateUtils.MILLIS_PER_SECOND; + for (ResourceInformation entry : resource.getResources()) { + long value = RMServerUtils + .getOrDefault(resourceSecondsMap, entry.getName(), 0L); + value += entry.getValue() * usedMillis + / DateUtils.MILLIS_PER_SECOND; + resourceSecondsMap.put(entry.getName(), value); + } } lastMemoryAggregateAllocationUpdateTime = currentTimeMillis; - lastMemorySeconds = memorySeconds; - lastVcoreSeconds = vcoreSeconds; + lastResourceSecondsMap = resourceSecondsMap; } - return new AggregateAppResourceUsage(lastMemorySeconds, lastVcoreSeconds); + return new AggregateAppResourceUsage(lastResourceSecondsMap); } public ApplicationResourceUsageReport getResourceUsageReport() { @@ -1031,6 +1032,11 @@ public ApplicationResourceUsageReport getResourceUsageReport() { Resource cluster = rmContext.getScheduler().getClusterResource(); ResourceCalculator calc = rmContext.getScheduler().getResourceCalculator(); + Map preemptedResourceSecondsMaps = new HashMap<>(); + preemptedResourceSecondsMaps + .put(ResourceInformation.MEMORY_MB.getName(), 0L); + preemptedResourceSecondsMaps + .put(ResourceInformation.VCORES.getName(), 0L); float queueUsagePerc = 0.0f; float clusterUsagePerc = 0.0f; if (!calc.isInvalidDivisor(cluster)) { @@ -1040,15 +1046,15 @@ public ApplicationResourceUsageReport getResourceUsageReport() { queueUsagePerc = calc.divide(cluster, usedResourceClone, Resources.multiply(cluster, queueCapacityPerc)) * 100; } - 
clusterUsagePerc = calc.divide(cluster, usedResourceClone, cluster) - * 100; + clusterUsagePerc = + calc.divide(cluster, usedResourceClone, cluster) * 100; } - return ApplicationResourceUsageReport.newInstance(liveContainers.size(), - reservedContainers.size(), usedResourceClone, reservedResourceClone, - Resources.add(usedResourceClone, reservedResourceClone), - runningResourceUsage.getMemorySeconds(), - runningResourceUsage.getVcoreSeconds(), queueUsagePerc, - clusterUsagePerc, 0, 0); + return ApplicationResourceUsageReport + .newInstance(liveContainers.size(), reservedContainers.size(), + usedResourceClone, reservedResourceClone, + Resources.add(usedResourceClone, reservedResourceClone), + runningResourceUsage.getResourceUsageSecondsMap(), queueUsagePerc, + clusterUsagePerc, preemptedResourceSecondsMaps); } finally { writeLock.unlock(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index 7b554db4705..c558b8dd912 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -39,10 +39,12 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException; import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; +import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import 
org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.AccessType; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl; @@ -253,6 +255,14 @@ public static void normalizeAndvalidateRequest(ResourceRequest resReq, private static void validateResourceRequest(ResourceRequest resReq, Resource maximumResource, QueueInfo queueInfo, RMContext rmContext) throws InvalidResourceRequestException { + try { + RMServerUtils.convertProfileToResourceCapability(resReq, + rmContext.getYarnConfiguration(), + rmContext.getResourceProfilesManager()); + } catch (YarnException ye) { + throw new InvalidResourceRequestException(ye); + } + if (resReq.getCapability().getMemorySize() < 0 || resReq.getCapability().getMemorySize() > maximumResource.getMemorySize()) { throw new InvalidResourceRequestException("Invalid resource request" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index fde84c48def..8e88037bebb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -291,8 +291,8 @@ void initScheduler(Configuration configuration) throws writeLock.lock(); this.conf = loadCapacitySchedulerConfiguration(configuration); validateConf(this.conf); - this.minimumAllocation = this.conf.getMinimumAllocation(); - initMaximumResourceCapability(this.conf.getMaximumAllocation()); + this.minimumAllocation = super.getMinimumAllocation(); + initMaximumResourceCapability(super.getMaximumAllocation()); this.calculator = this.conf.getResourceCalculator(); this.usePortForNodeName = this.conf.getUsePortForNodeName(); this.applications = new ConcurrentHashMap<>(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index c5212501b34..4552983faa0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -1298,8 +1298,8 @@ private void initScheduler(Configuration conf) throws IOException { this.conf = new FairSchedulerConfiguration(conf); validateConf(this.conf); authorizer = YarnAuthorizationProvider.getInstance(conf); - minimumAllocation = this.conf.getMinimumAllocation(); - initMaximumResourceCapability(this.conf.getMaximumAllocation()); + minimumAllocation = super.getMinimumAllocation(); + 
initMaximumResourceCapability(super.getMaximumAllocation()); incrAllocation = this.conf.getIncrementAllocation(); updateReservationThreshold(); continuousSchedulingEnabled = this.conf.isContinuousSchedulingEnabled(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 94c7e166ff5..185d426d717 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -241,17 +241,8 @@ private synchronized void initScheduler(Configuration conf) { //Use ConcurrentSkipListMap because applications need to be ordered this.applications = new ConcurrentSkipListMap<>(); - this.minimumAllocation = - Resources.createResource(conf.getInt( - YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB)); - initMaximumResourceCapability( - Resources.createResource(conf.getInt( - YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB), - conf.getInt( - YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES))); + this.minimumAllocation = super.getMinimumAllocation(); + initMaximumResourceCapability(super.getMaximumAllocation()); this.usePortForNodeName = conf.getBoolean( YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, 
YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java index f6b1a943a60..806b6364099 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java @@ -19,17 +19,21 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.records.ResourceTypeInfo; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; import com.google.inject.Inject; +import java.util.Arrays; + /** * Provides an table with an overview of many cluster wide metrics and if * per user metrics are enabled it will show an overview of what the @@ -168,8 +172,8 @@ protected void render(Block html) { } } - - SchedulerInfo schedulerInfo=new SchedulerInfo(this.rm); + + SchedulerInfo schedulerInfo = new SchedulerInfo(this.rm); div.h3("Scheduler Metrics"). 
table("#schedulermetricsoverview"). @@ -186,7 +190,8 @@ protected void render(Block html) { tbody().$class("ui-widget-content"). tr(). td(String.valueOf(schedulerInfo.getSchedulerType())). - td(String.valueOf(schedulerInfo.getSchedulerResourceTypes())). + td(String.valueOf(Arrays.toString(ResourceUtils.getResourcesTypeInfo() + .toArray(new ResourceTypeInfo[0])))). td(schedulerInfo.getMinAllocation().toString()). td(schedulerInfo.getMaxAllocation().toString()). td(String.valueOf(schedulerInfo.getMaxClusterLevelAppPriority())). diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java index cd04264d07c..47e517f19bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java @@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics; import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo; import org.apache.hadoop.yarn.server.webapp.AppBlock; +import org.apache.hadoop.yarn.util.StringHelper; import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV; @@ -98,15 +99,12 @@ protected void createApplicationMetricsTable(Block html){ attemptResourcePreempted) .__("Number of Non-AM Containers Preempted from Current Attempt:", attemptNumNonAMContainerPreempted) - .__("Aggregate Resource Allocation:", - String.format("%d 
MB-seconds, %d vcore-seconds", - appMetrics == null ? "N/A" : appMetrics.getMemorySeconds(), - appMetrics == null ? "N/A" : appMetrics.getVcoreSeconds())) + .__("Aggregate Resource Allocation:", appMetrics == null ? "N/A" : + StringHelper + .getResourceSecondsString(appMetrics.getResourceSecondsMap())) .__("Aggregate Preempted Resource Allocation:", - String.format("%d MB-seconds, %d vcore-seconds", - appMetrics == null ? "N/A" : appMetrics.getPreemptedMemorySeconds(), - appMetrics == null ? "N/A" : - appMetrics.getPreemptedVcoreSeconds())); + appMetrics == null ? "N/A" : StringHelper.getResourceSecondsString( + appMetrics.getPreemptedResourceSecondsMap())); pdiv.__(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java index 9fb8fb5858d..236c4677653 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -101,6 +101,7 @@ private long vcoreSeconds; protected float queueUsagePercentage; protected float clusterUsagePercentage; + protected Map resourceSecondsMap; // preemption info fields private long preemptedResourceMB; @@ -109,6 +110,7 @@ private int numAMContainerPreempted; private long preemptedMemorySeconds; private long preemptedVcoreSeconds; + protected Map preemptedResourceSecondsMap; // list of resource requests @XmlElement(name = "resourceRequests") @@ -236,8 +238,10 @@ public AppInfo(ResourceManager rm, RMApp app, Boolean hasAccess, 
appMetrics.getResourcePreempted().getVirtualCores(); memorySeconds = appMetrics.getMemorySeconds(); vcoreSeconds = appMetrics.getVcoreSeconds(); + resourceSecondsMap = appMetrics.getResourceSecondsMap(); preemptedMemorySeconds = appMetrics.getPreemptedMemorySeconds(); preemptedVcoreSeconds = appMetrics.getPreemptedVcoreSeconds(); + preemptedResourceSecondsMap = appMetrics.getPreemptedResourceSecondsMap(); ApplicationSubmissionContext appSubmissionContext = app.getApplicationSubmissionContext(); unmanagedApplication = appSubmissionContext.getUnmanagedAM(); @@ -415,6 +419,22 @@ public long getReservedVCores() { return this.reservedVCores; } + public long getPreemptedMB() { + return preemptedResourceMB; + } + + public long getPreemptedVCores() { + return preemptedResourceVCores; + } + + public int getNumNonAMContainersPreempted() { + return numNonAMContainerPreempted; + } + + public int getNumAMContainersPreempted() { + return numAMContainerPreempted; + } + public long getMemorySeconds() { return memorySeconds; } @@ -423,6 +443,10 @@ public long getVcoreSeconds() { return vcoreSeconds; } + public Map getResourceSecondsMap() { + return resourceSecondsMap; + } + public long getPreemptedMemorySeconds() { return preemptedMemorySeconds; } @@ -431,6 +455,10 @@ public long getPreemptedVcoreSeconds() { return preemptedVcoreSeconds; } + public Map getPreemptedResourceSecondsMap() { + return preemptedResourceSecondsMap; + } + public List getResourceRequests() { return this.resourceRequests; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java index 5083943b65a..e13980afc39 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java @@ -20,46 +20,68 @@ import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.util.resource.Resources; @XmlRootElement -@XmlAccessorType(XmlAccessType.FIELD) +@XmlAccessorType(XmlAccessType.NONE) public class ResourceInfo { + + @XmlElement long memory; + @XmlElement int vCores; - + + private Resource resources; + public ResourceInfo() { } public ResourceInfo(Resource res) { memory = res.getMemorySize(); vCores = res.getVirtualCores(); + resources = Resources.clone(res); } public long getMemorySize() { - return memory; + if (resources == null) { + resources = Resource.newInstance(memory, vCores); + } + return resources.getMemorySize(); } public int getvCores() { - return vCores; + if (resources == null) { + resources = Resource.newInstance(memory, vCores); + } + return resources.getVirtualCores(); } - + @Override public String toString() { - return ""; + return resources.toString(); } public void setMemory(int memory) { + if (resources == null) { + resources = Resource.newInstance(memory, vCores); + } this.memory = memory; + resources.setMemorySize(memory); } public void setvCores(int vCores) { + if (resources == null) { + resources = Resource.newInstance(memory, vCores); + } this.vCores = vCores; + resources.setVirtualCores(vCores); } public Resource getResource() { - return Resource.newInstance(memory, vCores); + return Resource.newInstance(resources); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java index cf93edd2c1e..81491b14ce1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; +import java.util.Arrays; import java.util.EnumSet; import javax.xml.bind.annotation.XmlRootElement; @@ -73,7 +74,7 @@ public ResourceInfo getMaxAllocation() { } public String getSchedulerResourceTypes() { - return this.schedulingResourceTypes.toString(); + return Arrays.toString(minAllocResource.getResource().getResources()); } public int getMaxClusterLevelAppPriority() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto index 247cd2195d9..39a56a811a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto @@ -87,6 +87,8 @@ message ApplicationAttemptStateDataProto { optional int64 finish_time = 12; optional int64 preempted_memory_seconds = 13; 
optional int64 preempted_vcore_seconds = 14; + repeated StringLongMapProto application_resource_usage_map = 15; + repeated StringLongMapProto preempted_resource_usage_map = 16; } message EpochProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 1235774d934..6943731d7de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -104,6 +104,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.YarnVersionInfo; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; @@ -150,6 +151,7 @@ public MockRM(Configuration conf, RMStateStore store, public MockRM(Configuration conf, RMStateStore store, boolean useNullRMNodeLabelsManager, boolean useRealElector) { super(); + ResourceUtils.resetResourceTypes(conf); this.useNullRMNodeLabelsManager = useNullRMNodeLabelsManager; this.useRealElector = useRealElector; init(conf instanceof YarnConfiguration ? 
conf : new YarnConfiguration(conf)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index db26a875990..b24a309fc10 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -35,6 +35,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; @@ -56,6 +57,7 @@ import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; @@ -247,6 +249,8 @@ public void setUp() { ResourceScheduler scheduler = mockResourceScheduler(); ((RMContextImpl)rmContext).setScheduler(scheduler); Configuration conf = new Configuration(); + conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true); + ((RMContextImpl) rmContext).setYarnConfiguration(conf); ApplicationMasterService masterService = new ApplicationMasterService(rmContext, scheduler); appMonitor = new TestRMAppManager(rmContext, @@ -827,9 +831,12 @@ public void testEscapeApplicationSummary() { when(app.getState()).thenReturn(RMAppState.RUNNING); 
when(app.getApplicationType()).thenReturn("MAPREDUCE"); when(app.getSubmitTime()).thenReturn(1000L); + Map resourceSecondsMap = new HashMap<>(); + resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), 16384L); + resourceSecondsMap.put(ResourceInformation.VCORES.getName(), 64L); RMAppMetrics metrics = new RMAppMetrics(Resource.newInstance(1234, 56), - 10, 1, 16384, 64, 0, 0); + 10, 1, resourceSecondsMap, new HashMap<>()); when(app.getRMAppMetrics()).thenReturn(metrics); RMAppManager.ApplicationSummary.SummaryBuilder summary = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java index e684f3c1ad3..7a2e500e661 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java @@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.api.records.ContainerUpdateType; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -102,9 +103,11 @@ public void init(ApplicationMasterServiceContext amsContext, } @Override - public void registerApplicationMaster(ApplicationAttemptId - applicationAttemptId, RegisterApplicationMasterRequest request, - 
RegisterApplicationMasterResponse response) throws IOException { + public void registerApplicationMaster( + ApplicationAttemptId applicationAttemptId, + RegisterApplicationMasterRequest request, + RegisterApplicationMasterResponse response) + throws IOException, YarnException { nextProcessor.registerApplicationMaster( applicationAttemptId, request, response); } @@ -144,7 +147,8 @@ public void init(ApplicationMasterServiceContext amsContext, public void registerApplicationMaster( ApplicationAttemptId applicationAttemptId, RegisterApplicationMasterRequest request, - RegisterApplicationMasterResponse response) throws IOException { + RegisterApplicationMasterResponse response) + throws IOException, YarnException { beforeRegCount.incrementAndGet(); nextProcessor.registerApplicationMaster(applicationAttemptId, request, response); @@ -672,4 +676,38 @@ private void sentRMContainerLaunched(MockRM rm, ContainerId containerId) { Assert.fail("Cannot find RMContainer"); } } + + @Test(timeout = 3000000) + public void testResourceProfiles() throws Exception { + + MockRM rm = new MockRM(conf); + rm.start(); + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB); + RMApp app1 = rm.submitApp(2048); + nm1.nodeHeartbeat(true); + RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); + MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); + RegisterApplicationMasterResponse resp = am1.registerAppAttempt(); + Assert.assertEquals(0, resp.getResourceProfiles().size()); + rm.stop(); + conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, true); + conf.set(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE, + "profiles/sample-profiles-1.json"); + rm = new MockRM(conf); + rm.start(); + nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB); + app1 = rm.submitApp(2048); + nm1.nodeHeartbeat(true); + attempt1 = app1.getCurrentAppAttempt(); + am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); + resp = am1.registerAppAttempt(); + Assert.assertEquals(3, 
resp.getResourceProfiles().size()); + Assert.assertEquals(Resource.newInstance(1024, 1), + resp.getResourceProfiles().get("minimum")); + Assert.assertEquals(Resource.newInstance(2048, 2), + resp.getResourceProfiles().get("default")); + Assert.assertEquals(Resource.newInstance(4096, 4), + resp.getResourceProfiles().get("maximum")); + rm.stop(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index ea733a4ce4a..49718a2af72 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -56,6 +56,8 @@ import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -116,6 +118,7 @@ import org.apache.hadoop.yarn.api.records.ReservationRequest; import org.apache.hadoop.yarn.api.records.ReservationRequests; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import 
org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -2061,4 +2064,47 @@ protected ClientRMService createClientRMService() { rpc.stopProxy(client, conf); new File(excludeFile).delete(); } + + @Test + public void testGetResourceTypesInfoWhenResourceProfileDisabled() + throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + MockRM rm = new MockRM(conf) { + protected ClientRMService createClientRMService() { + return new ClientRMService(this.rmContext, scheduler, + this.rmAppManager, this.applicationACLsManager, this.queueACLsManager, + this.getRMContext().getRMDelegationTokenSecretManager()); + } + }; + rm.start(); + + YarnRPC rpc = YarnRPC.create(conf); + InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress(); + LOG.info("Connecting to ResourceManager at " + rmAddress); + ApplicationClientProtocol client = + (ApplicationClientProtocol) rpc + .getProxy(ApplicationClientProtocol.class, rmAddress, conf); + + // Make call + GetAllResourceTypeInfoRequest request = + GetAllResourceTypeInfoRequest.newInstance(); + GetAllResourceTypeInfoResponse response = client.getResourceTypeInfo(request); + + Assert.assertEquals(2, response.getResourceTypeInfo().size()); + + // Check memory + Assert.assertEquals(ResourceInformation.MEMORY_MB.getName(), + response.getResourceTypeInfo().get(0).getName()); + Assert.assertEquals(ResourceInformation.MEMORY_MB.getUnits(), + response.getResourceTypeInfo().get(0).getDefaultUnit()); + + // Check vcores + Assert.assertEquals(ResourceInformation.VCORES.getName(), + response.getResourceTypeInfo().get(1).getName()); + Assert.assertEquals(ResourceInformation.VCORES.getUnits(), + response.getResourceTypeInfo().get(1).getDefaultUnit()); + + rm.stop(); + rpc.stopProxy(client, conf); + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java index ba9de6c8d36..9ed4978bdd6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java @@ -23,6 +23,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.List; +import java.util.Map; import org.apache.commons.lang.time.DateUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -32,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore; @@ -422,6 +424,9 @@ private AggregateAppResourceUsage calculateContainerResourceMetrics( * usedMillis / DateUtils.MILLIS_PER_SECOND; long vcoreSeconds = resource.getVirtualCores() * usedMillis / DateUtils.MILLIS_PER_SECOND; - return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds); + Map map = new HashMap<>(); + map.put(ResourceInformation.MEMORY_MB.getName(), memorySeconds); + map.put(ResourceInformation.VCORES.getName(), vcoreSeconds); + return new AggregateAppResourceUsage(map); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index f826631a21d..399df02465e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -189,7 +190,8 @@ public YarnApplicationState createApplicationState() { @Override public RMAppMetrics getRMAppMetrics() { - return new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, 0, 0, 0, 0); + return new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, new HashMap<>(), + new HashMap<>()); } @Override @@ -337,8 +339,9 @@ public int getMaxAppAttempts() { public ApplicationReport createAndGetApplicationReport( String clientUserName, boolean allowAccess) { ApplicationResourceUsageReport usageReport = - ApplicationResourceUsageReport.newInstance(0, 0, null, null, null, - 0, 0, 0, 0, 0, 0); + ApplicationResourceUsageReport + .newInstance(0, 0, null, null, null, new HashMap<>(), 0, 0, + new HashMap<>()); ApplicationReport report = ApplicationReport.newInstance( getApplicationId(), appAttemptId, getUser(), getQueue(), getName(), null, 0, null, null, getDiagnostics().toString(), diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java index 7005bca6585..2287617b215 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java @@ -24,6 +24,7 @@ import java.util.Collection; import java.util.Collections; import java.util.EnumSet; +import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -40,6 +41,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; @@ -506,9 +508,16 @@ private static RMApp createRMApp(ApplicationId appId) { when(app.getCurrentAppAttempt()).thenReturn(appAttempt); when(app.getFinalApplicationStatus()).thenReturn( FinalApplicationStatus.UNDEFINED); - when(app.getRMAppMetrics()).thenReturn( - new RMAppMetrics(null, 0, 0, Integer.MAX_VALUE, Long.MAX_VALUE, - Integer.MAX_VALUE, Long.MAX_VALUE)); + Map resourceMap = new HashMap<>(); + resourceMap + .put(ResourceInformation.MEMORY_MB.getName(), (long) Integer.MAX_VALUE); + resourceMap.put(ResourceInformation.VCORES.getName(), Long.MAX_VALUE); + Map 
preemptedMap = new HashMap<>(); + preemptedMap + .put(ResourceInformation.MEMORY_MB.getName(), (long) Integer.MAX_VALUE); + preemptedMap.put(ResourceInformation.VCORES.getName(), Long.MAX_VALUE); + when(app.getRMAppMetrics()) + .thenReturn(new RMAppMetrics(null, 0, 0, resourceMap, preemptedMap)); Set appTags = new HashSet(); appTags.add("test"); appTags.add("tags"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java index c6bfcc71b23..68bb325a620 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java @@ -29,6 +29,8 @@ import java.io.FileReader; import java.io.IOException; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -46,6 +48,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent; @@ -357,15 +360,20 @@ private static RMApp createRMApp(ApplicationId appId) { 
when(app.getDiagnostics()).thenReturn( new StringBuilder("test diagnostics info")); RMAppAttempt appAttempt = mock(RMAppAttempt.class); - when(appAttempt.getAppAttemptId()).thenReturn( - ApplicationAttemptId.newInstance(appId, 1)); + when(appAttempt.getAppAttemptId()) + .thenReturn(ApplicationAttemptId.newInstance(appId, 1)); when(app.getCurrentAppAttempt()).thenReturn(appAttempt); - when(app.getFinalApplicationStatus()).thenReturn( - FinalApplicationStatus.UNDEFINED); + when(app.getFinalApplicationStatus()) + .thenReturn(FinalApplicationStatus.UNDEFINED); + Map resourceSecondsMap = new HashMap<>(); + resourceSecondsMap + .put(ResourceInformation.MEMORY_MB.getName(), (long) Integer.MAX_VALUE); + resourceSecondsMap + .put(ResourceInformation.VCORES.getName(), Long.MAX_VALUE); when(app.getRMAppMetrics()).thenReturn( - new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, Integer.MAX_VALUE, - Long.MAX_VALUE, 0, 0)); - when(app.getApplicationTags()).thenReturn(Collections. emptySet()); + new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, resourceSecondsMap, + new HashMap<>())); + when(app.getApplicationTags()).thenReturn(Collections.emptySet()); ApplicationSubmissionContext appSubmissionContext = mock(ApplicationSubmissionContext.class); when(appSubmissionContext.getPriority()) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java index 06a16ffeffe..453d805a843 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java @@ -194,7 +194,7 @@ protected RMAppAttempt storeAttempt(RMStateStore store, when(mockAttempt.getRMAppAttemptMetrics()) .thenReturn(mockRmAppAttemptMetrics); when(mockRmAppAttemptMetrics.getAggregateAppResourceUsage()) - .thenReturn(new AggregateAppResourceUsage(0, 0)); + .thenReturn(new AggregateAppResourceUsage(new HashMap<>())); dispatcher.attemptId = attemptId; store.storeNewApplicationAttempt(mockAttempt); waitNotify(dispatcher); @@ -292,7 +292,7 @@ void testRMAppStateStore(RMStateStoreHelper stateStoreHelper, when(mockRemovedAttempt.getRMAppAttemptMetrics()) .thenReturn(mockRmAppAttemptMetrics); when(mockRmAppAttemptMetrics.getAggregateAppResourceUsage()) - .thenReturn(new AggregateAppResourceUsage(0,0)); + .thenReturn(new AggregateAppResourceUsage(new HashMap<>())); attempts.put(attemptIdRemoved, mockRemovedAttempt); store.removeApplication(mockRemovedApp); @@ -369,7 +369,7 @@ void testRMAppStateStore(RMStateStoreHelper stateStoreHelper, oldAttemptState.getStartTime(), RMAppAttemptState.FINISHED, "myTrackingUrl", "attemptDiagnostics", FinalApplicationStatus.SUCCEEDED, 100, - oldAttemptState.getFinishTime(), 0, 0, 0, 0); + oldAttemptState.getFinishTime(), new HashMap<>(), new HashMap<>()); store.updateApplicationAttemptState(newAttemptState); // test updating the state of an app/attempt whose initial state was not @@ -393,7 +393,7 @@ void testRMAppStateStore(RMStateStoreHelper stateStoreHelper, oldAttemptState.getStartTime(), RMAppAttemptState.FINISHED, "myTrackingUrl", "attemptDiagnostics", FinalApplicationStatus.SUCCEEDED, 111, - oldAttemptState.getFinishTime(), 0, 0, 0, 0); + oldAttemptState.getFinishTime(), new HashMap<>(), new HashMap<>()); store.updateApplicationAttemptState(dummyAttempt); // let things settle down diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java index 5ae82391bdf..e5cf95d07b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java @@ -33,12 +33,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.service.Service; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; -import org.apache.hadoop.yarn.api.records.Container; -import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.*; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; import org.apache.hadoop.yarn.conf.HAUtil; @@ -511,7 +506,7 @@ public void testFencedState() throws Exception { when(mockAttempt.getRMAppAttemptMetrics()) .thenReturn(mockRmAppAttemptMetrics); when(mockRmAppAttemptMetrics.getAggregateAppResourceUsage()) - .thenReturn(new AggregateAppResourceUsage(0,0)); + .thenReturn(new AggregateAppResourceUsage(new HashMap<>())); store.storeNewApplicationAttempt(mockAttempt); assertEquals("RMStateStore should have been in fenced state", true, 
store.isFencedState()); @@ -523,7 +518,7 @@ public void testFencedState() throws Exception { store.getCredentialsFromAppAttempt(mockAttempt), startTime, RMAppAttemptState.FINISHED, "testUrl", "test", FinalApplicationStatus.SUCCEEDED, 100, - finishTime, 0, 0, 0, 0); + finishTime, new HashMap<>(), new HashMap<>()); store.updateApplicationAttemptState(newAttemptState); assertEquals("RMStateStore should have been in fenced state", true, store.isFencedState()); @@ -751,10 +746,20 @@ private static ApplicationStateData createAppState( private static ApplicationAttemptStateData createFinishedAttempt( ApplicationAttemptId attemptId, Container container, long startTime, int amExitStatus) { + Map resourceSecondsMap = new HashMap<>(); + Map preemptedResoureSecondsMap = new HashMap<>(); + resourceSecondsMap + .put(ResourceInformation.MEMORY_MB.getName(), 0L); + resourceSecondsMap + .put(ResourceInformation.VCORES.getName(), 0L); + preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), + 0L); + preemptedResoureSecondsMap + .put(ResourceInformation.VCORES.getName(), 0L); return ApplicationAttemptStateData.newInstance(attemptId, container, null, startTime, RMAppAttemptState.FINISHED, "myTrackingUrl", "attemptDiagnostics", FinalApplicationStatus.SUCCEEDED, - amExitStatus, 0, 0, 0, 0, 0); + amExitStatus, 0, resourceSecondsMap, preemptedResoureSecondsMap); } private ApplicationAttemptId storeAttempt(RMStateStore store, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceProfiles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceProfiles.java new file mode 100644 index 00000000000..a38f59bbe4d --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceProfiles.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.resource; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Common test class for resource profile related tests. 
+ */ +public class TestResourceProfiles { + + @Test + public void testProfilesEnabled() throws Exception { + ResourceProfilesManager manager = new ResourceProfilesManagerImpl(); + Configuration conf = new Configuration(); + // be default resource profiles should not be enabled + manager.init(conf); + try { + manager.getResourceProfiles(); + Assert + .fail("Exception should be thrown as resource profile is not enabled" + + " and getResourceProfiles is invoked."); + } catch (YarnException ie) { + } + conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, true); + try { + manager.init(conf); + Assert.fail( + "Exception should be thrown due to missing resource profiles file"); + } catch (IOException ie) { + } + conf.set(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE, + "profiles/sample-profiles-1.json"); + manager.init(conf); + } + + @Test + public void testLoadProfiles() throws Exception { + ResourceProfilesManager manager = new ResourceProfilesManagerImpl(); + Configuration conf = new Configuration(); + conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, true); + conf.set(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE, + "profiles/sample-profiles-1.json"); + manager.init(conf); + Map profiles = manager.getResourceProfiles(); + Map expected = new HashMap<>(); + expected.put("minimum", Resource.newInstance(1024, 1)); + expected.put("default", Resource.newInstance(2048, 2)); + expected.put("maximum", Resource.newInstance(4096, 4)); + + for (Map.Entry entry : expected.entrySet()) { + String profile = entry.getKey(); + Resource res = entry.getValue(); + Assert.assertTrue("Mandatory profile '" + profile + "' missing", + profiles.containsKey(profile)); + Assert.assertEquals("Profile " + profile + "' resources don't match", res, + manager.getProfile(profile)); + } + } + + @Test + public void testLoadProfilesMissingMandatoryProfile() throws Exception { + + Configuration conf = new Configuration(); + 
conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, true); + + String[] badProfiles = {"profiles/illegal-profiles-1.json", + "profiles/illegal-profiles-2.json", "profiles/illegal-profiles-3.json"}; + for (String file : badProfiles) { + ResourceProfilesManager manager = new ResourceProfilesManagerImpl(); + conf.set(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE, file); + try { + manager.init(conf); + Assert.fail("Bad profile '" + file + "' is not valid"); + } catch (IOException ie) { + } + } + } + + @Test + public void testGetProfile() throws Exception { + Configuration conf = new Configuration(); + conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, true); + ResourceProfilesManager manager = new ResourceProfilesManagerImpl(); + conf.set(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE, + "profiles/sample-profiles-2.json"); + manager.init(conf); + Map expected = new HashMap<>(); + expected.put("minimum", Resource.newInstance(1024, 1)); + expected.put("default", Resource.newInstance(2048, 2)); + expected.put("maximum", Resource.newInstance(4096, 4)); + expected.put("small", Resource.newInstance(1024, 1)); + expected.put("medium", Resource.newInstance(2048, 1)); + expected.put("large", Resource.newInstance(4096, 4)); + + for (Map.Entry entry : expected.entrySet()) { + String profile = entry.getKey(); + Resource res = entry.getValue(); + Assert.assertEquals("Profile " + profile + "' resources don't match", res, + manager.getProfile(profile)); + } + } + + @Test + public void testGetMandatoryProfiles() throws Exception { + ResourceProfilesManager manager = new ResourceProfilesManagerImpl(); + Configuration conf = new Configuration(); + conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, true); + conf.set(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE, + "profiles/sample-profiles-1.json"); + manager.init(conf); + Map expected = new HashMap<>(); + expected.put("minimum", Resource.newInstance(1024, 1)); + expected.put("default", 
Resource.newInstance(2048, 2)); + expected.put("maximum", Resource.newInstance(4096, 4)); + + Assert.assertEquals("Profile 'minimum' resources don't match", + expected.get("minimum"), manager.getMinimumProfile()); + Assert.assertEquals("Profile 'default' resources don't match", + expected.get("default"), manager.getDefaultProfile()); + Assert.assertEquals("Profile 'maximum' resources don't match", + expected.get("maximum"), manager.getMaximumProfile()); + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java deleted file mode 100644 index 2a10747ac9d..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.server.resourcemanager.resource; - -import static org.apache.hadoop.yarn.util.resource.Resources.*; -import static org.junit.Assert.*; -import org.junit.Test; - -public class TestResources { - @Test(timeout=10000) - public void testFitsIn() { - assertTrue(fitsIn(createResource(1, 1), createResource(2, 2))); - assertTrue(fitsIn(createResource(2, 2), createResource(2, 2))); - assertFalse(fitsIn(createResource(2, 2), createResource(1, 1))); - assertFalse(fitsIn(createResource(1, 2), createResource(2, 1))); - assertFalse(fitsIn(createResource(2, 1), createResource(1, 2))); - } - - @Test(timeout=10000) - public void testComponentwiseMin() { - assertEquals(createResource(1, 1), - componentwiseMin(createResource(1, 1), createResource(2, 2))); - assertEquals(createResource(1, 1), - componentwiseMin(createResource(2, 2), createResource(1, 1))); - assertEquals(createResource(1, 1), - componentwiseMin(createResource(1, 2), createResource(2, 1))); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 941c215df4c..446b6ee4e9a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -113,6 +113,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import 
org.apache.hadoop.yarn.util.ControlledClock; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.After; import org.junit.Assert; @@ -212,6 +213,7 @@ public void testLoadConfigurationOnInitialize() throws IOException { conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512); conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 128); + ResourceUtils.resetResourceTypes(conf); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); @@ -240,6 +242,7 @@ public void testNonMinZeroResourcesSettings() throws IOException { FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512); conf.setInt( FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2); + ResourceUtils.resetResourceTypes(conf); scheduler.init(conf); scheduler.reinitialize(conf, null); Assert.assertEquals(256, scheduler.getMinimumResourceCapability().getMemorySize()); @@ -257,6 +260,7 @@ public void testMinZeroResourcesSettings() throws IOException { FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512); conf.setInt( FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2); + ResourceUtils.resetResourceTypes(conf); scheduler.init(conf); scheduler.reinitialize(conf, null); Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getMemorySize()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java index a009bc07d47..1010c0e2b43 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java @@ -22,6 +22,7 @@ import static org.mockito.Mockito.when; import java.io.IOException; +import java.util.HashMap; import org.apache.hadoop.yarn.api.ApplicationBaseProtocol; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -63,9 +64,10 @@ public void testAppBlockRenderWithNullCurrentAppAttempt() throws Exception { when(app.getStartTime()).thenReturn(0L); when(app.getFinishTime()).thenReturn(0L); when(app.createApplicationState()).thenReturn(YarnApplicationState.FAILED); - - RMAppMetrics appMetrics = new RMAppMetrics( - Resource.newInstance(0, 0), 0, 0, 0, 0, 0, 0); + + RMAppMetrics appMetrics = + new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, new HashMap<>(), + new HashMap<>()); when(app.getRMAppMetrics()).thenReturn(appMetrics); // initialize RM Context, and create RMApp, without creating RMAppAttempt diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java index 78fadef03e5..1a40e612111 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java @@ -52,6 +52,7 @@ 
import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.concurrent.ConcurrentMap; @@ -142,8 +143,8 @@ private static RMContext mockRMContext(List states) { MockRMApp app = new MockRMApp(i, i, state) { @Override public RMAppMetrics getRMAppMetrics() { - return new RMAppMetrics(Resource.newInstance(0, 0), - 0, 0, 0, 0, 0, 0); + return new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, + new HashMap<>(), new HashMap<>()); } @Override public YarnApplicationState createApplicationState() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java index 1cbdec3a225..07f74a358e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java @@ -1528,7 +1528,7 @@ public void verifyAppsXML(NodeList nodes, RMApp app, boolean hasResourceReq) public void verifyAppInfo(JSONObject info, RMApp app, boolean hasResourceReqs) throws JSONException, Exception { - int expectedNumberOfElements = 36 + (hasResourceReqs ? 2 : 0); + int expectedNumberOfElements = 38 + (hasResourceReqs ? 
2 : 0); String appNodeLabelExpression = null; String amNodeLabelExpression = null; if (app.getApplicationSubmissionContext() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-1.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-1.json new file mode 100644 index 00000000000..b6aca96bd0f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-1.json @@ -0,0 +1,10 @@ +{ + "minimum": { + "memoryMB" : 1024, + "vcores" : 1 + }, + "default" : { + "memoryMB" : 2048, + "vcores" : 2 + }, +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-2.json new file mode 100644 index 00000000000..d62a3116abe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-2.json @@ -0,0 +1,10 @@ +{ + "minimum": { + "memoryMB" : 1024, + "vcores" : 1 + }, + "maximum" : { + "memoryMB": 4096, + "vcores" : 4 + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-3.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-3.json new file mode 100644 index 00000000000..9ee74dee611 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/illegal-profiles-3.json @@ -0,0 +1,10 @@ +{ + "default" : { + "memoryMB" : 2048, + "vcores" : 2 + }, + "maximum" : { + 
"memoryMB": 4096, + "vcores" : 4 + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/sample-profiles-1.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/sample-profiles-1.json new file mode 100644 index 00000000000..65b736078ba --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/sample-profiles-1.json @@ -0,0 +1,14 @@ +{ + "minimum": { + "memory-mb" : 1024, + "vcores" : 1 + }, + "default" : { + "memory-mb" : 2048, + "vcores" : 2 + }, + "maximum" : { + "memory-mb": 4096, + "vcores" : 4 + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/sample-profiles-2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/sample-profiles-2.json new file mode 100644 index 00000000000..c2356710383 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/profiles/sample-profiles-2.json @@ -0,0 +1,26 @@ +{ + "minimum": { + "memory-mb" : 1024, + "vcores" : 1 + }, + "default" : { + "memory-mb" : 2048, + "vcores" : 2 + }, + "maximum" : { + "memory-mb": 4096, + "vcores" : 4 + }, + "small" : { + "memory-mb": 1024, + "vcores": 1 + }, + "medium" : { + "memory-mb": 2048, + "vcores": 1 + }, + "large": { + "memory-mb" : 4096, + "vcores" : 4 + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java index 71de6b470e9..5ce4803f603 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java @@ -27,6 +27,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -59,6 +63,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; @@ -303,6 +309,24 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( return clientRMProxy.updateApplicationTimeouts(request); } + 
@Override + public GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) throws YarnException, IOException { + return clientRMProxy.getResourceProfiles(request); + } + + @Override + public GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException { + return clientRMProxy.getResourceProfile(request); + } + + @Override + public GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) throws YarnException, IOException { + return clientRMProxy.getResourceTypeInfo(request); + } + @VisibleForTesting public void setRMClient(ApplicationClientProtocol clientRM) { this.clientRMProxy = clientRM; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java index 3a36eec66ac..b8f8a9fd841 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java @@ -32,6 +32,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import 
org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -64,6 +68,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; @@ -709,4 +715,21 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( throw new NotImplementedException(); } + @Override + public GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } + + @Override + public GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) throws YarnException, IOException { + throw new NotImplementedException(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java index fd2c610c7fe..73cc18558d1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java @@ -38,6 +38,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -70,6 +74,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; @@ -403,6 +409,27 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( return pipeline.getRootInterceptor().updateApplicationTimeouts(request); } + @Override + public GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getResourceProfiles(request); + } + + @Override + public GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getResourceProfile(request); + } + + @Override + public GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) throws YarnException, IOException { + RequestInterceptorChainWrapper pipeline = getInterceptorChain(); + return pipeline.getRootInterceptor().getResourceTypeInfo(request); + } + private RequestInterceptorChainWrapper getInterceptorChain() throws IOException { String user = UserGroupInformation.getCurrentUser().getUserName(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java index c403bd5006c..cb1b529d8eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java @@ -24,6 +24,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest; import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; @@ -56,6 +60,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; @@ -264,4 +270,22 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( throws YarnException, IOException { return getNextInterceptor().updateApplicationTimeouts(request); } + + @Override + public GetAllResourceProfilesResponse getResourceProfiles( + GetAllResourceProfilesRequest request) throws YarnException, 
IOException { + return getNextInterceptor().getResourceProfiles(request); + } + + @Override + public GetResourceProfileResponse getResourceProfile( + GetResourceProfileRequest request) throws YarnException, IOException { + return getNextInterceptor().getResourceProfile(request); + } + + @Override + public GetAllResourceTypeInfoResponse getResourceTypeInfo( + GetAllResourceTypeInfoRequest request) throws YarnException, IOException { + return getNextInterceptor().getResourceTypeInfo(request); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java index de282fd0631..3b247fe19c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java @@ -95,6 +95,7 @@ import org.apache.hadoop.yarn.server.timeline.TimelineStore; import org.apache.hadoop.yarn.server.timeline.recovery.MemoryTimelineStateStore; import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; @@ -250,6 +251,7 @@ public void serviceInit(Configuration conf) throws Exception { YarnConfiguration.DEFAULT_YARN_MINICLUSTER_USE_RPC); failoverTimeout = conf.getInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS); + ResourceUtils.resetResourceTypes(conf); if (useRpc && !useFixedPorts) { throw new YarnRuntimeException("Invalid configuration!" +