diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 62aa4972929..ae051654f41 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -66,6 +66,7 @@
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -517,4 +518,10 @@ public void killApplication(ApplicationId appId, String diagnostics)
throws YarnException, IOException {
client.killApplication(appId, diagnostics);
}
+
+ @Override
+ public List<ResourceTypeInfo> getResourceTypeInfo()
+ throws YarnException, IOException {
+ return client.getResourceTypeInfo();
+ }
}
diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 65eac654845..8b6ea64118c 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -72,6 +72,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -494,6 +496,13 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
throws YarnException, IOException {
return null;
}
+
+ @Override
+ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
+ GetAllResourceTypeInfoRequest request)
+ throws YarnException, IOException {
+ return null;
+ }
}
class HistoryService extends AMService implements HSClientProtocol {
diff --git hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6825a36ebdd..92d2b3b5061 100644
--- hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -599,4 +599,22 @@
+  <!-- NOTE(review): the body of this hunk was garbled during extraction   -->
+  <!-- (XML elements stripped). The excludes below are a reconstruction    -->
+  <!-- and must be confirmed against the original patch.                   -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.api.records.Resource" />
+    <Method name="getResources" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.api.records.impl.LightWeightResource" />
+    <Method name="getResources" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl" />
+    <Method name="getResources" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
index b7d925a6592..8e76a11dc27 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
@@ -52,11 +52,13 @@ void init(ApplicationMasterServiceContext amsContext,
* @param request Register Request.
* @param response Register Response.
* @throws IOException IOException.
+ * @throws YarnException in critical situation where invalid
+ * profiles/resources are added.
*/
- void registerApplicationMaster(
- ApplicationAttemptId applicationAttemptId,
+ void registerApplicationMaster(ApplicationAttemptId applicationAttemptId,
RegisterApplicationMasterRequest request,
- RegisterApplicationMasterResponse response) throws IOException;
+ RegisterApplicationMasterResponse response)
+ throws IOException, YarnException;
/**
* Allocate call.
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
index 6d39366dccd..1f0a360b5b2 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
@@ -65,6 +65,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
@@ -75,6 +77,7 @@
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
/**
* The protocol between clients and the ResourceManager
@@ -589,4 +592,18 @@ SignalContainerResponse signalToContainer(
public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
UpdateApplicationTimeoutsRequest request)
throws YarnException, IOException;
+
+ /**
+ *
+ * The interface used by clients to get all the resource type information
+ * known to the ResourceManager.
+ *
+ * @param request request to get all the resource type information
+ * @return response containing the resource type information known to the RM
+ * @throws YarnException if any error happens inside YARN
+ * @throws IOException in case of other errors
+ */
+ @Public
+ @Unstable
+ GetAllResourceTypeInfoResponse getResourceTypeInfo(
+ GetAllResourceTypeInfoRequest request) throws YarnException, IOException;
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoRequest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoRequest.java
new file mode 100644
index 00000000000..3bda4f54ec5
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoRequest.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Request class for getting all the resource profiles from the RM.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class GetAllResourceTypeInfoRequest {
+
+ public static GetAllResourceTypeInfoRequest newInstance() {
+ return Records.newRecord(GetAllResourceTypeInfoRequest.class);
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java
new file mode 100644
index 00000000000..b57b96df3fd
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetAllResourceTypeInfoResponse.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.util.List;
+
+/**
+ * Response class for getting all the resource profiles from the RM.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class GetAllResourceTypeInfoResponse {
+
+ public static GetAllResourceTypeInfoResponse newInstance() {
+ return Records.newRecord(GetAllResourceTypeInfoResponse.class);
+ }
+
+ public abstract void setResourceTypeInfo(List<ResourceTypeInfo> resourceTypes);
+
+ public abstract List<ResourceTypeInfo> getResourceTypeInfo();
+
+ @Override
+ public boolean equals(Object other) {
+ if (this == other) {
+ return true;
+ }
+ if (other == null || !(other instanceof GetAllResourceTypeInfoResponse)) {
+ return false;
+ }
+ return ((GetAllResourceTypeInfoResponse) other).getResourceTypeInfo()
+ .equals(this.getResourceTypeInfo());
+ }
+
+ @Override
+ public int hashCode() {
+ return this.getResourceTypeInfo().hashCode();
+ }
+
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java
new file mode 100644
index 00000000000..dbd9c37ceec
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+/**
+ * Enum which represents the resource type. Currently, the only type allowed is
+ * COUNTABLE.
+ */
+public enum ResourceTypes {
+ COUNTABLE
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
index 3cf8f3defa3..d2e33ff9bca 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
@@ -24,6 +24,8 @@
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records;
+import java.util.Map;
+
/**
* Contains various scheduling metrics to be reported by UI and CLI.
*/
@@ -35,9 +37,9 @@
@Unstable
public static ApplicationResourceUsageReport newInstance(
int numUsedContainers, int numReservedContainers, Resource usedResources,
- Resource reservedResources, Resource neededResources, long memorySeconds,
- long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc,
- long preemptedMemorySeconds, long preemptedVcoresSeconds) {
+ Resource reservedResources, Resource neededResources,
+ Map<String, Long> resourceSecondsMap, float queueUsagePerc,
+ float clusterUsagePerc, Map<String, Long> preemptedResourceSecondsMap) {
ApplicationResourceUsageReport report =
Records.newRecord(ApplicationResourceUsageReport.class);
report.setNumUsedContainers(numUsedContainers);
@@ -45,12 +47,10 @@ public static ApplicationResourceUsageReport newInstance(
report.setUsedResources(usedResources);
report.setReservedResources(reservedResources);
report.setNeededResources(neededResources);
- report.setMemorySeconds(memorySeconds);
- report.setVcoreSeconds(vcoreSeconds);
+ report.setResourceSecondsMap(resourceSecondsMap);
report.setQueueUsagePercentage(queueUsagePerc);
report.setClusterUsagePercentage(clusterUsagePerc);
- report.setPreemptedMemorySeconds(preemptedMemorySeconds);
- report.setPreemptedVcoreSeconds(preemptedVcoresSeconds);
+ report.setPreemptedResourceSecondsMap(preemptedResourceSecondsMap);
return report;
}
@@ -229,4 +229,47 @@ public static ApplicationResourceUsageReport newInstance(
@Public
@Unstable
public abstract long getPreemptedVcoreSeconds();
+
+ /**
+ * Get the aggregated number of resources that the application has
+ * allocated times the number of seconds the application has been running.
+ * @return map containing the resource name and aggregated resource-seconds
+ */
+ @Public
+ @Unstable
+ public abstract Map<String, Long> getResourceSecondsMap();
+
+ /**
+ * Set the aggregated number of resources that the application has
+ * allocated times the number of seconds the application has been running.
+ * @param resourceSecondsMap map containing the resource name and aggregated
+ * resource-seconds
+ */
+ @Private
+ @Unstable
+ public abstract void setResourceSecondsMap(
+ Map<String, Long> resourceSecondsMap);
+
+
+ /**
+ * Get the aggregated number of resources preempted that the application has
+ * allocated times the number of seconds the application has been running.
+ * @return map containing the resource name and aggregated preempted
+ * resource-seconds
+ */
+ @Public
+ @Unstable
+ public abstract Map<String, Long> getPreemptedResourceSecondsMap();
+
+ /**
+ * Set the aggregated number of resources preempted that the application has
+ * allocated times the number of seconds the application has been running.
+ * @param preemptedResourceSecondsMap map containing the resource name and
+ * aggregated preempted resource-seconds
+ */
+ @Private
+ @Unstable
+ public abstract void setPreemptedResourceSecondsMap(
+ Map<String, Long> preemptedResourceSecondsMap);
+
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 89ca5d62f7e..9a5bc79ae08 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -18,12 +18,19 @@
package org.apache.hadoop.yarn.api.records;
+import java.util.Arrays;
+
import org.apache.commons.lang.NotImplementedException;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
-
+import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
/**
* Resource models a set of computer resources in the
@@ -38,10 +45,10 @@
* the average number of threads it expects to have runnable at a time.
*
* Virtual cores take integer values and thus currently CPU-scheduling is
- * very coarse. A complementary axis for CPU requests that represents processing
- * power will likely be added in the future to enable finer-grained resource
- * configuration.
- *
+ * very coarse. A complementary axis for CPU requests that represents
+ * processing power will likely be added in the future to enable finer-grained
+ * resource configuration.
+ *
* Typically, applications request Resource of suitable
* capability to run their component tasks.
*
@@ -52,64 +59,84 @@
@Stable
public abstract class Resource implements Comparable<Resource> {
- private static class SimpleResource extends Resource {
- private long memory;
- private long vcores;
- SimpleResource(long memory, long vcores) {
- this.memory = memory;
- this.vcores = vcores;
- }
- @Override
- public int getMemory() {
- return castToIntSafely(memory);
- }
- @Override
- public void setMemory(int memory) {
- this.memory = memory;
- }
- @Override
- public long getMemorySize() {
- return memory;
- }
- @Override
- public void setMemorySize(long memory) {
- this.memory = memory;
- }
- @Override
- public int getVirtualCores() {
- return castToIntSafely(vcores);
- }
- @Override
- public void setVirtualCores(int vcores) {
- this.vcores = vcores;
- }
- }
+ protected ResourceInformation[] resources = null;
+
+ // Number of mandatory resources, this is added to avoid invoke
+ // MandatoryResources.values().length, since values() internally will
+ // copy array, etc.
+ protected static final int NUM_MANDATORY_RESOURCES = 2;
+
+ protected static final int MEMORY_INDEX = 0;
+ protected static final int VCORES_INDEX = 1;
@Public
@Stable
public static Resource newInstance(int memory, int vCores) {
- return new SimpleResource(memory, vCores);
+ if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
+ Resource ret = Records.newRecord(Resource.class);
+ ret.setMemorySize(memory);
+ ret.setVirtualCores(vCores);
+ return ret;
+ }
+ return new LightWeightResource(memory, vCores);
}
@Public
@Stable
public static Resource newInstance(long memory, int vCores) {
- return new SimpleResource(memory, vCores);
+ if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
+ Resource ret = Records.newRecord(Resource.class);
+ ret.setMemorySize(memory);
+ ret.setVirtualCores(vCores);
+ return ret;
+ }
+ return new LightWeightResource(memory, vCores);
+ }
+
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public static Resource newInstance(Resource resource) {
+ Resource ret = Resource.newInstance(resource.getMemorySize(),
+ resource.getVirtualCores());
+ if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
+ Resource.copy(resource, ret);
+ }
+ return ret;
+ }
+
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public static void copy(Resource source, Resource dest) {
+ for (ResourceInformation entry : source.getResources()) {
+ dest.setResourceInformation(entry.getName(), entry);
+ }
}
/**
* This method is DEPRECATED:
* Use {@link Resource#getMemorySize()} instead
*
- * Get memory of the resource.
- * @return memory of the resource
+ * Get memory of the resource. Note - while memory has
+ * never had a unit specified, all YARN configurations have specified memory
+ * in MB. The assumption has been that the daemons and applications are always
+ * using the same units. With the introduction of the ResourceInformation
+ * class we have support for units - so this function will continue to return
+ * memory but in the units of MB
+ *
+ * @return memory(in MB) of the resource
*/
@Public
@Deprecated
public abstract int getMemory();
/**
- * Get memory of the resource.
+ * Get memory of the resource. Note - while memory has
+ * never had a unit specified, all YARN configurations have specified memory
+ * in MB. The assumption has been that the daemons and applications are always
+ * using the same units. With the introduction of the ResourceInformation
+ * class we have support for units - so this function will continue to return
+ * memory but in the units of MB
+ *
* @return memory of the resource
*/
@Public
@@ -120,8 +147,14 @@ public long getMemorySize() {
}
/**
- * Set memory of the resource.
- * @param memory memory of the resource
+ * Set memory of the resource. Note - while memory has
+ * never had a unit specified, all YARN configurations have specified memory
+ * in MB. The assumption has been that the daemons and applications are always
+ * using the same units. With the introduction of the ResourceInformation
+ * class we have support for units - so this function will continue to set
+ * memory but the assumption is that the value passed is in units of MB.
+ *
+ * @param memory memory(in MB) of the resource
*/
@Public
@Deprecated
@@ -138,73 +171,300 @@ public void setMemorySize(long memory) {
"This method is implemented by ResourcePBImpl");
}
-
/**
* Get number of virtual cpu cores of the resource.
*
* Virtual cores are a unit for expressing CPU parallelism. A node's capacity
- * should be configured with virtual cores equal to its number of physical cores.
- * A container should be requested with the number of cores it can saturate, i.e.
- * the average number of threads it expects to have runnable at a time.
- *
+ * should be configured with virtual cores equal to its number of physical
+ * cores. A container should be requested with the number of cores it can
+ * saturate, i.e. the average number of threads it expects to have runnable
+ * at a time.
+ *
* @return num of virtual cpu cores of the resource
*/
@Public
@Evolving
public abstract int getVirtualCores();
-
+
/**
* Set number of virtual cpu cores of the resource.
*
* Virtual cores are a unit for expressing CPU parallelism. A node's capacity
- * should be configured with virtual cores equal to its number of physical cores.
- * A container should be requested with the number of cores it can saturate, i.e.
- * the average number of threads it expects to have runnable at a time.
- *
+ * should be configured with virtual cores equal to its number of physical
+ * cores. A container should be requested with the number of cores it can
+ * saturate, i.e. the average number of threads it expects to have runnable
+ * at a time.
+ *
* @param vCores number of virtual cpu cores of the resource
*/
@Public
@Evolving
public abstract void setVirtualCores(int vCores);
- @Override
- public int hashCode() {
- final int prime = 263167;
+ /**
+ * Get ResourceInformation for all resources.
+ *
+ * @return Map of resource name to ResourceInformation
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public ResourceInformation[] getResources() {
+ return resources;
+ }
- int result = (int) (939769357
- + getMemorySize()); // prime * result = 939769357 initially
- result = prime * result + getVirtualCores();
- return result;
+ /**
+ * Get ResourceInformation for a specified resource.
+ *
+ * @param resource name of the resource
+ * @return the ResourceInformation object for the resource
+ * @throws ResourceNotFoundException if the resource can't be found
+ */
+ @Public
+ @InterfaceStability.Unstable
+ public ResourceInformation getResourceInformation(String resource)
+ throws ResourceNotFoundException {
+ Integer index = ResourceUtils.getResourceTypeIndex().get(resource);
+ if (index != null) {
+ return resources[index];
+ }
+ throw new ResourceNotFoundException("Unknown resource '" + resource
+ + "'. Known resources are " + Arrays.toString(resources));
+ }
+
+ /**
+ * Get ResourceInformation for a specified resource from a given index.
+ *
+ * @param index
+ * of the resource
+ * @return the ResourceInformation object for the resource
+ * @throws ResourceNotFoundException
+ * if the resource can't be found
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public ResourceInformation getResourceInformation(int index)
+ throws ResourceNotFoundException {
+ ResourceInformation ri = null;
+ try {
+ ri = resources[index];
+ } catch (ArrayIndexOutOfBoundsException e) {
+ throwExceptionWhenArrayOutOfBound(index);
+ }
+ return ri;
+ }
+
+ /**
+ * Get the value for a specified resource. No information about the units is
+ * returned.
+ *
+ * @param resource name of the resource
+ * @return the value for the resource
+ * @throws ResourceNotFoundException if the resource can't be found
+ */
+ @Public
+ @InterfaceStability.Unstable
+ public long getResourceValue(String resource)
+ throws ResourceNotFoundException {
+ return getResourceInformation(resource).getValue();
+ }
+
+ /**
+ * Set the ResourceInformation object for a particular resource.
+ *
+ * @param resource the resource for which the ResourceInformation is provided
+ * @param resourceInformation ResourceInformation object
+ * @throws ResourceNotFoundException if the resource is not found
+ */
+ @Public
+ @InterfaceStability.Unstable
+ public void setResourceInformation(String resource,
+ ResourceInformation resourceInformation)
+ throws ResourceNotFoundException {
+ if (resource.equals(ResourceInformation.MEMORY_URI)) {
+ this.setMemorySize(resourceInformation.getValue());
+ return;
+ }
+ if (resource.equals(ResourceInformation.VCORES_URI)) {
+ this.setVirtualCores((int) resourceInformation.getValue());
+ return;
+ }
+ ResourceInformation storedResourceInfo = getResourceInformation(resource);
+ ResourceInformation.copy(resourceInformation, storedResourceInfo);
+ }
+
+ /**
+ * Set the ResourceInformation object for a particular resource.
+ *
+ * @param index
+ * the resource index for which the ResourceInformation is provided
+ * @param resourceInformation
+ * ResourceInformation object
+ * @throws ResourceNotFoundException
+ * if the resource is not found
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public void setResourceInformation(int index,
+ ResourceInformation resourceInformation)
+ throws ResourceNotFoundException {
+ if (index < 0 || index >= resources.length) {
+ throw new ResourceNotFoundException("Unknown resource at index '" + index
+ + "'. Valid resources are " + Arrays.toString(resources));
+ }
+ ResourceInformation.copy(resourceInformation, resources[index]);
+ }
+
+ /**
+ * Set the value of a resource in the ResourceInformation object. The unit of
+ * the value is assumed to be the one in the ResourceInformation object.
+ *
+ * @param resource the resource for which the value is provided.
+ * @param value the value to set
+ * @throws ResourceNotFoundException if the resource is not found
+ */
+ @Public
+ @InterfaceStability.Unstable
+ public void setResourceValue(String resource, long value)
+ throws ResourceNotFoundException {
+ if (resource.equals(ResourceInformation.MEMORY_URI)) {
+ this.setMemorySize(value);
+ return;
+ }
+ if (resource.equals(ResourceInformation.VCORES_URI)) {
+ this.setVirtualCores((int)value);
+ return;
+ }
+
+ ResourceInformation storedResourceInfo = getResourceInformation(resource);
+ storedResourceInfo.setValue(value);
+ }
+
+ /**
+ * Set the value of a resource in the ResourceInformation object. The unit of
+ * the value is assumed to be the one in the ResourceInformation object.
+ *
+ * @param index
+ * the resource index for which the value is provided.
+ * @param value
+ * the value to set
+ * @throws ResourceNotFoundException
+ * if the resource is not found
+ */
+ @InterfaceAudience.Private
+ @InterfaceStability.Unstable
+ public void setResourceValue(int index, long value)
+ throws ResourceNotFoundException {
+ try {
+ resources[index].setValue(value);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ throwExceptionWhenArrayOutOfBound(index);
+ }
+ }
+
+ private void throwExceptionWhenArrayOutOfBound(int index) {
+ String exceptionMsg = String.format(
+ "Trying to access ResourceInformation for given index=%d. "
+ + "Acceptable index range is [0,%d), please double check "
+ + "configured resources in resource-types.xml",
+ index, ResourceUtils.getNumberOfKnownResourceTypes());
+
+ throw new ResourceNotFoundException(exceptionMsg);
}
@Override
public boolean equals(Object obj) {
- if (this == obj)
+ if (this == obj) {
return true;
- if (obj == null)
+ }
+ if (obj == null) {
return false;
- if (!(obj instanceof Resource))
+ }
+ if (!(obj instanceof Resource)) {
return false;
+ }
Resource other = (Resource) obj;
- if (getMemorySize() != other.getMemorySize() ||
- getVirtualCores() != other.getVirtualCores()) {
+
+ ResourceInformation[] otherVectors = other.getResources();
+
+ if (resources.length != otherVectors.length) {
return false;
}
+
+ for (int i = 0; i < resources.length; i++) {
+ ResourceInformation a = resources[i];
+ ResourceInformation b = otherVectors[i];
+ if ((a != b) && ((a == null) || !a.equals(b))) {
+ return false;
+ }
+ }
return true;
}
@Override
public int compareTo(Resource other) {
- long diff = this.getMemorySize() - other.getMemorySize();
- if (diff == 0) {
- diff = this.getVirtualCores() - other.getVirtualCores();
+ ResourceInformation[] otherResources = other.getResources();
+
+ int arrLenThis = this.resources.length;
+ int arrLenOther = otherResources.length;
+
+ // compare memory and vcores first(in that order) to preserve
+ // existing behaviour
+ for (int i = 0; i < arrLenThis; i++) {
+ ResourceInformation otherEntry;
+ try {
+ otherEntry = otherResources[i];
+ } catch (ArrayIndexOutOfBoundsException e) {
+ // For two vectors with different size and same prefix. Shorter vector
+ // goes first.
+ return 1;
+ }
+ ResourceInformation entry = resources[i];
+
+ long diff = entry.compareTo(otherEntry);
+ if (diff > 0) {
+ return 1;
+ } else if (diff < 0) {
+ return -1;
+ }
+ }
+
+ if (arrLenThis < arrLenOther) {
+ return -1;
}
- return diff == 0 ? 0 : (diff > 0 ? 1 : -1);
+
+ return 0;
}
@Override
public String toString() {
- return "<memory:" + getMemorySize() + ", vCores:" + getVirtualCores() + ">";
+ StringBuilder sb = new StringBuilder();
+
+ // NOTE(review): the string content of this hunk was stripped during
+ // extraction; reconstructed below — confirm against the original patch.
+ sb.append("<memory:").append(getMemorySize()).append(", vCores:")
+ .append(getVirtualCores());
+
+ for (int i = 2; i < resources.length; i++) {
+ ResourceInformation ri = resources[i];
+ sb.append(", ").append(ri.getName()).append(": ")
+ .append(ri.getValue()).append(ri.getUnits());
+ }
+
+ sb.append(">");
+ return sb.toString();
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 47;
+ long result = 0;
+ for (ResourceInformation entry : resources) {
+ result = prime * result + entry.hashCode();
+ }
+ return (int) result;
}
/**
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
new file mode 100644
index 00000000000..9af7b5806ff
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.curator.shaded.com.google.common.reflect.ClassPath;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+
+/**
+ * Class to encapsulate information about a Resource - the name of the resource,
+ * the units(milli, micro, etc), the type(countable), and the value.
+ */
+public class ResourceInformation implements Comparable {
+
+ private String name;
+ private String units;
+ private ResourceTypes resourceType;
+ private long value;
+
+ public static final String MEMORY_URI = "memory-mb";
+ public static final String VCORES_URI = "vcores";
+
+ public static final ResourceInformation MEMORY_MB =
+ ResourceInformation.newInstance(MEMORY_URI, "Mi");
+ public static final ResourceInformation VCORES =
+ ResourceInformation.newInstance(VCORES_URI);
+
+ /**
+ * Get the name for the resource.
+ *
+ * @return resource name
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * Set the name for the resource.
+ *
+ * @param rName name for the resource
+ */
+ public void setName(String rName) {
+ this.name = rName;
+ }
+
+ /**
+ * Get units for the resource.
+ *
+ * @return units for the resource
+ */
+ public String getUnits() {
+ return units;
+ }
+
+ /**
+ * Set the units for the resource.
+ *
+ * @param rUnits units for the resource
+ */
+ public void setUnits(String rUnits) {
+ if (!UnitsConversionUtil.KNOWN_UNITS.contains(rUnits)) {
+ throw new IllegalArgumentException(
+ "Unknown unit '" + rUnits + "'. Known units are "
+ + UnitsConversionUtil.KNOWN_UNITS);
+ }
+ this.units = rUnits;
+ }
+
+ /**
+ * Checking if a unit included by KNOWN_UNITS is an expensive operation. This
+ * can be avoided in critical path in RM.
+ * @param rUnits units for the resource
+ */
+ @InterfaceAudience.Private
+ public void setUnitsWithoutValidation(String rUnits) {
+ this.units = rUnits;
+ }
+
+ /**
+ * Get the resource type.
+ *
+ * @return the resource type
+ */
+ public ResourceTypes getResourceType() {
+ return resourceType;
+ }
+
+ /**
+ * Set the resource type.
+ *
+ * @param type the resource type
+ */
+ public void setResourceType(ResourceTypes type) {
+ this.resourceType = type;
+ }
+
+ /**
+ * Get the value for the resource.
+ *
+ * @return the resource value
+ */
+ public long getValue() {
+ return value;
+ }
+
+ /**
+ * Set the value for the resource.
+ *
+ * @param rValue the resource value
+ */
+ public void setValue(long rValue) {
+ this.value = rValue;
+ }
+
+ /**
+ * Create a new instance of ResourceInformation from another object.
+ *
+ * @param other the object from which the new object should be created
+ * @return the new ResourceInformation object
+ */
+ public static ResourceInformation newInstance(ResourceInformation other) {
+ ResourceInformation ret = new ResourceInformation();
+ copy(other, ret);
+ return ret;
+ }
+
+ public static ResourceInformation newInstance(String name, String units,
+ long value, ResourceTypes type) {
+ ResourceInformation ret = new ResourceInformation();
+ ret.setName(name);
+ ret.setResourceType(type);
+ ret.setUnits(units);
+ ret.setValue(value);
+ return ret;
+ }
+
+ public static ResourceInformation newInstance(String name, String units,
+ long value) {
+ return ResourceInformation
+ .newInstance(name, units, value, ResourceTypes.COUNTABLE);
+ }
+
+ public static ResourceInformation newInstance(String name, String units) {
+ return ResourceInformation
+ .newInstance(name, units, 0L, ResourceTypes.COUNTABLE);
+ }
+
+ public static ResourceInformation newInstance(String name, long value) {
+ return ResourceInformation
+ .newInstance(name, "", value, ResourceTypes.COUNTABLE);
+ }
+
+ public static ResourceInformation newInstance(String name) {
+ return ResourceInformation.newInstance(name, "");
+ }
+
+ /**
+ * Copies the content of the source ResourceInformation object to the
+ * destination object, overwriting all properties of the destination object.
+ * @param src Source ResourceInformation object
+ * @param dst Destination ResourceInformation object
+ */
+
+ public static void copy(ResourceInformation src, ResourceInformation dst) {
+ dst.setName(src.getName());
+ dst.setResourceType(src.getResourceType());
+ dst.setUnits(src.getUnits());
+ dst.setValue(src.getValue());
+ }
+
+ @Override
+ public String toString() {
+ return "name: " + this.name + ", units: " + this.units + ", type: "
+ + resourceType + ", value: " + value;
+ }
+
+ public String getShorthandRepresentation() {
+ return "" + this.value + this.units;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (!(obj instanceof ResourceInformation)) {
+ return false;
+ }
+ ResourceInformation r = (ResourceInformation) obj;
+ if (!this.name.equals(r.getName())
+ || !this.resourceType.equals(r.getResourceType())) {
+ return false;
+ }
+ if (this.units.equals(r.units)) {
+ return this.value == r.value;
+ }
+ return (UnitsConversionUtil.compare(this.units, this.value, r.units,
+ r.value) == 0);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 263167;
+ int result =
+ 939769357 + name.hashCode(); // prime * result = 939769357 initially
+ result = prime * result + resourceType.hashCode();
+ result = prime * result + units.hashCode();
+ result = prime * result + Long.hashCode(value);
+ return result;
+ }
+
+ @Override
+ public int compareTo(ResourceInformation other) {
+ int diff = this.name.compareTo(other.name);
+ if (diff == 0) {
+ diff = UnitsConversionUtil
+ .compare(this.units, this.value, other.units, other.value);
+ if (diff == 0) {
+ diff = this.resourceType.compareTo(other.resourceType);
+ }
+ }
+ return diff;
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
index e9be6c3c14b..43a339cbc92 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
@@ -21,6 +21,7 @@
import java.io.Serializable;
import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
new file mode 100644
index 00000000000..b6f7f147658
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceTypeInfo.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Class to encapsulate information about a ResourceType - the name of the
+ * resource, the units(milli, micro, etc), the type(countable).
+ */
+public abstract class ResourceTypeInfo implements Comparable {
+
+ /**
+ * Get the name for the resource.
+ *
+ * @return resource name
+ */
+ public abstract String getName();
+
+ /**
+ * Set the name for the resource.
+ *
+ * @param rName
+ * name for the resource
+ */
+ public abstract void setName(String rName);
+
+ /**
+ * Get units for the resource.
+ *
+ * @return units for the resource
+ */
+ public abstract String getDefaultUnit();
+
+ /**
+ * Set the units for the resource.
+ *
+ * @param rUnits
+ * units for the resource
+ */
+ public abstract void setDefaultUnit(String rUnits);
+
+ /**
+ * Get the resource type.
+ *
+ * @return the resource type
+ */
+ public abstract ResourceTypes getResourceType();
+
+ /**
+ * Set the resource type.
+ *
+ * @param type
+ * the resource type
+ */
+ public abstract void setResourceType(ResourceTypes type);
+
+ /**
+ * Create a new instance of ResourceTypeInfo from another object.
+ *
+ * @param other
+ * the object from which the new object should be created
+ * @return the new ResourceTypeInfo object
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Unstable
+ public static ResourceTypeInfo newInstance(ResourceTypeInfo other) {
+ ResourceTypeInfo resourceType = Records.newRecord(ResourceTypeInfo.class);
+ copy(other, resourceType);
+ return resourceType;
+ }
+
+ /**
+ * Create a new instance of ResourceTypeInfo from name, units and type.
+ *
+ * @param name name of resource type
+ * @param units units of resource type
+ * @param type such as countable, etc.
+ * @return the new ResourceTypeInfo object
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Unstable
+ public static ResourceTypeInfo newInstance(String name, String units,
+ ResourceTypes type) {
+ ResourceTypeInfo resourceType = Records.newRecord(ResourceTypeInfo.class);
+ resourceType.setName(name);
+ resourceType.setResourceType(type);
+ resourceType.setDefaultUnit(units);
+ return resourceType;
+ }
+
+ /**
+ * Create a new instance of ResourceTypeInfo from name, units.
+ *
+ * @param name name of resource type
+ * @param units units of resource type
+ * @return the new ResourceTypeInfo object
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Unstable
+ public static ResourceTypeInfo newInstance(String name, String units) {
+ return ResourceTypeInfo.newInstance(name, units, ResourceTypes.COUNTABLE);
+ }
+
+ /**
+ * Create a new instance of ResourceTypeInfo from name.
+ *
+ * @param name name of resource type
+ * @return the new ResourceTypeInfo object
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Unstable
+ public static ResourceTypeInfo newInstance(String name) {
+ return ResourceTypeInfo.newInstance(name, "");
+ }
+
+ /**
+ * Copies the content of the source ResourceTypeInfo object to the
+ * destination object, overwriting all properties of the destination object.
+ *
+ * @param src
+ * Source ResourceTypeInfo object
+ * @param dst
+ * Destination ResourceTypeInfo object
+ */
+
+ public static void copy(ResourceTypeInfo src, ResourceTypeInfo dst) {
+ dst.setName(src.getName());
+ dst.setResourceType(src.getResourceType());
+ dst.setDefaultUnit(src.getDefaultUnit());
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("");
+ return sb.toString();
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (!(obj instanceof ResourceTypeInfo)) {
+ return false;
+ }
+ ResourceTypeInfo r = (ResourceTypeInfo) obj;
+ return this.getName().equals(r.getName())
+ && this.getResourceType().equals(r.getResourceType())
+ && this.getDefaultUnit().equals(r.getDefaultUnit());
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 47;
+ int result = prime + getName().hashCode();
+ result = prime * result + getResourceType().hashCode();
+ return result;
+ }
+
+ @Override
+ public int compareTo(ResourceTypeInfo other) {
+ int diff = this.getName().compareTo(other.getName());
+ if (diff == 0) {
+ diff = this.getDefaultUnit().compareTo(other.getDefaultUnit());
+ if (diff == 0) {
+ diff = this.getResourceType().compareTo(other.getResourceType());
+ }
+ }
+ return diff;
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
new file mode 100644
index 00000000000..bb7809e96a0
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+
+import static org.apache.hadoop.yarn.api.records.ResourceInformation.MEMORY_MB;
+import static org.apache.hadoop.yarn.api.records.ResourceInformation.MEMORY_URI;
+import static org.apache.hadoop.yarn.api.records.ResourceInformation.VCORES_URI;
+
+/**
+ *
+ * LightResource extends Resource to handle base resources such
+ * as memory and CPU.
+ * TODO: We have a long term plan to use AbstractResource when additional
+ * resource types are to be handled as well.
+ * This will be used to speed up internal calculation to avoid creating
+ * costly PB-backed Resource object: ResourcePBImpl
+ *
+ *
+ *
+ * Currently it models both memory and CPU.
+ *
+ *
+ *
+ * The unit for memory is megabytes. CPU is modeled with virtual cores (vcores),
+ * a unit for expressing parallelism. A node's capacity should be configured
+ * with virtual cores equal to its number of physical cores. A container should
+ * be requested with the number of cores it can saturate, i.e. the average
+ * number of threads it expects to have runnable at a time.
+ *
+ *
+ *
+ * Virtual cores take integer values and thus currently CPU-scheduling is very
+ * coarse. A complementary axis for CPU requests that represents processing
+ * power will likely be added in the future to enable finer-grained resource
+ * configuration.
+ *
+ *
+ * @see Resource
+ */
+@InterfaceAudience.Private
+@Unstable
+public class LightWeightResource extends Resource {
+
+ private ResourceInformation memoryResInfo;
+ private ResourceInformation vcoresResInfo;
+
+ public LightWeightResource(long memory, long vcores) {
+ this.memoryResInfo = LightWeightResource.newDefaultInformation(MEMORY_URI,
+ MEMORY_MB.getUnits(), memory);
+ this.vcoresResInfo = LightWeightResource.newDefaultInformation(VCORES_URI,
+ "", vcores);
+
+ resources = new ResourceInformation[NUM_MANDATORY_RESOURCES];
+ resources[MEMORY_INDEX] = memoryResInfo;
+ resources[VCORES_INDEX] = vcoresResInfo;
+ }
+
+ private static ResourceInformation newDefaultInformation(String name,
+ String unit, long value) {
+ ResourceInformation ri = new ResourceInformation();
+ ri.setName(name);
+ ri.setValue(value);
+ ri.setResourceType(ResourceTypes.COUNTABLE);
+ ri.setUnitsWithoutValidation(unit);
+ return ri;
+ }
+
+ @Override
+ @SuppressWarnings("deprecation")
+ public int getMemory() {
+ return (int) memoryResInfo.getValue();
+ }
+
+ @Override
+ @SuppressWarnings("deprecation")
+ public void setMemory(int memory) {
+ this.memoryResInfo.setValue(memory);
+ }
+
+ @Override
+ public long getMemorySize() {
+ return memoryResInfo.getValue();
+ }
+
+ @Override
+ public void setMemorySize(long memory) {
+ this.memoryResInfo.setValue(memory);
+ }
+
+ @Override
+ public int getVirtualCores() {
+ return (int) vcoresResInfo.getValue();
+ }
+
+ @Override
+ public void setVirtualCores(int vcores) {
+ this.vcoresResInfo.setValue(vcores);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || !(obj instanceof Resource)) {
+ return false;
+ }
+ Resource other = (Resource) obj;
+ if (getMemorySize() != other.getMemorySize()
+ || getVirtualCores() != other.getVirtualCores()) {
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int compareTo(Resource other) {
+ // compare memory and vcores first(in that order) to preserve
+ // existing behaviour
+ long diff = this.getMemorySize() - other.getMemorySize();
+ if (diff == 0) {
+ return this.getVirtualCores() - other.getVirtualCores();
+ } else if (diff > 0){
+ return 1;
+ } else {
+ return -1;
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 47;
+ long result = prime + getMemorySize();
+ result = prime * result + getVirtualCores();
+
+ return (int) result;
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/package-info.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/package-info.java
new file mode 100644
index 00000000000..b2420bc50f8
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package org.apache.hadoop.yarn.api.records.impl contains classes
+ * which define basic resources.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.api.records.impl;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 0f4080a6337..707fcfffefc 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -61,8 +61,27 @@
public static final String CORE_SITE_CONFIGURATION_FILE = "core-site.xml";
@Private
+ public static final String RESOURCE_TYPES_CONFIGURATION_FILE =
+ "resource-types.xml";
+
+ @Private
+ public static final String NODE_RESOURCES_CONFIGURATION_FILE =
+ "node-resources.xml";
+
+ @Private
public static final List RM_CONFIGURATION_FILES =
Collections.unmodifiableList(Arrays.asList(
+ RESOURCE_TYPES_CONFIGURATION_FILE,
+ DR_CONFIGURATION_FILE,
+ CS_CONFIGURATION_FILE,
+ HADOOP_POLICY_CONFIGURATION_FILE,
+ YARN_SITE_CONFIGURATION_FILE,
+ CORE_SITE_CONFIGURATION_FILE));
+
+ @Private
+ public static final List NM_CONFIGURATION_FILES =
+ Collections.unmodifiableList(Arrays.asList(
+ NODE_RESOURCES_CONFIGURATION_FILE,
DR_CONFIGURATION_FILE,
CS_CONFIGURATION_FILE,
HADOOP_POLICY_CONFIGURATION_FILE,
@@ -106,6 +125,16 @@ private static void addDeprecatedKeys() {
public static final String YARN_PREFIX = "yarn.";
+ /////////////////////////////
+ // Resource types configs
+ ////////////////////////////
+
+ public static final String RESOURCE_TYPES =
+ YarnConfiguration.YARN_PREFIX + "resource-types";
+
+ public static final String NM_RESOURCES_PREFIX =
+ YarnConfiguration.NM_PREFIX + "resource-type.";
+
/** Delay before deleting resource to ease debugging of NM issues */
public static final String DEBUG_NM_DELETE_DELAY_SEC =
YarnConfiguration.NM_PREFIX + "delete.debug-delay-sec";
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java
new file mode 100644
index 00000000000..b5fece7dc8c
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceNotFoundException.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This exception is thrown when details of an unknown resource type
+ * are requested.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class ResourceNotFoundException extends YarnRuntimeException {
+
+ private static final long serialVersionUID = 10081982L;
+
+ public ResourceNotFoundException(String message) {
+ super(message);
+ }
+
+ public ResourceNotFoundException(Throwable cause) {
+ super(cause);
+ }
+
+ public ResourceNotFoundException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YARNFeatureNotEnabledException.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YARNFeatureNotEnabledException.java
new file mode 100644
index 00000000000..62340fea363
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YARNFeatureNotEnabledException.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.exceptions;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This exception is thrown when a feature is being used which is not enabled
+ * yet.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class YARNFeatureNotEnabledException extends YarnException {
+ private static final long serialVersionUID = 898023752676L;
+
+ public YARNFeatureNotEnabledException(Throwable cause) {
+ super(cause);
+ }
+
+ public YARNFeatureNotEnabledException(String message) {
+ super(message);
+ }
+
+ public YARNFeatureNotEnabledException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
new file mode 100644
index 00000000000..7a212e163d9
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
@@ -0,0 +1,221 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.math.BigInteger;
+import java.util.*;
+
+/**
+ * A util to convert values in one unit to another. Units refers to whether
+ * the value is expressed in pico, nano, etc.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
public class UnitsConversionUtil {

  /**
   * Helper class for encapsulating conversion values.
   */
  public static class Converter {
    private long numerator;
    private long denominator;

    Converter(long n, long d) {
      this.numerator = n;
      this.denominator = d;
    }
  }

  // Ordered from smallest to largest; position in this array is used by
  // compare() to decide which direction to convert.
  private static final String[] UNITS = {"p", "n", "u", "m", "", "k", "M", "G",
      "T", "P", "Ki", "Mi", "Gi", "Ti", "Pi"};
  private static final List<String> SORTED_UNITS = Arrays.asList(UNITS);
  public static final Set<String> KNOWN_UNITS = createKnownUnitsSet();
  private static final Converter PICO =
      new Converter(1L, 1000L * 1000L * 1000L * 1000L);
  private static final Converter NANO =
      new Converter(1L, 1000L * 1000L * 1000L);
  private static final Converter MICRO = new Converter(1L, 1000L * 1000L);
  private static final Converter MILLI = new Converter(1L, 1000L);
  private static final Converter BASE = new Converter(1L, 1L);
  private static final Converter KILO = new Converter(1000L, 1L);
  private static final Converter MEGA = new Converter(1000L * 1000L, 1L);
  private static final Converter GIGA =
      new Converter(1000L * 1000L * 1000L, 1L);
  private static final Converter TERA =
      new Converter(1000L * 1000L * 1000L * 1000L, 1L);
  private static final Converter PETA =
      new Converter(1000L * 1000L * 1000L * 1000L * 1000L, 1L);

  private static final Converter KILO_BINARY = new Converter(1024L, 1L);
  private static final Converter MEGA_BINARY = new Converter(1024L * 1024L, 1L);
  private static final Converter GIGA_BINARY =
      new Converter(1024L * 1024L * 1024L, 1L);
  private static final Converter TERA_BINARY =
      new Converter(1024L * 1024L * 1024L * 1024L, 1L);
  private static final Converter PETA_BINARY =
      new Converter(1024L * 1024L * 1024L * 1024L * 1024L, 1L);

  private static Set<String> createKnownUnitsSet() {
    return new HashSet<>(Arrays.asList(UNITS));
  }

  private static Converter getConverter(String unit) {
    switch (unit) {
    case "p":
      return PICO;
    case "n":
      return NANO;
    case "u":
      return MICRO;
    case "m":
      return MILLI;
    case "":
      return BASE;
    case "k":
      return KILO;
    case "M":
      return MEGA;
    case "G":
      return GIGA;
    case "T":
      return TERA;
    case "P":
      return PETA;
    case "Ki":
      return KILO_BINARY;
    case "Mi":
      return MEGA_BINARY;
    case "Gi":
      return GIGA_BINARY;
    case "Ti":
      return TERA_BINARY;
    case "Pi":
      return PETA_BINARY;
    default:
      throw new IllegalArgumentException(
          "Unknown unit '" + unit + "'. Known units are " + KNOWN_UNITS);
    }
  }

  // Shared message for all overflow failures in convert().
  private static String overflowMessage(long fromValue, String fromUnit,
      String toUnit) {
    return "Converting " + fromValue + " from '" + fromUnit + "' to '" + toUnit
        + "' will result in an overflow of Long";
  }

  /**
   * Converts a value from one unit to another. Supported units can be obtained
   * by inspecting the KNOWN_UNITS set.
   *
   * @param fromUnit the unit of the from value
   * @param toUnit the target unit
   * @param fromValue the value you wish to convert
   * @return the value in toUnit
   * @throws IllegalArgumentException if a unit is null or unknown, or if the
   *         converted value would overflow a long
   */
  public static long convert(String fromUnit, String toUnit, long fromValue) {
    if (toUnit == null || fromUnit == null) {
      throw new IllegalArgumentException("One or more arguments are null");
    }

    if (fromUnit.equals(toUnit)) {
      return fromValue;
    }
    Converter fc = getConverter(fromUnit);
    Converter tc = getConverter(toUnit);
    long numerator;
    long denominator;
    try {
      // The cross-multiplied factors can themselves overflow (e.g. "P" ->
      // "p"); detect this rather than silently wrapping and computing with a
      // corrupted factor. Wrapping in IllegalArgumentException keeps the
      // BigInteger fallback in compare() working.
      numerator = Math.multiplyExact(fc.numerator, tc.denominator);
      denominator = Math.multiplyExact(fc.denominator, tc.numerator);
    } catch (ArithmeticException e) {
      throw new IllegalArgumentException(
          overflowMessage(fromValue, fromUnit, toUnit), e);
    }
    long numeratorMultiplierLimit = Long.MAX_VALUE / numerator;
    if (numerator < denominator) {
      if (numeratorMultiplierLimit < fromValue) {
        throw new IllegalArgumentException(
            overflowMessage(fromValue, fromUnit, toUnit));
      }
      return (fromValue * numerator) / denominator;
    }
    if (numeratorMultiplierLimit > fromValue) {
      return (numerator * fromValue) / denominator;
    }
    long tmp = numerator / denominator;
    if ((Long.MAX_VALUE / tmp) < fromValue) {
      throw new IllegalArgumentException(
          overflowMessage(fromValue, fromUnit, toUnit));
    }
    return fromValue * tmp;
  }

  /**
   * Compare a value in a given unit with a value in another unit. The return
   * value is equivalent to the value returned by compareTo.
   *
   * @param unitA first unit
   * @param valueA first value
   * @param unitB second unit
   * @param valueB second value
   * @return +1, 0 or -1 depending on whether the relationship is greater than,
   *         equal to or lesser than
   * @throws IllegalArgumentException if a unit is null or unknown
   */
  public static int compare(String unitA, long valueA, String unitB,
      long valueB) {
    // Check null and unknown units separately so each failure mode reports
    // an accurate message (previously unknown units reported "cannot be
    // null" and the per-unit checks below were unreachable).
    if (unitA == null || unitB == null) {
      throw new IllegalArgumentException("Units cannot be null");
    }
    if (!KNOWN_UNITS.contains(unitA)) {
      throw new IllegalArgumentException("Unknown unit '" + unitA + "'");
    }
    if (!KNOWN_UNITS.contains(unitB)) {
      throw new IllegalArgumentException("Unknown unit '" + unitB + "'");
    }
    if (unitA.equals(unitB)) {
      return Long.compare(valueA, valueB);
    }
    Converter unitAC = getConverter(unitA);
    Converter unitBC = getConverter(unitB);
    int unitAPos = SORTED_UNITS.indexOf(unitA);
    int unitBPos = SORTED_UNITS.indexOf(unitB);
    try {
      // Convert toward the smaller unit so the conversion multiplies rather
      // than divides (no precision loss). Falls back to BigInteger if the
      // converted value would overflow.
      long tmpA = valueA;
      long tmpB = valueB;
      if (unitAPos < unitBPos) {
        tmpB = convert(unitB, unitA, valueB);
      } else {
        tmpA = convert(unitA, unitB, valueA);
      }
      return Long.compare(tmpA, tmpB);
    } catch (IllegalArgumentException ie) {
      BigInteger tmpA = BigInteger.valueOf(valueA);
      BigInteger tmpB = BigInteger.valueOf(valueB);
      if (unitAPos < unitBPos) {
        tmpB = tmpB.multiply(BigInteger.valueOf(unitBC.numerator));
        tmpB = tmpB.multiply(BigInteger.valueOf(unitAC.denominator));
        tmpB = tmpB.divide(BigInteger.valueOf(unitBC.denominator));
        tmpB = tmpB.divide(BigInteger.valueOf(unitAC.numerator));
      } else {
        tmpA = tmpA.multiply(BigInteger.valueOf(unitAC.numerator));
        tmpA = tmpA.multiply(BigInteger.valueOf(unitBC.denominator));
        tmpA = tmpA.divide(BigInteger.valueOf(unitAC.denominator));
        tmpA = tmpA.divide(BigInteger.valueOf(unitBC.numerator));
      }
      return tmpA.compareTo(tmpB);
    }
  }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
new file mode 100644
index 00000000000..fd1da35c860
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -0,0 +1,442 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util.resource;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
+import org.apache.hadoop.yarn.conf.ConfigurationProvider;
+import org.apache.hadoop.yarn.conf.ConfigurationProviderFactory;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Helper class to read the resource-types to be supported by the system.
+ */
+public class ResourceUtils {
+
+ public static final String UNITS = ".units";
+ public static final String TYPE = ".type";
+
+ private static final String MEMORY = ResourceInformation.MEMORY_MB.getName();
+ private static final String VCORES = ResourceInformation.VCORES.getName();
+
+ private static volatile boolean initializedResources = false;
+ private static final Map RESOURCE_NAME_TO_INDEX =
+ new ConcurrentHashMap();
+ private static volatile Map resourceTypes;
+ private static volatile ResourceInformation[] resourceTypesArray;
+ private static volatile boolean initializedNodeResources = false;
+ private static volatile Map readOnlyNodeResources;
+ private static volatile int numKnownResourceTypes = -1;
+
+ static final Log LOG = LogFactory.getLog(ResourceUtils.class);
+
+ private ResourceUtils() {
+ }
+
+ private static void checkMandatoryResources(
+ Map resourceInformationMap)
+ throws YarnRuntimeException {
+ /*
+ * Reject a resource literally named "memory": only the mandatory
+ * "memory-mb" key may define memory (the bare name is disallowed for
+ * historical reasons).
+ */
+ String key = "memory";
+ if (resourceInformationMap.containsKey(key)) {
+ LOG.warn("Attempt to define resource '" + key +
+ "', but it is not allowed.");
+ throw new YarnRuntimeException("Attempt to re-define mandatory resource '"
+ + key + "'.");
+ }
+
+ if (resourceInformationMap.containsKey(MEMORY)) {
+ ResourceInformation memInfo = resourceInformationMap.get(MEMORY);
+ String memUnits = ResourceInformation.MEMORY_MB.getUnits();
+ ResourceTypes memType = ResourceInformation.MEMORY_MB.getResourceType();
+ if (!memInfo.getUnits().equals(memUnits) || !memInfo.getResourceType()
+ .equals(memType)) {
+ throw new YarnRuntimeException(
+ "Attempt to re-define mandatory resource 'memory-mb'. It can only"
+ + " be of type 'COUNTABLE' and have units 'Mi'.");
+ }
+ }
+
+ if (resourceInformationMap.containsKey(VCORES)) {
+ ResourceInformation vcoreInfo = resourceInformationMap.get(VCORES);
+ String vcoreUnits = ResourceInformation.VCORES.getUnits();
+ ResourceTypes vcoreType = ResourceInformation.VCORES.getResourceType();
+ if (!vcoreInfo.getUnits().equals(vcoreUnits) || !vcoreInfo
+ .getResourceType().equals(vcoreType)) {
+ throw new YarnRuntimeException(
+ "Attempt to re-define mandatory resource 'vcores'. It can only be"
+ + " of type 'COUNTABLE' and have units ''(no units).");
+ }
+ }
+ }
+
+ private static void addMandatoryResources(
+ Map res) {
+ ResourceInformation ri;
+ if (!res.containsKey(MEMORY)) {
+ LOG.info("Adding resource type - name = " + MEMORY + ", units = "
+ + ResourceInformation.MEMORY_MB.getUnits() + ", type = "
+ + ResourceTypes.COUNTABLE);
+ ri = ResourceInformation
+ .newInstance(MEMORY,
+ ResourceInformation.MEMORY_MB.getUnits());
+ res.put(MEMORY, ri);
+ }
+ if (!res.containsKey(VCORES)) {
+ LOG.info("Adding resource type - name = " + VCORES + ", units = , type = "
+ + ResourceTypes.COUNTABLE);
+ ri =
+ ResourceInformation.newInstance(VCORES);
+ res.put(VCORES, ri);
+ }
+ }
+
+ @VisibleForTesting
+ static void initializeResourcesMap(Configuration conf) {
+
+ Map resourceInformationMap = new HashMap<>();
+ String[] resourceNames = conf.getStrings(YarnConfiguration.RESOURCE_TYPES);
+
+ if (resourceNames != null && resourceNames.length != 0) {
+ for (String resourceName : resourceNames) {
+ String resourceUnits = conf.get(
+ YarnConfiguration.RESOURCE_TYPES + "." + resourceName + UNITS, "");
+ String resourceTypeName = conf.get(
+ YarnConfiguration.RESOURCE_TYPES + "." + resourceName + TYPE,
+ ResourceTypes.COUNTABLE.toString());
+ if (resourceName == null || resourceName.isEmpty()
+ || resourceUnits == null || resourceTypeName == null) {
+ throw new YarnRuntimeException(
+ "Incomplete configuration for resource type '" + resourceName
+ + "'. One of name, units or type is configured incorrectly.");
+ }
+ ResourceTypes resourceType = ResourceTypes.valueOf(resourceTypeName);
+ LOG.info("Adding resource type - name = " + resourceName + ", units = "
+ + resourceUnits + ", type = " + resourceTypeName);
+ if (resourceInformationMap.containsKey(resourceName)) {
+ throw new YarnRuntimeException(
+ "Error in config, key '" + resourceName + "' specified twice");
+ }
+ resourceInformationMap.put(resourceName, ResourceInformation
+ .newInstance(resourceName, resourceUnits, 0L, resourceType));
+ }
+ }
+
+ checkMandatoryResources(resourceInformationMap);
+ addMandatoryResources(resourceInformationMap);
+
+ initializeResourcesFromResourceInformationMap(resourceInformationMap);
+ }
+
+ /**
+ * This method is visible for testing, unit test can construct a
+ * resourceInformationMap and pass it to this method to initialize multiple resources.
+ * @param resourceInformationMap constructed resource information map.
+ */
+ @VisibleForTesting
+ public static void initializeResourcesFromResourceInformationMap(
+ Map resourceInformationMap) {
+ resourceTypes = Collections.unmodifiableMap(resourceInformationMap);
+ updateKnownResources();
+ updateResourceTypeIndex();
+ initializedResources = true;
+ }
+
+ private static void updateKnownResources() {
+ // Update resource names.
+ resourceTypesArray = new ResourceInformation[resourceTypes.size()];
+
+ int index = 2;
+ for (ResourceInformation resInfo : resourceTypes.values()) {
+ if (resInfo.getName().equals(MEMORY)) {
+ resourceTypesArray[0] = ResourceInformation
+ .newInstance(resourceTypes.get(MEMORY));
+ } else if (resInfo.getName().equals(VCORES)) {
+ resourceTypesArray[1] = ResourceInformation
+ .newInstance(resourceTypes.get(VCORES));
+ } else {
+ resourceTypesArray[index] = ResourceInformation.newInstance(resInfo);
+ index++;
+ }
+ }
+ }
+
+ private static void updateResourceTypeIndex() {
+ RESOURCE_NAME_TO_INDEX.clear();
+
+ for (int index = 0; index < resourceTypesArray.length; index++) {
+ ResourceInformation resInfo = resourceTypesArray[index];
+ RESOURCE_NAME_TO_INDEX.put(resInfo.getName(), index);
+ }
+ }
+
+ /**
+ * Get the associated index of resource types such as memory, CPU etc.
+ * This could help to access each resource types in a resource faster.
+ * @return Index map for all Resource Types.
+ */
+ public static Map getResourceTypeIndex() {
+ return RESOURCE_NAME_TO_INDEX;
+ }
+
+ /**
+ * Get the resource types to be supported by the system.
+ * @return A map of the resource name to a ResourceInformation object
+ * which contains details such as the unit.
+ */
+ public static Map getResourceTypes() {
+ return getResourceTypes(null,
+ YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
+ }
+
+ public static ResourceInformation[] getResourceTypesArray() {
+ initializeResourceTypesIfNeeded(null,
+ YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
+ return resourceTypesArray;
+ }
+
+ public static int getNumberOfKnownResourceTypes() {
+ if (numKnownResourceTypes < 0) {
+ initializeResourceTypesIfNeeded(null,
+ YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
+ }
+ return numKnownResourceTypes;
+ }
+
+ private static Map getResourceTypes(
+ Configuration conf) {
+ return getResourceTypes(conf,
+ YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE);
+ }
+
+ private static void initializeResourceTypesIfNeeded(Configuration conf,
+ String resourceFile) {
+ if (!initializedResources) {
+ synchronized (ResourceUtils.class) {
+ if (!initializedResources) {
+ if (conf == null) {
+ conf = new YarnConfiguration();
+ }
+ try {
+ addResourcesFileToConf(resourceFile, conf);
+ LOG.debug("Found " + resourceFile + ", adding to configuration");
+ } catch (FileNotFoundException fe) {
+ LOG.debug("Unable to find '" + resourceFile + "'.");
+ }
+
+ initializeResourcesMap(conf);
+ }
+ }
+ }
+ numKnownResourceTypes = resourceTypes.size();
+ }
+
+ private static Map getResourceTypes(
+ Configuration conf, String resourceFile) {
+ initializeResourceTypesIfNeeded(conf, resourceFile);
+ return resourceTypes;
+ }
+
+ private static InputStream getConfInputStream(String resourceFile,
+ Configuration conf) throws IOException, YarnException {
+
+ ConfigurationProvider provider =
+ ConfigurationProviderFactory.getConfigurationProvider(conf);
+ InputStream ris = provider.getConfigurationInputStream(conf, resourceFile);
+ if (ris == null) {
+ if (conf.getResource(resourceFile) == null) {
+ throw new FileNotFoundException("Unable to find " + resourceFile);
+ }
+ throw new IOException(
+ "Unable to open resource types file '" + resourceFile
+ + "'. Using provider " + provider);
+ }
+ return ris;
+ }
+
+ private static void addResourcesFileToConf(String resourceFile,
+ Configuration conf) throws FileNotFoundException {
+ try {
+ InputStream ris = getConfInputStream(resourceFile, conf);
+ LOG.debug("Found " + resourceFile + ", adding to configuration");
+ conf.addResource(ris);
+ } catch (FileNotFoundException fe) {
+ throw fe;
+ } catch (IOException ie) {
+ LOG.fatal("Exception trying to read resource types configuration '"
+ + resourceFile + "'.", ie);
+ throw new YarnRuntimeException(ie);
+ } catch (YarnException ye) {
+ LOG.fatal("YARN Exception trying to read resource types configuration '"
+ + resourceFile + "'.", ye);
+ throw new YarnRuntimeException(ye);
+ }
+ }
+
+ @VisibleForTesting
+ static synchronized void resetResourceTypes() {
+ initializedResources = false;
+ }
+
+ @VisibleForTesting
+ public static Map
+ resetResourceTypes(Configuration conf) {
+ synchronized (ResourceUtils.class) {
+ initializedResources = false;
+ }
+ return getResourceTypes(conf);
+ }
+
+ public static String getUnits(String resourceValue) {
+ String units;
+ for (int i = 0; i < resourceValue.length(); i++) {
+ if (Character.isAlphabetic(resourceValue.charAt(i))) {
+ units = resourceValue.substring(i);
+ if (StringUtils.isAlpha(units)) {
+ return units;
+ }
+ }
+ }
+ return "";
+ }
+
+ /**
+ * Function to get the resources for a node. This function will look at the
+ * file {@link YarnConfiguration#NODE_RESOURCES_CONFIGURATION_FILE} to
+ * determine the node resources.
+ *
+ * @param conf configuration file
+ * @return a map to resource name to the ResourceInformation object. The map
+ * is guaranteed to have entries for memory and vcores
+ */
+ public static Map getNodeResourceInformation(
+ Configuration conf) {
+ if (!initializedNodeResources) {
+ synchronized (ResourceUtils.class) {
+ if (!initializedNodeResources) {
+ Map nodeResources = initializeNodeResourceInformation(
+ conf);
+ addMandatoryResources(nodeResources);
+ checkMandatoryResources(nodeResources);
+ readOnlyNodeResources = Collections.unmodifiableMap(nodeResources);
+ initializedNodeResources = true;
+ }
+ }
+ }
+ return readOnlyNodeResources;
+ }
+
+ private static Map initializeNodeResourceInformation(
+ Configuration conf) {
+ Map nodeResources = new HashMap<>();
+ try {
+ addResourcesFileToConf(
+ YarnConfiguration.NODE_RESOURCES_CONFIGURATION_FILE, conf);
+ for (Map.Entry entry : conf) {
+ String key = entry.getKey();
+ String value = entry.getValue();
+ if (key.startsWith(YarnConfiguration.NM_RESOURCES_PREFIX)) {
+ addResourceInformation(key, value, nodeResources);
+ }
+ }
+ } catch (FileNotFoundException fe) {
+ LOG.info("Couldn't find node resources file");
+ }
+ return nodeResources;
+ }
+
+ private static void addResourceInformation(String prop, String value,
+ Map nodeResources) {
+ String[] parts = prop.split("\\.");
+ LOG.info("Found resource entry " + prop);
+ if (parts.length == 4) {
+ String resourceType = parts[3];
+ if (!nodeResources.containsKey(resourceType)) {
+ nodeResources
+ .put(resourceType, ResourceInformation.newInstance(resourceType));
+ }
+ String units = getUnits(value);
+ Long resourceValue =
+ Long.valueOf(value.substring(0, value.length() - units.length()));
+ nodeResources.get(resourceType).setValue(resourceValue);
+ nodeResources.get(resourceType).setUnits(units);
+ LOG.debug("Setting value for resource type " + resourceType + " to "
+ + resourceValue + " with units " + units);
+ }
+ }
+
+ @VisibleForTesting
+ public static synchronized void resetNodeResources() {
+ initializedNodeResources = false;
+ }
+
+ /**
+ * Get default unit by given resource type.
+ * @param resourceType resourceType
+ * @return default unit
+ */
+ public static String getDefaultUnit(String resourceType) {
+ ResourceInformation ri = getResourceTypes().get(resourceType);
+ if (ri != null) {
+ return ri.getUnits();
+ }
+ return "";
+ }
+
+ /**
+ * Get all resource types information from known resource types.
+ * @return List of ResourceTypeInfo
+ */
+ public static List getResourcesTypeInfo() {
+ List array = new ArrayList<>();
+ // Add all resource types
+ Collection resourcesInfo =
+ ResourceUtils.getResourceTypes().values();
+ for (ResourceInformation resourceInfo : resourcesInfo) {
+ array.add(ResourceTypeInfo
+ .newInstance(resourceInfo.getName(), resourceInfo.getUnits(),
+ resourceInfo.getResourceType()));
+ }
+ return array;
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/package-info.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/package-info.java
new file mode 100644
index 00000000000..d7c799d7cbf
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package org.apache.hadoop.yarn.util.resource contains classes
+ * which are used as utility classes for resource profile computations.
+ */
+package org.apache.hadoop.yarn.util.resource;
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
index ba79db09a6f..512d3a15c1f 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/applicationclient_protocol.proto
@@ -61,4 +61,5 @@ service ApplicationClientProtocolService {
rpc updateApplicationPriority (UpdateApplicationPriorityRequestProto) returns (UpdateApplicationPriorityResponseProto);
rpc signalToContainer(SignalContainerRequestProto) returns (SignalContainerResponseProto);
rpc updateApplicationTimeouts (UpdateApplicationTimeoutsRequestProto) returns (UpdateApplicationTimeoutsResponseProto);
+ rpc getResourceTypeInfo(GetAllResourceTypeInfoRequestProto) returns (GetAllResourceTypeInfoResponseProto);
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index d15d484f811..77732d8f1e8 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -53,9 +53,27 @@ message ContainerIdProto {
optional int64 id = 3;
}
+enum ResourceTypesProto {
+ COUNTABLE = 0;
+}
+
+message ResourceInformationProto {
+ required string key = 1;
+ optional int64 value = 2;
+ optional string units = 3;
+ optional ResourceTypesProto type = 4;
+}
+
+message ResourceTypeInfoProto {
+ required string name = 1;
+ optional string units = 2;
+ optional ResourceTypesProto type = 3;
+}
+
message ResourceProto {
optional int64 memory = 1;
optional int32 virtual_cores = 2;
+ repeated ResourceInformationProto resource_value_map = 3;
}
message ResourceUtilizationProto {
@@ -205,6 +223,11 @@ message LocalResourceProto {
optional bool should_be_uploaded_to_shared_cache = 7;
}
+message StringLongMapProto {
+ required string key = 1;
+ required int64 value = 2;
+}
+
message ApplicationResourceUsageReportProto {
optional int32 num_used_containers = 1;
optional int32 num_reserved_containers = 2;
@@ -217,6 +240,8 @@ message ApplicationResourceUsageReportProto {
optional float cluster_usage_percentage = 9;
optional int64 preempted_memory_seconds = 10;
optional int64 preempted_vcore_seconds = 11;
+ repeated StringLongMapProto application_resource_usage_map = 12;
+ repeated StringLongMapProto application_preempted_resource_usage_map = 13;
}
message ApplicationReportProto {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 24981428341..a18df260092 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -279,6 +279,13 @@ message UpdateApplicationTimeoutsResponseProto {
repeated ApplicationUpdateTimeoutMapProto application_timeouts = 1;
}
+message GetAllResourceTypeInfoRequestProto {
+}
+
+message GetAllResourceTypeInfoResponseProto {
+ repeated ResourceTypeInfoProto resource_type_info = 1;
+}
+
//////////////////////////////////////////////////////
/////// client_NM_Protocol ///////////////////////////
//////////////////////////////////////////////////////
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java
new file mode 100644
index 00000000000..66bf3204bf6
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestResourceInformation.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.conf;
+
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class to verify the various pieces of resource information in a given resource.
+ */
+public class TestResourceInformation {
+
+ @Test
+ public void testName() {
+ String name = "yarn.io/test";
+ ResourceInformation ri = ResourceInformation.newInstance(name);
+ Assert.assertEquals("Resource name incorrect", name, ri.getName());
+ }
+
+ @Test
+ public void testUnits() {
+ String name = "yarn.io/test";
+ String units = "m";
+ ResourceInformation ri = ResourceInformation.newInstance(name, units);
+ Assert.assertEquals("Resource name incorrect", name, ri.getName());
+ Assert.assertEquals("Resource units incorrect", units, ri.getUnits());
+ units = "z";
+ try {
+ ResourceInformation.newInstance(name, units);
+ Assert.fail(units + " is not a valid unit");
+ } catch (IllegalArgumentException ie) {
+ // do nothing
+ }
+ }
+
+ @Test
+ public void testValue() {
+ String name = "yarn.io/test";
+ long value = 1L;
+ ResourceInformation ri = ResourceInformation.newInstance(name, value);
+ Assert.assertEquals("Resource name incorrect", name, ri.getName());
+ Assert.assertEquals("Resource value incorrect", value, ri.getValue());
+ }
+
+ @Test
+ public void testResourceInformation() {
+ String name = "yarn.io/test";
+ long value = 1L;
+ String units = "m";
+ ResourceInformation ri =
+ ResourceInformation.newInstance(name, units, value);
+ Assert.assertEquals("Resource name incorrect", name, ri.getName());
+ Assert.assertEquals("Resource value incorrect", value, ri.getValue());
+ Assert.assertEquals("Resource units incorrect", units, ri.getUnits());
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/util/TestUnitsConversionUtil.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/util/TestUnitsConversionUtil.java
new file mode 100644
index 00000000000..a412faebed8
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/util/TestUnitsConversionUtil.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class to handle all test cases needed to verify basic unit conversion
+ * scenarios.
+ */
+public class TestUnitsConversionUtil {
+
+ @Test
+ public void testUnitsConversion() {
+ int value = 5;
+ String fromUnit = "";
+ long test = value;
+ Assert.assertEquals("pico test failed",
+ value * 1000L * 1000L * 1000L * 1000L,
+ UnitsConversionUtil.convert(fromUnit, "p", test));
+ Assert.assertEquals("nano test failed",
+ value * 1000L * 1000L * 1000L,
+ UnitsConversionUtil.convert(fromUnit, "n", test));
+ Assert
+ .assertEquals("micro test failed", value * 1000L * 1000L,
+ UnitsConversionUtil.convert(fromUnit, "u", test));
+ Assert.assertEquals("milli test failed", value * 1000L,
+ UnitsConversionUtil.convert(fromUnit, "m", test));
+
+ test = value * 1000L * 1000L * 1000L * 1000L * 1000L;
+ fromUnit = "";
+ Assert.assertEquals("kilo test failed", test / 1000L,
+ UnitsConversionUtil.convert(fromUnit, "k", test));
+
+ Assert
+ .assertEquals("mega test failed", test / (1000L * 1000L),
+ UnitsConversionUtil.convert(fromUnit, "M", test));
+ Assert.assertEquals("giga test failed",
+ test / (1000L * 1000L * 1000L),
+ UnitsConversionUtil.convert(fromUnit, "G", test));
+ Assert.assertEquals("tera test failed",
+ test / (1000L * 1000L * 1000L * 1000L),
+ UnitsConversionUtil.convert(fromUnit, "T", test));
+ Assert.assertEquals("peta test failed",
+ test / (1000L * 1000L * 1000L * 1000L * 1000L),
+ UnitsConversionUtil.convert(fromUnit, "P", test));
+
+ Assert.assertEquals("nano to pico test failed", value * 1000L,
+ UnitsConversionUtil.convert("n", "p", value));
+
+ Assert.assertEquals("mega to giga test failed", value,
+ UnitsConversionUtil.convert("M", "G", value * 1000L));
+
+ Assert.assertEquals("Mi to Gi test failed", value,
+ UnitsConversionUtil.convert("Mi", "Gi", value * 1024L));
+
+ Assert.assertEquals("Mi to Ki test failed", value * 1024,
+ UnitsConversionUtil.convert("Mi", "Ki", value));
+
+ Assert.assertEquals("Ki to base units test failed", 5 * 1024,
+ UnitsConversionUtil.convert("Ki", "", 5));
+
+ Assert.assertEquals("Mi to k test failed", 1073741,
+ UnitsConversionUtil.convert("Mi", "k", 1024));
+
+ Assert.assertEquals("M to Mi test failed", 953,
+ UnitsConversionUtil.convert("M", "Mi", 1000));
+ }
+
+ @Test
+ public void testOverflow() {
+ long test = 5 * 1000L * 1000L * 1000L * 1000L * 1000L;
+ try {
+ UnitsConversionUtil.convert("P", "p", test);
+ Assert.fail("this operation should result in an overflow");
+ } catch (IllegalArgumentException ie) {
+ // do nothing
+ }
+ try {
+ UnitsConversionUtil.convert("m", "p", Long.MAX_VALUE - 1);
+ Assert.fail("this operation should result in an overflow");
+ } catch (IllegalArgumentException ie) {
+ // do nothing
+ }
+ }
+
+ @Test
+ public void testCompare() {
+ String unitA = "P";
+ long valueA = 1;
+ String unitB = "p";
+ long valueB = 2;
+ Assert.assertEquals(1,
+ UnitsConversionUtil.compare(unitA, valueA, unitB, valueB));
+ Assert.assertEquals(-1,
+ UnitsConversionUtil.compare(unitB, valueB, unitA, valueA));
+ Assert.assertEquals(0,
+ UnitsConversionUtil.compare(unitA, valueA, unitA, valueA));
+ Assert.assertEquals(-1,
+ UnitsConversionUtil.compare(unitA, valueA, unitA, valueB));
+ Assert.assertEquals(1,
+ UnitsConversionUtil.compare(unitA, valueB, unitA, valueA));
+
+ unitB = "T";
+ Assert.assertEquals(1,
+ UnitsConversionUtil.compare(unitA, valueA, unitB, valueB));
+ Assert.assertEquals(-1,
+ UnitsConversionUtil.compare(unitB, valueB, unitA, valueA));
+ Assert.assertEquals(0,
+ UnitsConversionUtil.compare(unitA, valueA, unitB, 1000L));
+
+ unitA = "p";
+ unitB = "n";
+ Assert.assertEquals(-1,
+ UnitsConversionUtil.compare(unitA, valueA, unitB, valueB));
+ Assert.assertEquals(1,
+ UnitsConversionUtil.compare(unitB, valueB, unitA, valueA));
+ Assert.assertEquals(0,
+ UnitsConversionUtil.compare(unitA, 1000L, unitB, valueA));
+
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index eedb5016e4f..5633c8a6ec0 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -79,6 +79,7 @@
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.client.util.YarnClientUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
index 0d0c351b6b7..a3505054326 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
@@ -61,6 +61,8 @@
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
import org.apache.hadoop.yarn.api.records.ReservationId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
@@ -70,6 +72,7 @@
import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
+import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
@@ -855,4 +858,17 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
throw new UnsupportedOperationException("The sub-class extending "
+ YarnClient.class.getName() + " is expected to implement this !");
}
+
+ /**
+ *
+ * Get available resource types supported by RM.
+ *
+ * @return list of supported resource types with detailed information
+ * @throws YarnException if any issue happens inside YARN
+ * @throws IOException in case of other errors
+ */
+ @Public
+ @Unstable
+ public abstract List getResourceTypeInfo()
+ throws YarnException, IOException;
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
index 19cb10b1d11..d741b571e80 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
@@ -43,6 +43,7 @@
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -101,6 +102,8 @@
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
@@ -937,4 +940,12 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
throws YarnException, IOException {
return rmClient.updateApplicationTimeouts(request);
}
+
+ @Override
+ public List<ResourceTypeInfo> getResourceTypeInfo()
+ throws YarnException, IOException {
+ GetAllResourceTypeInfoRequest request =
+ GetAllResourceTypeInfoRequest.newInstance();
+ return rmClient.getResourceTypeInfo(request).getResourceTypeInfo();
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index 5f6b30017d1..2a9b3bcd925 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -62,6 +62,8 @@
import com.google.common.annotations.VisibleForTesting;
+import static org.apache.hadoop.yarn.util.StringHelper.getResourceSecondsString;
+
@Private
@Unstable
public class ApplicationCLI extends YarnCLI {
@@ -711,24 +713,9 @@ private int printApplicationReport(String applicationId)
appReportStr.println(appReport.getRpcPort());
appReportStr.print("\tAM Host : ");
appReportStr.println(appReport.getHost());
- appReportStr.print("\tAggregate Resource Allocation : ");
-
ApplicationResourceUsageReport usageReport =
appReport.getApplicationResourceUsageReport();
- if (usageReport != null) {
- //completed app report in the timeline server doesn't have usage report
- appReportStr.print(usageReport.getMemorySeconds() + " MB-seconds, ");
- appReportStr.println(usageReport.getVcoreSeconds() + " vcore-seconds");
- appReportStr.print("\tAggregate Resource Preempted : ");
- appReportStr.print(usageReport.getPreemptedMemorySeconds() +
- " MB-seconds, ");
- appReportStr.println(usageReport.getPreemptedVcoreSeconds() +
- " vcore-seconds");
- } else {
- appReportStr.println("N/A");
- appReportStr.print("\tAggregate Resource Preempted : ");
- appReportStr.println("N/A");
- }
+ printResourceUsage(appReportStr, usageReport);
appReportStr.print("\tLog Aggregation Status : ");
appReportStr.println(appReport.getLogAggregationStatus() == null ? "N/A"
: appReport.getLogAggregationStatus());
@@ -759,6 +746,22 @@ private int printApplicationReport(String applicationId)
return 0;
}
+ private void printResourceUsage(PrintWriter appReportStr,
+ ApplicationResourceUsageReport usageReport) {
+ appReportStr.print("\tAggregate Resource Allocation : ");
+ if (usageReport != null) {
+ appReportStr.println(
+ getResourceSecondsString(usageReport.getResourceSecondsMap()));
+ appReportStr.print("\tAggregate Resource Preempted : ");
+ appReportStr.println(getResourceSecondsString(
+ usageReport.getPreemptedResourceSecondsMap()));
+ } else {
+ appReportStr.println("N/A");
+ appReportStr.print("\tAggregate Resource Preempted : ");
+ appReportStr.println("N/A");
+ }
+ }
+
private String getAllValidApplicationStates() {
StringBuilder sb = new StringBuilder();
sb.append("The valid application state can be" + " one of the following: ");
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 13730f1c830..84cfb0ad222 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -39,8 +39,10 @@
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
@@ -69,6 +71,7 @@
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
@@ -118,9 +121,18 @@ public void testGetApplicationReport() throws Exception {
for (int i = 0; i < 2; ++i) {
ApplicationCLI cli = createAndGetAppCLI();
ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
+ Map<String, Long> resourceSecondsMap = new HashMap<>();
+ Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
+ resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), 123456L);
+ resourceSecondsMap.put(ResourceInformation.VCORES.getName(), 4567L);
+ preemptedResoureSecondsMap
+ .put(ResourceInformation.MEMORY_MB.getName(), 1111L);
+ preemptedResoureSecondsMap
+ .put(ResourceInformation.VCORES.getName(), 2222L);
ApplicationResourceUsageReport usageReport = i == 0 ? null :
- ApplicationResourceUsageReport.newInstance(
- 2, 0, null, null, null, 123456, 4567, 0, 0, 1111, 2222);
+ ApplicationResourceUsageReport
+ .newInstance(2, 0, null, null, null, resourceSecondsMap, 0, 0,
+ preemptedResoureSecondsMap);
ApplicationReport newApplicationReport = ApplicationReport.newInstance(
applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
"user", "queue", "appname", "host", 124, null,
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index f50da016f6b..cc99095047f 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -213,6 +213,14 @@
true
+ <testResources>
+ <testResource>
+ <directory>${project.basedir}/src/test/resources</directory>
+ </testResource>
+ <testResource>
+ <directory>${project.basedir}/src/test/resources/resource-types</directory>
+ </testResource>
+ </testResources>
org.apache.rat
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
index cef03b9b052..73c49906c37 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
@@ -51,7 +51,8 @@ public synchronized InputStream getConfigurationInputStream(
"Illegal argument! The parameter should not be null or empty");
}
Path filePath;
- if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name)) {
+ if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name) ||
+ YarnConfiguration.NM_CONFIGURATION_FILES.contains(name)) {
filePath = new Path(this.configDir, name);
if (!fs.exists(filePath)) {
LOG.info(filePath + " not found");
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java
index cfa194fb5b2..0cdbd1516d5 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java
@@ -39,7 +39,8 @@ public InputStream getConfigurationInputStream(Configuration bootstrapConf,
if (name == null || name.isEmpty()) {
throw new YarnException(
"Illegal argument! The parameter should not be null or empty");
- } else if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name)) {
+ } else if (YarnConfiguration.RM_CONFIGURATION_FILES.contains(name) ||
+ YarnConfiguration.NM_CONFIGURATION_FILES.contains(name)) {
return bootstrapConf.getConfResourceAsInputStream(name);
}
return new FileInputStream(name);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java
index ad7cb296080..a6ccd2a04f2 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ApplicationClientProtocolPBClientImpl.java
@@ -89,6 +89,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FailApplicationAttemptRequestPBImpl;
@@ -147,6 +149,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.proto.YarnServiceProtos;
@@ -619,4 +623,18 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
return null;
}
}
+
+ @Override
+ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
+ GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
+ YarnServiceProtos.GetAllResourceTypeInfoRequestProto requestProto =
+ ((GetAllResourceTypeInfoRequestPBImpl) request).getProto();
+ try {
+ return new GetAllResourceTypeInfoResponsePBImpl(
+ proxy.getResourceTypeInfo(null, requestProto));
+ } catch (ServiceException e) {
+ RPCUtil.unwrapAndThrowException(e);
+ return null;
+ }
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java
index 93ce6a343c5..ca4276a92ee 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ApplicationClientProtocolPBServiceImpl.java
@@ -58,6 +58,7 @@
import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FailApplicationAttemptRequestPBImpl;
@@ -116,6 +117,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.UpdateApplicationTimeoutsResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoResponsePBImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.proto.YarnServiceProtos;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.FailApplicationAttemptRequestProto;
@@ -169,6 +172,8 @@
import org.apache.hadoop.yarn.proto.YarnServiceProtos.UpdateApplicationTimeoutsResponseProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -631,4 +636,20 @@ public UpdateApplicationTimeoutsResponseProto updateApplicationTimeouts(
throw new ServiceException(e);
}
}
+
+ @Override
+ public GetAllResourceTypeInfoResponseProto getResourceTypeInfo(
+ RpcController controller, GetAllResourceTypeInfoRequestProto proto)
+ throws ServiceException {
+ GetAllResourceTypeInfoRequestPBImpl req = new GetAllResourceTypeInfoRequestPBImpl(
+ proto);
+ try {
+ GetAllResourceTypeInfoResponse resp = real.getResourceTypeInfo(req);
+ return ((GetAllResourceTypeInfoResponsePBImpl) resp).getProto();
+ } catch (YarnException ye) {
+ throw new ServiceException(ye);
+ } catch (IOException ie) {
+ throw new ServiceException(ie);
+ }
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoRequestPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoRequestPBImpl.java
new file mode 100644
index 00000000000..b3f4692412e
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoRequestPBImpl.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoRequestProto;
+
+/**
+ * Protobuf implementation class for GetAllResourceTypeInfoRequest.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class GetAllResourceTypeInfoRequestPBImpl
+ extends GetAllResourceTypeInfoRequest {
+
+ private GetAllResourceTypeInfoRequestProto proto =
+ GetAllResourceTypeInfoRequestProto.getDefaultInstance();
+ private GetAllResourceTypeInfoRequestProto.Builder builder = null;
+
+ private boolean viaProto = false;
+
+ public GetAllResourceTypeInfoRequestPBImpl() {
+ builder = GetAllResourceTypeInfoRequestProto.newBuilder();
+ }
+
+ public GetAllResourceTypeInfoRequestPBImpl(
+ GetAllResourceTypeInfoRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetAllResourceTypeInfoRequestProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoResponsePBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoResponsePBImpl.java
new file mode 100644
index 00000000000..28decebcabf
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetAllResourceTypeInfoResponsePBImpl.java
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourceTypeInfoPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypeInfoProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllResourceTypeInfoResponseProtoOrBuilder;
+
+import com.google.protobuf.TextFormat;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Protobuf implementation class for the GetAllResourceTypeInfoResponse.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class GetAllResourceTypeInfoResponsePBImpl
+ extends
+ GetAllResourceTypeInfoResponse {
+
+ private GetAllResourceTypeInfoResponseProto proto = GetAllResourceTypeInfoResponseProto
+ .getDefaultInstance();
+ private GetAllResourceTypeInfoResponseProto.Builder builder = null;
+ private boolean viaProto = false;
+
+ private List<ResourceTypeInfo> resourceTypeInfo;
+
+ public GetAllResourceTypeInfoResponsePBImpl() {
+ builder = GetAllResourceTypeInfoResponseProto.newBuilder();
+ }
+
+ public GetAllResourceTypeInfoResponsePBImpl(
+ GetAllResourceTypeInfoResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public GetAllResourceTypeInfoResponseProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public void setResourceTypeInfo(List<ResourceTypeInfo> resourceTypes) {
+ if (resourceTypeInfo == null) {
+ builder.clearResourceTypeInfo();
+ }
+ this.resourceTypeInfo = resourceTypes;
+ }
+
+ @Override
+ public List<ResourceTypeInfo> getResourceTypeInfo() {
+ initResourceTypeInfosList();
+ return this.resourceTypeInfo;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null) {
+ return false;
+ }
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return TextFormat.shortDebugString(getProto());
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.resourceTypeInfo != null) {
+ addResourceTypeInfosToProto();
+ }
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto) {
+ maybeInitBuilder();
+ }
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = GetAllResourceTypeInfoResponseProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ // Once this is called, resourceTypeInfo will never be null - until a
+ // getProto is called.
+ private void initResourceTypeInfosList() {
+ if (this.resourceTypeInfo != null) {
+ return;
+ }
+ GetAllResourceTypeInfoResponseProtoOrBuilder p = viaProto ? proto : builder;
+ List<ResourceTypeInfoProto> list = p.getResourceTypeInfoList();
+ resourceTypeInfo = new ArrayList<ResourceTypeInfo>();
+
+ for (ResourceTypeInfoProto a : list) {
+ resourceTypeInfo.add(convertFromProtoFormat(a));
+ }
+ }
+
+ private void addResourceTypeInfosToProto() {
+ maybeInitBuilder();
+ builder.clearResourceTypeInfo();
+ if (resourceTypeInfo == null) {
+ return;
+ }
+ Iterable<ResourceTypeInfoProto> iterable = new Iterable<ResourceTypeInfoProto>() {
+ @Override
+ public Iterator<ResourceTypeInfoProto> iterator() {
+ return new Iterator<ResourceTypeInfoProto>() {
+
+ Iterator<ResourceTypeInfo> iter = resourceTypeInfo.iterator();
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+
+ @Override
+ public ResourceTypeInfoProto next() {
+ return convertToProtoFormat(iter.next());
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+
+ }
+ };
+
+ }
+ };
+ builder.addAllResourceTypeInfo(iterable);
+ }
+
+ private ResourceTypeInfoPBImpl convertFromProtoFormat(
+ ResourceTypeInfoProto p) {
+ return new ResourceTypeInfoPBImpl(p);
+ }
+
+ private ResourceTypeInfoProto convertToProtoFormat(ResourceTypeInfo t) {
+ return ((ResourceTypeInfoPBImpl) t).getProto();
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java
index 1c85e28dca8..14ede5dbf34 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java
@@ -22,12 +22,15 @@
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import com.google.protobuf.TextFormat;
+import java.util.Map;
+
@Private
@Unstable
public class ApplicationResourceUsageReportPBImpl
@@ -41,6 +44,9 @@
Resource reservedResources;
Resource neededResources;
+ private Map<String, Long> resourceSecondsMap;
+ private Map<String, Long> preemptedResourceSecondsMap;
+
public ApplicationResourceUsageReportPBImpl() {
builder = ApplicationResourceUsageReportProto.newBuilder();
}
@@ -49,6 +55,8 @@ public ApplicationResourceUsageReportPBImpl(
ApplicationResourceUsageReportProto proto) {
this.proto = proto;
viaProto = true;
+ getResourceSecondsMap();
+ getPreemptedResourceSecondsMap();
}
public synchronized ApplicationResourceUsageReportProto getProto() {
@@ -89,6 +97,23 @@ private void mergeLocalToBuilder() {
if (this.neededResources != null) {
builder.setNeededResources(convertToProtoFormat(this.neededResources));
}
+ builder.clearApplicationResourceUsageMap();
+ builder.clearApplicationPreemptedResourceUsageMap();
+
+ if (preemptedResourceSecondsMap != null && !preemptedResourceSecondsMap
+ .isEmpty()) {
+ builder.addAllApplicationPreemptedResourceUsageMap(ProtoUtils
+ .convertMapToStringLongMapProtoList(preemptedResourceSecondsMap));
+ }
+ if (resourceSecondsMap != null && !resourceSecondsMap.isEmpty()) {
+ builder.addAllApplicationResourceUsageMap(
+ ProtoUtils.convertMapToStringLongMapProtoList(resourceSecondsMap));
+ }
+
+ builder.setMemorySeconds(this.getMemorySeconds());
+ builder.setVcoreSeconds(this.getVcoreSeconds());
+ builder.setPreemptedMemorySeconds(this.getPreemptedMemorySeconds());
+ builder.setPreemptedVcoreSeconds(this.getPreemptedVcoreSeconds());
}
private void mergeLocalToProto() {
@@ -196,54 +221,64 @@ public synchronized void setNeededResources(Resource reserved_resources) {
@Override
public synchronized void setMemorySeconds(long memory_seconds) {
- maybeInitBuilder();
- builder.setMemorySeconds(memory_seconds);
+ getResourceSecondsMap()
+ .put(ResourceInformation.MEMORY_MB.getName(), memory_seconds);
}
-
+
@Override
public synchronized long getMemorySeconds() {
- ApplicationResourceUsageReportProtoOrBuilder p = viaProto ? proto : builder;
- return p.getMemorySeconds();
+ Map<String, Long> tmp = getResourceSecondsMap();
+ if (tmp.containsKey(ResourceInformation.MEMORY_MB.getName())) {
+ return tmp.get(ResourceInformation.MEMORY_MB.getName());
+ }
+ return 0;
}
@Override
public synchronized void setVcoreSeconds(long vcore_seconds) {
- maybeInitBuilder();
- builder.setVcoreSeconds(vcore_seconds);
+ getResourceSecondsMap()
+ .put(ResourceInformation.VCORES.getName(), vcore_seconds);
}
@Override
public synchronized long getVcoreSeconds() {
- ApplicationResourceUsageReportProtoOrBuilder p = viaProto ? proto : builder;
- return (p.getVcoreSeconds());
+ Map<String, Long> tmp = getResourceSecondsMap();
+ if (tmp.containsKey(ResourceInformation.VCORES.getName())) {
+ return tmp.get(ResourceInformation.VCORES.getName());
+ }
+ return 0;
}
@Override
public synchronized void setPreemptedMemorySeconds(
long preemptedMemorySeconds) {
- maybeInitBuilder();
- builder.setPreemptedMemorySeconds(preemptedMemorySeconds);
+ getPreemptedResourceSecondsMap()
+ .put(ResourceInformation.MEMORY_MB.getName(), preemptedMemorySeconds);
}
@Override
public synchronized long getPreemptedMemorySeconds() {
- ApplicationResourceUsageReportProtoOrBuilder p =
- viaProto ? proto : builder;
- return p.getPreemptedMemorySeconds();
+ Map<String, Long> tmp = getPreemptedResourceSecondsMap();
+ if (tmp.containsKey(ResourceInformation.MEMORY_MB.getName())) {
+ return tmp.get(ResourceInformation.MEMORY_MB.getName());
+ }
+ return 0;
}
@Override
public synchronized void setPreemptedVcoreSeconds(
long vcoreSeconds) {
- maybeInitBuilder();
- builder.setPreemptedVcoreSeconds(vcoreSeconds);
+ getPreemptedResourceSecondsMap()
+ .put(ResourceInformation.VCORES.getName(), vcoreSeconds);
}
@Override
public synchronized long getPreemptedVcoreSeconds() {
- ApplicationResourceUsageReportProtoOrBuilder p =
- viaProto ? proto : builder;
- return (p.getPreemptedVcoreSeconds());
+ Map<String, Long> tmp = getPreemptedResourceSecondsMap();
+ if (tmp.containsKey(ResourceInformation.VCORES.getName())) {
+ return tmp.get(ResourceInformation.VCORES.getName());
+ }
+ return 0;
}
private ResourcePBImpl convertFromProtoFormat(ResourceProto p) {
@@ -277,4 +312,81 @@ public synchronized void setClusterUsagePercentage(float clusterUsagePerc) {
maybeInitBuilder();
builder.setClusterUsagePercentage((clusterUsagePerc));
}
+
+ @Override
+ public synchronized void setResourceSecondsMap(
+ Map<String, Long> resourceSecondsMap) {
+ this.resourceSecondsMap = resourceSecondsMap;
+ if (resourceSecondsMap == null) {
+ return;
+ }
+ if (!resourceSecondsMap
+ .containsKey(ResourceInformation.MEMORY_MB.getName())) {
+ this.setMemorySeconds(0L);
+ }
+ if (!resourceSecondsMap.containsKey(ResourceInformation.VCORES.getName())) {
+ this.setVcoreSeconds(0L);
+ }
+ }
+
+ @Override
+ public synchronized Map<String, Long> getResourceSecondsMap() {
+ if (this.resourceSecondsMap != null) {
+ return this.resourceSecondsMap;
+ }
+ ApplicationResourceUsageReportProtoOrBuilder p = viaProto ? proto : builder;
+ this.resourceSecondsMap = ProtoUtils
+ .convertStringLongMapProtoListToMap(
+ p.getApplicationResourceUsageMapList());
+ if (!this.resourceSecondsMap
+ .containsKey(ResourceInformation.MEMORY_MB.getName())) {
+ this.setMemorySeconds(p.getMemorySeconds());
+ }
+ if (!this.resourceSecondsMap
+ .containsKey(ResourceInformation.VCORES.getName())) {
+ this.setVcoreSeconds(p.getVcoreSeconds());
+ }
+ this.setMemorySeconds(p.getMemorySeconds());
+ this.setVcoreSeconds(p.getVcoreSeconds());
+ return this.resourceSecondsMap;
+ }
+
+ @Override
+ public synchronized void setPreemptedResourceSecondsMap(
+ Map<String, Long> preemptedResourceSecondsMap) {
+ this.preemptedResourceSecondsMap = preemptedResourceSecondsMap;
+ if (preemptedResourceSecondsMap == null) {
+ return;
+ }
+ if (!preemptedResourceSecondsMap
+ .containsKey(ResourceInformation.MEMORY_MB.getName())) {
+ this.setPreemptedMemorySeconds(0L);
+ }
+ if (!preemptedResourceSecondsMap
+ .containsKey(ResourceInformation.VCORES.getName())) {
+ this.setPreemptedVcoreSeconds(0L);
+ }
+ }
+
+ @Override
+ public synchronized Map<String, Long> getPreemptedResourceSecondsMap() {
+ if (this.preemptedResourceSecondsMap != null) {
+ return this.preemptedResourceSecondsMap;
+ }
+ ApplicationResourceUsageReportProtoOrBuilder p = viaProto ? proto : builder;
+ this.preemptedResourceSecondsMap = ProtoUtils
+ .convertStringLongMapProtoListToMap(
+ p.getApplicationPreemptedResourceUsageMapList());
+ if (!this.preemptedResourceSecondsMap
+ .containsKey(ResourceInformation.MEMORY_MB.getName())) {
+ this.setPreemptedMemorySeconds(p.getPreemptedMemorySeconds());
+ }
+ if (!this.preemptedResourceSecondsMap
+ .containsKey(ResourceInformation.VCORES.getName())) {
+ this.setPreemptedVcoreSeconds(p.getPreemptedVcoreSeconds());
+ }
+ this.setPreemptedMemorySeconds(p.getPreemptedMemorySeconds());
+ this.setPreemptedVcoreSeconds(p.getPreemptedVcoreSeconds());
+ return this.preemptedResourceSecondsMap;
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
index 6ce42e82cf4..528cf8e6003 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
@@ -19,10 +19,15 @@
package org.apache.hadoop.yarn.api.records.impl.pb;
import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
import org.apache.hadoop.yarn.api.records.AMCommand;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
@@ -45,6 +50,7 @@
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.UpdateContainerError;
import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
@@ -73,6 +79,7 @@
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerTypeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ExecutionTypeRequestProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypesProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerUpdateTypeProto;
import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -449,6 +456,45 @@ public static UpdateContainerErrorPBImpl convertFromProtoFormat(
convertToProtoFormat(UpdateContainerError t) {
return ((UpdateContainerErrorPBImpl) t).getProto();
}
+
+ /*
+ * ResourceTypes
+ */
+ public static ResourceTypesProto converToProtoFormat(ResourceTypes e) {
+ return ResourceTypesProto.valueOf(e.name());
+ }
+
+ public static ResourceTypes convertFromProtoFormat(ResourceTypesProto e) {
+ return ResourceTypes.valueOf(e.name());
+ }
+
+ public static Map<String, Long> convertStringLongMapProtoListToMap(
+ List<YarnProtos.StringLongMapProto> pList) {
+ Resource tmp = Resource.newInstance(0, 0);
+ Map<String, Long> ret = new HashMap<>();
+ for (ResourceInformation entry : tmp.getResources()) {
+ ret.put(entry.getName(), 0L);
+ }
+ if (pList != null) {
+ for (YarnProtos.StringLongMapProto p : pList) {
+ ret.put(p.getKey(), p.getValue());
+ }
+ }
+ return ret;
+ }
+
+ public static List<YarnProtos.StringLongMapProto> convertMapToStringLongMapProtoList(
+ Map<String, Long> map) {
+ List<YarnProtos.StringLongMapProto> ret = new ArrayList<>();
+ for (Map.Entry<String, Long> entry : map.entrySet()) {
+ YarnProtos.StringLongMapProto.Builder tmp =
+ YarnProtos.StringLongMapProto.newBuilder();
+ tmp.setKey(entry.getKey());
+ tmp.setValue(entry.getValue());
+ ret.add(tmp.build());
+ }
+ return ret;
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 2404a63ea69..72aaffd5874 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -18,16 +18,30 @@
package org.apache.hadoop.yarn.api.records.impl.pb;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+
+import java.util.Map;
+
@Private
@Unstable
public class ResourcePBImpl extends Resource {
+
+ private static final Log LOG = LogFactory.getLog(ResourcePBImpl.class);
+
ResourceProto proto = ResourceProto.getDefaultInstance();
ResourceProto.Builder builder = null;
boolean viaProto = false;
@@ -47,14 +61,17 @@ static ResourceProto getProto(Resource r) {
public ResourcePBImpl() {
builder = ResourceProto.newBuilder();
+ initResources();
}
public ResourcePBImpl(ResourceProto proto) {
this.proto = proto;
viaProto = true;
+ initResources();
}
-
+
public ResourceProto getProto() {
+ mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
@@ -75,8 +92,14 @@ public int getMemory() {
@Override
public long getMemorySize() {
- ResourceProtoOrBuilder p = viaProto ? proto : builder;
- return p.getMemory();
+ // memory should always be present
+ ResourceInformation ri = resources[MEMORY_INDEX];
+
+ if (ri.getUnits().isEmpty()) {
+ return ri.getValue();
+ }
+ return UnitsConversionUtil.convert(ri.getUnits(),
+ ResourceInformation.MEMORY_MB.getUnits(), ri.getValue());
}
@Override
@@ -88,18 +111,129 @@ public void setMemory(int memory) {
@Override
public void setMemorySize(long memory) {
maybeInitBuilder();
- builder.setMemory(memory);
+ getResourceInformation(ResourceInformation.MEMORY_URI).setValue(memory);
}
@Override
public int getVirtualCores() {
- ResourceProtoOrBuilder p = viaProto ? proto : builder;
- return p.getVirtualCores();
+ // vcores should always be present
+ return (int) resources[VCORES_INDEX].getValue();
}
@Override
public void setVirtualCores(int vCores) {
maybeInitBuilder();
- builder.setVirtualCores(vCores);
+ getResourceInformation(ResourceInformation.VCORES_URI).setValue(vCores);
+ }
+
+ private void initResources() {
+ if (this.resources != null) {
+ return;
+ }
+ ResourceProtoOrBuilder p = viaProto ? proto : builder;
+ initResourcesMap();
+ Map<String, Integer> indexMap = ResourceUtils.getResourceTypeIndex();
+ for (ResourceInformationProto entry : p.getResourceValueMapList()) {
+ ResourceTypes type =
+ entry.hasType() ? ProtoUtils.convertFromProtoFormat(entry.getType()) :
+ ResourceTypes.COUNTABLE;
+
+ // When unit not specified in proto, use the default unit.
+ String units =
+ entry.hasUnits() ? entry.getUnits() : ResourceUtils.getDefaultUnit(
+ entry.getKey());
+ long value = entry.hasValue() ? entry.getValue() : 0L;
+ ResourceInformation ri =
+ ResourceInformation.newInstance(entry.getKey(), units, value, type);
+ Integer index = indexMap.get(entry.getKey());
+ if (index == null) {
+ LOG.warn("Got unknown resource type: " + ri.getName() + "; skipping");
+ } else {
+ resources[index].setResourceType(ri.getResourceType());
+ resources[index].setUnits(ri.getUnits());
+ resources[index].setValue(value);
+ }
+ }
+ this.setMemorySize(p.getMemory());
+ this.setVirtualCores(p.getVirtualCores());
+ }
+
+ @Override
+ public void setResourceInformation(String resource,
+ ResourceInformation resourceInformation) {
+ maybeInitBuilder();
+ if (resource == null || resourceInformation == null) {
+ throw new IllegalArgumentException(
+ "resource and/or resourceInformation cannot be null");
+ }
+ if (!resource.equals(resourceInformation.getName())) {
+ resourceInformation.setName(resource);
+ }
+ ResourceInformation storedResourceInfo = getResourceInformation(resource);
+ ResourceInformation.copy(resourceInformation, storedResourceInfo);
+ }
+
+ @Override
+ public void setResourceValue(String resource, long value)
+ throws ResourceNotFoundException {
+ maybeInitBuilder();
+ if (resource == null) {
+ throw new IllegalArgumentException("resource type object cannot be null");
+ }
+ getResourceInformation(resource).setValue(value);
+ }
+
+ @Override
+ public ResourceInformation getResourceInformation(String resource)
+ throws ResourceNotFoundException {
+ return super.getResourceInformation(resource);
+ }
+
+ @Override
+ public long getResourceValue(String resource)
+ throws ResourceNotFoundException {
+ return super.getResourceValue(resource);
+ }
+
+ private void initResourcesMap() {
+ if (resources == null) {
+ ResourceInformation[] types = ResourceUtils.getResourceTypesArray();
+ if (types == null) {
+ throw new YarnRuntimeException(
+ "Got null return value from ResourceUtils.getResourceTypes()");
+ }
+
+ resources = new ResourceInformation[types.length];
+ for (ResourceInformation entry : types) {
+ int index = ResourceUtils.getResourceTypeIndex().get(entry.getName());
+ resources[index] = ResourceInformation.newInstance(entry);
+ }
+ }
+ }
+
+ synchronized private void mergeLocalToBuilder() {
+ builder.clearResourceValueMap();
+ if(resources != null && resources.length != 0) {
+ for (ResourceInformation resInfo : resources) {
+ ResourceInformationProto.Builder e = ResourceInformationProto
+ .newBuilder();
+ e.setKey(resInfo.getName());
+ e.setUnits(resInfo.getUnits());
+ e.setType(ProtoUtils.converToProtoFormat(resInfo.getResourceType()));
+ e.setValue(resInfo.getValue());
+ builder.addResourceValueMap(e);
+ }
+ }
+ builder.setMemory(this.getMemorySize());
+ builder.setVirtualCores(this.getVirtualCores());
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto) {
+ maybeInitBuilder();
+ }
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceTypeInfoPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceTypeInfoPBImpl.java
new file mode 100644
index 00000000000..17230e7dfa5
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceTypeInfoPBImpl.java
@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
+import org.apache.hadoop.yarn.proto.YarnProtos;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypeInfoProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceTypesProto;
+
+/**
+ * {@code ResourceTypeInfoPBImpl} which implements the
+ * {@link ResourceTypeInfo} class which represents different resource types
+ * supported in YARN.
+ */
+@Private
+@Unstable
+public class ResourceTypeInfoPBImpl extends ResourceTypeInfo {
+
+ ResourceTypeInfoProto proto = ResourceTypeInfoProto.getDefaultInstance();
+ ResourceTypeInfoProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private String name = null;
+ private String defaultUnit = null;
+ private ResourceTypes resourceTypes = null;
+
+ public ResourceTypeInfoPBImpl() {
+ builder = ResourceTypeInfoProto.newBuilder();
+ }
+
+ public ResourceTypeInfoPBImpl(ResourceTypeInfoProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public ResourceTypeInfoProto getProto() {
+ mergeLocalToProto();
+ return proto;
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto) {
+ maybeInitBuilder();
+ }
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.name != null) {
+ builder.setName(this.name);
+ }
+ if (this.defaultUnit != null) {
+ builder.setUnits(this.defaultUnit);
+ }
+ if (this.resourceTypes != null) {
+ builder.setType(convertToProtoFormat(this.resourceTypes));
+ }
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = YarnProtos.ResourceTypeInfoProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public String getName() {
+ if (this.name != null) {
+ return this.name;
+ }
+
+ YarnProtos.ResourceTypeInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getName();
+ }
+
+ @Override
+ public void setName(String rName) {
+ maybeInitBuilder();
+ if (rName == null) {
+ builder.clearName();
+ }
+ this.name = rName;
+ }
+
+ @Override
+ public String getDefaultUnit() {
+ if (this.defaultUnit != null) {
+ return this.defaultUnit;
+ }
+
+ YarnProtos.ResourceTypeInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getUnits();
+ }
+
+ @Override
+ public void setDefaultUnit(String rUnits) {
+ maybeInitBuilder();
+ if (rUnits == null) {
+ builder.clearUnits();
+ }
+ this.defaultUnit = rUnits;
+ }
+
+ @Override
+ public ResourceTypes getResourceType() {
+ if (this.resourceTypes != null) {
+ return this.resourceTypes;
+ }
+
+ YarnProtos.ResourceTypeInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return convertFromProtoFormat(p.getType());
+ }
+
+ @Override
+ public void setResourceType(ResourceTypes type) {
+ maybeInitBuilder();
+ if (type == null) {
+ builder.clearType();
+ }
+ this.resourceTypes = type;
+ }
+
+ public static ResourceTypesProto convertToProtoFormat(ResourceTypes e) {
+ return ResourceTypesProto.valueOf(e.name());
+ }
+
+ public static ResourceTypes convertFromProtoFormat(ResourceTypesProto e) {
+ return ResourceTypes.valueOf(e.name());
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
index 0240fbcd59f..331be308e2e 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
@@ -20,9 +20,15 @@
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
/**
* Common string manipulation helpers
@@ -174,4 +180,34 @@ private static void uappend(StringBuilder sb, String part) {
}
sb.append(part);
}
+
+ public static String getResourceSecondsString(Map<String, Long> targetMap) {
+ List<String> strings = new ArrayList<>(targetMap.size());
+ //completed app report in the timeline server doesn't have usage report
+ Long memorySeconds = 0L;
+ Long vcoreSeconds = 0L;
+ if (targetMap.containsKey(ResourceInformation.MEMORY_MB.getName())) {
+ memorySeconds = targetMap.get(ResourceInformation.MEMORY_MB.getName());
+ }
+ if (targetMap.containsKey(ResourceInformation.VCORES.getName())) {
+ vcoreSeconds = targetMap.get(ResourceInformation.VCORES.getName());
+ }
+ strings.add(memorySeconds + " MB-seconds");
+ strings.add(vcoreSeconds + " vcore-seconds");
+ Map<String, ResourceInformation> tmp = ResourceUtils.getResourceTypes();
+ if (targetMap.size() > 2) {
+ for (Map.Entry<String, Long> entry : targetMap.entrySet()) {
+ if (!entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
+ && !entry.getKey().equals(ResourceInformation.VCORES.getName())) {
+ String units = "";
+ if (tmp.containsKey(entry.getKey())) {
+ units = tmp.get(entry.getKey()).getUnits();
+ }
+ strings.add(entry.getValue() + " " + entry.getKey() + "-" + units
+ + "seconds");
+ }
+ }
+ }
+ return String.join(", ", strings);
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index bdf60bd9a5b..7f155e7a40e 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -123,8 +123,7 @@ public Resource multiplyAndNormalizeDown(Resource r, double by,
}
@Override
- public boolean fitsIn(Resource cluster,
- Resource smaller, Resource bigger) {
+ public boolean fitsIn(Resource smaller, Resource bigger) {
return smaller.getMemorySize() <= bigger.getMemorySize();
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 7697e1dfc33..ca828a5251b 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -22,136 +22,395 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Arrays;
/**
- * A {@link ResourceCalculator} which uses the concept of
+ * A {@link ResourceCalculator} which uses the concept of
* dominant resource to compare multi-dimensional resources.
*
- * Essentially the idea is that the in a multi-resource environment,
- * the resource allocation should be determined by the dominant share
- * of an entity (user or queue), which is the maximum share that the
- * entity has been allocated of any resource.
- *
- * In a nutshell, it seeks to maximize the minimum dominant share across
- * all entities.
- *
+ * Essentially the idea is that the in a multi-resource environment,
+ * the resource allocation should be determined by the dominant share
+ * of an entity (user or queue), which is the maximum share that the
+ * entity has been allocated of any resource.
+ *
+ * In a nutshell, it seeks to maximize the minimum dominant share across
+ * all entities.
+ *
* For example, if user A runs CPU-heavy tasks and user B runs
- * memory-heavy tasks, it attempts to equalize CPU share of user A
- * with Memory-share of user B.
- *
+ * memory-heavy tasks, it attempts to equalize CPU share of user A
+ * with Memory-share of user B.
+ *
* In the single resource case, it reduces to max-min fairness for that resource.
- *
+ *
* See the Dominant Resource Fairness paper for more details:
* www.cs.berkeley.edu/~matei/papers/2011/nsdi_drf.pdf
*/
@Private
@Unstable
public class DominantResourceCalculator extends ResourceCalculator {
- private static final Log LOG =
- LogFactory.getLog(DominantResourceCalculator.class);
+ static final Log LOG = LogFactory.getLog(DominantResourceCalculator.class);
+
+ public DominantResourceCalculator() {
+ }
+
+ /**
+ * Compare two resources - return 1 if the lhs exceeds the rhs in at least
+ * one resource type and is smaller in none, -1 in the symmetric case, and
+ * 0 when the resources are equal or each is larger in some resource type.
+ *
+ * @param lhs resource to be compared
+ * @param rhs resource to be compared
+ * @return 0, 1, or -1
+ */
+ private int compare(Resource lhs, Resource rhs) {
+ boolean lhsGreater = false;
+ boolean rhsGreater = false;
+ int ret = 0;
+
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ ResourceInformation lhsResourceInformation = lhs
+ .getResourceInformation(i);
+ ResourceInformation rhsResourceInformation = rhs
+ .getResourceInformation(i);
+ int diff = lhsResourceInformation.compareTo(rhsResourceInformation);
+ if (diff >= 1) {
+ lhsGreater = true;
+ } else if (diff <= -1) {
+ rhsGreater = true;
+ }
+ }
+ if (lhsGreater && rhsGreater) {
+ ret = 0;
+ } else if (lhsGreater) {
+ ret = 1;
+ } else if (rhsGreater) {
+ ret = -1;
+ }
+ return ret;
+ }
@Override
public int compare(Resource clusterResource, Resource lhs, Resource rhs,
boolean singleType) {
-
if (lhs.equals(rhs)) {
return 0;
}
-
+
if (isInvalidDivisor(clusterResource)) {
- if ((lhs.getMemorySize() < rhs.getMemorySize() &&
- lhs.getVirtualCores() > rhs.getVirtualCores()) ||
- (lhs.getMemorySize() > rhs.getMemorySize() &&
- lhs.getVirtualCores() < rhs.getVirtualCores())) {
- return 0;
- } else if (lhs.getMemorySize() > rhs.getMemorySize()
- || lhs.getVirtualCores() > rhs.getVirtualCores()) {
- return 1;
- } else if (lhs.getMemorySize() < rhs.getMemorySize()
- || lhs.getVirtualCores() < rhs.getVirtualCores()) {
- return -1;
+ return this.compare(lhs, rhs);
+ }
+
+ // We have to calculate the shares for all resource types for both
+ // resources and then look for which resource has the biggest
+ // share overall.
+ ResourceInformation[] clusterRes = clusterResource.getResources();
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+
+ // If array creation shows up as a time sink, these arrays could be cached
+ // because they're always the same length.
+ double[] lhsShares = new double[maxLength];
+ double[] rhsShares = new double[maxLength];
+ double diff;
+
+ try {
+ if (singleType) {
+ double[] max = new double[2];
+
+ calculateShares(clusterRes, lhs, rhs, lhsShares, rhsShares, max);
+
+ diff = max[0] - max[1];
+ } else if (maxLength == 2) {
+ // Special case to handle the common scenario of only CPU and memory
+ // so that we can optimize for performance
+ diff = calculateSharesForTwoMandatoryResources(clusterRes, lhs, rhs,
+ lhsShares, rhsShares);
+ } else {
+ calculateShares(clusterRes, lhs, rhs, lhsShares, rhsShares);
+
+ Arrays.sort(lhsShares);
+ Arrays.sort(rhsShares);
+
+ diff = compareShares(lhsShares, rhsShares);
}
+ } catch (ArrayIndexOutOfBoundsException ex) {
+ StringWriter out = new StringWriter(); // No need to close a StringWriter
+ ex.printStackTrace(new PrintWriter(out));
+
+ LOG.error("A problem was encountered while calculating resource "
+ + "availability that should not occur under normal circumstances. "
+ + "Please report this error to the Hadoop community by opening a "
+ + "JIRA ticket at http://issues.apache.org/jira and including the "
+ + "following information:\n* Exception encountered: " + out + "* "
+ + "Cluster resources: " + Arrays.toString(clusterRes) + "\n* "
+ + "LHS resource: " + Arrays.toString(lhs.getResources()) + "\n* "
+ + "RHS resource: " + Arrays.toString(rhs.getResources()));
+ LOG.error("The resource manager is in an inconsistent state. It is safe "
+ + "for the resource manager to be restarted as the error encountered "
+ + "should be transitive. If high availability is enabled, failing "
+ + "over to a standby resource manager is also safe.");
+ throw new YarnRuntimeException("A problem was encountered while "
+ + "calculating resource availability that should not occur under "
+ + "normal circumstances. Please see the log for more information.",
+ ex);
}
- float l = getResourceAsValue(clusterResource, lhs, true);
- float r = getResourceAsValue(clusterResource, rhs, true);
-
- if (l < r) {
+ return (int) Math.signum(diff);
+ }
+
+ /**
+ * Calculate the shares for {@code first} and {@code second} according to
+ * {@code clusterRes}, and store the results in {@code firstShares} and
+ * {@code secondShares}, respectively. All parameters must be non-null.
+ * @param clusterRes the array of ResourceInformation instances that
+ * represents the cluster's maximum resources
+ * @param first the first resource to compare
+ * @param second the second resource to compare
+ * @param firstShares an array to store the shares for the first resource
+ * @param secondShares an array to store the shares for the second resource
+ * The computed shares are stored in {@code firstShares} and
+ * {@code secondShares}; this overload is void and returns no comparison
+ * result, so callers compare the filled arrays themselves.
+ * @throws NullPointerException if any parameter is null
+ */
+ private void calculateShares(ResourceInformation[] clusterRes, Resource first,
+ Resource second, double[] firstShares, double[] secondShares) {
+ ResourceInformation[] firstRes = first.getResources();
+ ResourceInformation[] secondRes = second.getResources();
+
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ firstShares[i] = calculateShare(clusterRes[i], firstRes[i]);
+ secondShares[i] = calculateShare(clusterRes[i], secondRes[i]);
+ }
+ }
+
+ /**
+ * Calculate the shares for {@code first} and {@code second} according to
+ * {@code clusterRes}, and store the results in {@code firstShares} and
+ * {@code secondShares}, respectively. All parameters must be non-null.
+ * This method assumes that the length of {@code clusterRes} is exactly 2 and
+ * makes performance optimizations based on that assumption.
+ * @param clusterRes the array of ResourceInformation instances that
+ * represents the cluster's maximum resources
+ * @param first the first resource to compare
+ * @param second the second resource to compare
+ * @param firstShares an array to store the shares for the first resource
+ * @param secondShares an array to store the shares for the second resource
+ * @return -1, 0, or 1, depending on whether the dominant share of the first
+ * resource is less than, equal to, or greater than that of the second;
+ * ties are broken using the subordinate (non-dominant) shares
+ * @throws NullPointerException if any parameter is null
+ */
+ private int calculateSharesForTwoMandatoryResources(
+ ResourceInformation[] clusterRes, Resource first, Resource second,
+ double[] firstShares, double[] secondShares) {
+ ResourceInformation[] firstRes = first.getResources();
+ ResourceInformation[] secondRes = second.getResources();
+ firstShares[0] = calculateShare(clusterRes[0], firstRes[0]);
+ secondShares[0] = calculateShare(clusterRes[0], secondRes[0]);
+ firstShares[1] = calculateShare(clusterRes[1], firstRes[1]);
+ secondShares[1] = calculateShare(clusterRes[1], secondRes[1]);
+
+ int firstDom = 0;
+ int firstSub = 1;
+ if (firstShares[1] > firstShares[0]) {
+ firstDom = 1;
+ firstSub = 0;
+ }
+ int secondDom = 0;
+ int secondSub = 1;
+ if (secondShares[1] > secondShares[0]) {
+ secondDom = 1;
+ secondSub = 0;
+ }
+
+ if (firstShares[firstDom] > secondShares[secondDom]) {
+ return 1;
+ } else if (firstShares[firstDom] < secondShares[secondDom]) {
return -1;
- } else if (l > r) {
+ } else if (firstShares[firstSub] > secondShares[secondSub]) {
return 1;
- } else if (!singleType) {
- l = getResourceAsValue(clusterResource, lhs, false);
- r = getResourceAsValue(clusterResource, rhs, false);
- if (l < r) {
- return -1;
- } else if (l > r) {
- return 1;
+ } else if (firstShares[firstSub] < secondShares[secondSub]) {
+ return -1;
+ } else {
+ return 0;
+ }
+ }
+
+ /**
+ * Calculate the shares for {@code first} and {@code second} according to
+ * {@code clusterRes}, and store the results in {@code firstShares} and
+ * {@code secondShares}, respectively. {@code max} will be populated with
+ * the max shares from {@code firstShare} and {@code secondShare} in the
+ * first and second indices, respectively. All parameters must be non-null,
+ * and {@code max} must have a length of at least 2.
+ * @param clusterRes the array of ResourceInformation instances that
+ * represents the cluster's maximum resources
+ * @param first the first resource to compare
+ * @param second the second resource to compare
+ * @param firstShares an array to store the shares for the first resource
+ * @param secondShares an array to store the shares for the second resource
+ * @param max an array to store the max shares of the first and second
+ * resources
+ * The computed shares are stored in {@code firstShares} and
+ * {@code secondShares}, and the per-resource maxima in {@code max};
+ * this overload is void and returns no comparison result.
+ * @throws NullPointerException if any parameter is null
+ * @throws ArrayIndexOutOfBoundsException if the length of {@code max} is
+ * less than 2
+ */
+ private void calculateShares(ResourceInformation[] clusterRes, Resource first,
+ Resource second, double[] firstShares, double[] secondShares,
+ double[] max) {
+ ResourceInformation[] firstRes = first.getResources();
+ ResourceInformation[] secondRes = second.getResources();
+
+ max[0] = 0.0;
+ max[1] = 0.0;
+
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ firstShares[i] = calculateShare(clusterRes[i], firstRes[i]);
+ secondShares[i] = calculateShare(clusterRes[i], secondRes[i]);
+
+ if (firstShares[i] > max[0]) {
+ max[0] = firstShares[i];
+ }
+
+ if (secondShares[i] > max[1]) {
+ max[1] = secondShares[i];
}
}
-
- return 0;
}
/**
- * Use 'dominant' for now since we only have 2 resources - gives us a slight
- * performance boost.
- *
- * Once we add more resources, we'll need a more complicated (and slightly
- * less performant algorithm).
+ * Calculate the share for a resource type.
+ * @param clusterRes the resource type for the cluster maximum
+ * @param res the resource type for which to calculate the share
+ * @return the share
+ */
+ private double calculateShare(ResourceInformation clusterRes,
+ ResourceInformation res) {
+ // Convert the resources' units into the cluster resource's units
+ long value = UnitsConversionUtil.convert(res.getUnits(),
+ clusterRes.getUnits(), res.getValue());
+
+ return (double) value / clusterRes.getValue();
+ }
+
+ /**
+ * Compare the two shares arrays by comparing the largest elements, then the
+ * next largest if the previous were equal, etc. The share arrays must be
+ * sorted in ascending order.
+ * @param lhsShares the first share array to compare
+ * @param rhsShares the second share array to compare
+ * @return a number that is less than 0 if the first array is less than the
+ * second, equal to 0 if the arrays are equal, and greater than 0 if the
+ * first array is greater than the second
*/
- protected float getResourceAsValue(
- Resource clusterResource, Resource resource, boolean dominant) {
- // Just use 'dominant' resource
- return (dominant) ?
- Math.max(
- (float)resource.getMemorySize() / clusterResource.getMemorySize(),
- (float)resource.getVirtualCores() / clusterResource.getVirtualCores()
- )
- :
- Math.min(
- (float)resource.getMemorySize() / clusterResource.getMemorySize(),
- (float)resource.getVirtualCores() / clusterResource.getVirtualCores()
- );
- }
-
+ private double compareShares(double[] lhsShares, double[] rhsShares) {
+ double diff = 0.0;
+
+ // lhsShares and rhsShares must necessarily have the same length, because
+ // everyone uses the same master resource list.
+ for (int i = lhsShares.length - 1; i >= 0; i--) {
+ diff = lhsShares[i] - rhsShares[i];
+
+ if (diff != 0.0) {
+ break;
+ }
+ }
+
+ return diff;
+ }
+
@Override
- public long computeAvailableContainers(Resource available, Resource required) {
- return Math.min(
- available.getMemorySize() / required.getMemorySize(),
- available.getVirtualCores() / required.getVirtualCores());
+ public long computeAvailableContainers(Resource available,
+ Resource required) {
+ long min = Long.MAX_VALUE;
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ ResourceInformation availableResource = available
+ .getResourceInformation(i);
+ ResourceInformation requiredResource = required.getResourceInformation(i);
+ long requiredResourceValue = UnitsConversionUtil.convert(
+ requiredResource.getUnits(), availableResource.getUnits(),
+ requiredResource.getValue());
+ if (requiredResourceValue != 0) {
+ long tmp = availableResource.getValue() / requiredResourceValue;
+ min = min < tmp ? min : tmp;
+ }
+ }
+ return min > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) min;
}
@Override
- public float divide(Resource clusterResource,
+ public float divide(Resource clusterResource,
Resource numerator, Resource denominator) {
- return
- getResourceAsValue(clusterResource, numerator, true) /
- getResourceAsValue(clusterResource, denominator, true);
+ int nKnownResourceTypes = ResourceUtils.getNumberOfKnownResourceTypes();
+ ResourceInformation[] clusterRes = clusterResource.getResources();
+ // We have to provide the calculateShares() method with somewhere to store
+ // the shares. We don't actually need these shares afterwards.
+ double[] numeratorShares = new double[nKnownResourceTypes];
+ double[] denominatorShares = new double[nKnownResourceTypes];
+ // We also have to provide a place for calculateShares() to store the max
+ // shares so that we can use them.
+ double[] max = new double[2];
+
+ calculateShares(clusterRes, numerator, denominator, numeratorShares,
+ denominatorShares, max);
+
+ return (float) (max[0] / max[1]);
}
-
+
@Override
public boolean isInvalidDivisor(Resource r) {
- if (r.getMemorySize() == 0.0f || r.getVirtualCores() == 0.0f) {
- return true;
+ for (ResourceInformation res : r.getResources()) {
+ if (res.getValue() == 0L) {
+ return true;
+ }
}
return false;
}
@Override
public float ratio(Resource a, Resource b) {
- return Math.max(
- (float)a.getMemorySize()/b.getMemorySize(),
- (float)a.getVirtualCores()/b.getVirtualCores()
- );
+ float ratio = 0.0f;
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ ResourceInformation aResourceInformation = a.getResourceInformation(i);
+ ResourceInformation bResourceInformation = b.getResourceInformation(i);
+ long bResourceValue = UnitsConversionUtil.convert(
+ bResourceInformation.getUnits(), aResourceInformation.getUnits(),
+ bResourceInformation.getValue());
+ float tmp = (float) aResourceInformation.getValue()
+ / (float) bResourceValue;
+ ratio = ratio > tmp ? ratio : tmp;
+ }
+ return ratio;
}
@Override
public Resource divideAndCeil(Resource numerator, int denominator) {
- return Resources.createResource(
- divideAndCeil(numerator.getMemorySize(), denominator),
- divideAndCeil(numerator.getVirtualCores(), denominator)
- );
+ return divideAndCeil(numerator, (long) denominator);
+ }
+
+ public Resource divideAndCeil(Resource numerator, long denominator) {
+ Resource ret = Resource.newInstance(numerator);
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ ResourceInformation resourceInformation = ret.getResourceInformation(i);
+ resourceInformation
+ .setValue(divideAndCeil(resourceInformation.getValue(), denominator));
+ }
+ return ret;
}
@Override
@@ -164,80 +423,136 @@ public Resource divideAndCeil(Resource numerator, float denominator) {
@Override
public Resource normalize(Resource r, Resource minimumResource,
- Resource maximumResource, Resource stepFactor) {
- if (stepFactor.getMemorySize() == 0 || stepFactor.getVirtualCores() == 0) {
- Resource step = Resources.clone(stepFactor);
- if (stepFactor.getMemorySize() == 0) {
- LOG.error("Memory cannot be allocated in increments of zero. Assuming "
- + minimumResource.getMemorySize() + "MB increment size. "
- + "Please ensure the scheduler configuration is correct.");
- step.setMemorySize(minimumResource.getMemorySize());
- }
+ Resource maximumResource, Resource stepFactor) {
+ Resource ret = Resource.newInstance(r);
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ ResourceInformation rResourceInformation = r.getResourceInformation(i);
+ ResourceInformation minimumResourceInformation = minimumResource
+ .getResourceInformation(i);
+ ResourceInformation maximumResourceInformation = maximumResource
+ .getResourceInformation(i);
+ ResourceInformation stepFactorResourceInformation = stepFactor
+ .getResourceInformation(i);
+ ResourceInformation tmp = ret.getResourceInformation(i);
- if (stepFactor.getVirtualCores() == 0) {
- LOG.error("VCore cannot be allocated in increments of zero. Assuming "
- + minimumResource.getVirtualCores() + "VCores increment size. "
- + "Please ensure the scheduler configuration is correct.");
- step.setVirtualCores(minimumResource.getVirtualCores());
+ long rValue = rResourceInformation.getValue();
+ long minimumValue = UnitsConversionUtil.convert(
+ minimumResourceInformation.getUnits(),
+ rResourceInformation.getUnits(),
+ minimumResourceInformation.getValue());
+ long maximumValue = UnitsConversionUtil.convert(
+ maximumResourceInformation.getUnits(),
+ rResourceInformation.getUnits(),
+ maximumResourceInformation.getValue());
+ long stepFactorValue = UnitsConversionUtil.convert(
+ stepFactorResourceInformation.getUnits(),
+ rResourceInformation.getUnits(),
+ stepFactorResourceInformation.getValue());
+ long value = Math.max(rValue, minimumValue);
+ if (stepFactorValue != 0) {
+ value = roundUp(value, stepFactorValue);
}
-
- stepFactor = step;
+ tmp.setValue(Math.min(value, maximumValue));
+ ret.setResourceInformation(i, tmp);
}
-
- long normalizedMemory = Math.min(
- roundUp(
- Math.max(r.getMemorySize(), minimumResource.getMemorySize()),
- stepFactor.getMemorySize()),
- maximumResource.getMemorySize());
- int normalizedCores = Math.min(
- roundUp(
- Math.max(r.getVirtualCores(), minimumResource.getVirtualCores()),
- stepFactor.getVirtualCores()),
- maximumResource.getVirtualCores());
- return Resources.createResource(normalizedMemory,
- normalizedCores);
+ return ret;
}
@Override
public Resource roundUp(Resource r, Resource stepFactor) {
- return Resources.createResource(
- roundUp(r.getMemorySize(), stepFactor.getMemorySize()),
- roundUp(r.getVirtualCores(), stepFactor.getVirtualCores())
- );
+ return this.rounding(r, stepFactor, true);
}
@Override
public Resource roundDown(Resource r, Resource stepFactor) {
- return Resources.createResource(
- roundDown(r.getMemorySize(), stepFactor.getMemorySize()),
- roundDown(r.getVirtualCores(), stepFactor.getVirtualCores())
- );
+ return this.rounding(r, stepFactor, false);
+ }
+
+ private Resource rounding(Resource r, Resource stepFactor, boolean roundUp) {
+ Resource ret = Resource.newInstance(r);
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ ResourceInformation rResourceInformation = r.getResourceInformation(i);
+ ResourceInformation stepFactorResourceInformation = stepFactor
+ .getResourceInformation(i);
+
+ long rValue = rResourceInformation.getValue();
+ long stepFactorValue = UnitsConversionUtil.convert(
+ stepFactorResourceInformation.getUnits(),
+ rResourceInformation.getUnits(),
+ stepFactorResourceInformation.getValue());
+ long value = rValue;
+ if (stepFactorValue != 0) {
+ value = roundUp
+ ? roundUp(rValue, stepFactorValue)
+ : roundDown(rValue, stepFactorValue);
+ }
+ ResourceInformation.copy(rResourceInformation,
+ ret.getResourceInformation(i));
+ ret.getResourceInformation(i).setValue(value);
+ }
+ return ret;
}
@Override
public Resource multiplyAndNormalizeUp(Resource r, double by,
Resource stepFactor) {
- return Resources.createResource(
- roundUp((long) Math.ceil((float) (r.getMemorySize() * by)),
- stepFactor.getMemorySize()),
- roundUp((int) Math.ceil((float) (r.getVirtualCores() * by)),
- stepFactor.getVirtualCores()));
+ return this.multiplyAndNormalize(r, by, stepFactor, true);
}
@Override
public Resource multiplyAndNormalizeDown(Resource r, double by,
Resource stepFactor) {
- return Resources.createResource(
- roundDown((long) (r.getMemorySize() * by), stepFactor.getMemorySize()),
- roundDown((int) (r.getVirtualCores() * by),
- stepFactor.getVirtualCores()));
+ return this.multiplyAndNormalize(r, by, stepFactor, false);
+ }
+
+ private Resource multiplyAndNormalize(Resource r, double by,
+ Resource stepFactor, boolean roundUp) {
+ Resource ret = Resource.newInstance(r);
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ ResourceInformation rResourceInformation = r.getResourceInformation(i);
+ ResourceInformation stepFactorResourceInformation = stepFactor
+ .getResourceInformation(i);
+ ResourceInformation tmp = ret.getResourceInformation(i);
+
+ long rValue = rResourceInformation.getValue();
+ long stepFactorValue = UnitsConversionUtil.convert(
+ stepFactorResourceInformation.getUnits(),
+ rResourceInformation.getUnits(),
+ stepFactorResourceInformation.getValue());
+ long value;
+ if (stepFactorValue != 0) {
+ value = roundUp
+ ? roundUp((long) Math.ceil((float) (rValue * by)), stepFactorValue)
+ : roundDown((long) (rValue * by), stepFactorValue);
+ } else {
+ value = roundUp
+ ? (long) Math.ceil((float) (rValue * by))
+ : (long) (rValue * by);
+ }
+ tmp.setValue(value);
+ }
+ return ret;
}
@Override
- public boolean fitsIn(Resource cluster,
- Resource smaller, Resource bigger) {
- return smaller.getMemorySize() <= bigger.getMemorySize()
- && smaller.getVirtualCores() <= bigger.getVirtualCores();
+ public boolean fitsIn(Resource smaller, Resource bigger) {
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ ResourceInformation sResourceInformation = smaller
+ .getResourceInformation(i);
+ ResourceInformation bResourceInformation = bigger
+ .getResourceInformation(i);
+ long sResourceValue = UnitsConversionUtil.convert(
+ sResourceInformation.getUnits(), bResourceInformation.getUnits(),
+ sResourceInformation.getValue());
+ if (sResourceValue > bResourceInformation.getValue()) {
+ return false;
+ }
+ }
+ return true;
}
@Override
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 398dac50fa5..d59560fa24d 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -225,8 +225,7 @@ public abstract float divide(
/**
* Check if a smaller resource can be contained by bigger resource.
*/
- public abstract boolean fitsIn(Resource cluster,
- Resource smaller, Resource bigger);
+ public abstract boolean fitsIn(Resource smaller, Resource bigger);
/**
* Check if resource has any major resource types (which are all NodeManagers
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index a1d14fdce73..94857c4a5c6 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -18,104 +18,124 @@
package org.apache.hadoop.yarn.util.resource;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
-@InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"})
+/**
+ * Resources is a computation class which provides a set of apis to do
+ * mathematical operations on Resource object.
+ */
+@InterfaceAudience.LimitedPrivate({ "YARN", "MapReduce" })
@Unstable
public class Resources {
-
- // Java doesn't have const :(
- private static final Resource NONE = new Resource() {
- @Override
- @SuppressWarnings("deprecation")
- public int getMemory() {
- return 0;
- }
+ private static final Log LOG =
+ LogFactory.getLog(Resources.class);
- @Override
- public long getMemorySize() {
- return 0;
+ /**
+ * Helper class to create a resource with a fixed value for all resource
+ * types. For example, a NONE resource which returns 0 for any resource type.
+ */
+ @InterfaceAudience.Private
+ @Unstable
+ static class FixedValueResource extends Resource {
+
+ private final long resourceValue;
+ private String name;
+
+ /**
+ * Constructor for a fixed value resource.
+ * @param rName the name of the resource
+ * @param value the fixed value to be returned for all resource types
+ */
+ FixedValueResource(String rName, long value) {
+ this.resourceValue = value;
+ this.name = rName;
+ initResourceMap();
}
- @Override
- public void setMemorySize(long memory) {
- throw new RuntimeException("NONE cannot be modified!");
+ private int resourceValueToInt() {
+ if(this.resourceValue > Integer.MAX_VALUE) {
+ return Integer.MAX_VALUE;
+ }
+ return Long.valueOf(this.resourceValue).intValue();
}
@Override
@SuppressWarnings("deprecation")
- public void setMemory(int memory) {
- throw new RuntimeException("NONE cannot be modified!");
+ public int getMemory() {
+ return resourceValueToInt();
}
@Override
- public int getVirtualCores() {
- return 0;
+ public long getMemorySize() {
+ return this.resourceValue;
}
@Override
- public void setVirtualCores(int cores) {
- throw new RuntimeException("NONE cannot be modified!");
+ public void setMemory(int memory) {
+ throw new RuntimeException(name + " cannot be modified!");
}
@Override
- public int compareTo(Resource o) {
- long diff = 0 - o.getMemorySize();
- if (diff == 0) {
- diff = 0 - o.getVirtualCores();
- }
- return Long.signum(diff);
+ @SuppressWarnings("deprecation")
+ public void setMemorySize(long memory) {
+ throw new RuntimeException(name + " cannot be modified!");
}
-
- };
-
- private static final Resource UNBOUNDED = new Resource() {
@Override
- @SuppressWarnings("deprecation")
- public int getMemory() {
- return Integer.MAX_VALUE;
+ public int getVirtualCores() {
+ return resourceValueToInt();
}
@Override
- public long getMemorySize() {
- return Long.MAX_VALUE;
+ public void setVirtualCores(int virtualCores) {
+ throw new RuntimeException(name + " cannot be modified!");
}
@Override
- @SuppressWarnings("deprecation")
- public void setMemory(int memory) {
- throw new RuntimeException("UNBOUNDED cannot be modified!");
+ public void setResourceInformation(int index,
+ ResourceInformation resourceInformation)
+ throws ResourceNotFoundException {
+ throw new RuntimeException(name + " cannot be modified!");
}
@Override
- public void setMemorySize(long memory) {
- throw new RuntimeException("UNBOUNDED cannot be modified!");
+ public void setResourceValue(int index, long value)
+ throws ResourceNotFoundException {
+ throw new RuntimeException(name + " cannot be modified!");
}
@Override
- public int getVirtualCores() {
- return Integer.MAX_VALUE;
+ public void setResourceInformation(String resource,
+ ResourceInformation resourceInformation)
+ throws ResourceNotFoundException {
+ throw new RuntimeException(name + " cannot be modified!");
}
@Override
- public void setVirtualCores(int cores) {
- throw new RuntimeException("UNBOUNDED cannot be modified!");
+ public void setResourceValue(String resource, long value)
+ throws ResourceNotFoundException {
+ throw new RuntimeException(name + " cannot be modified!");
}
- @Override
- public int compareTo(Resource o) {
- long diff = Long.MAX_VALUE - o.getMemorySize();
- if (diff == 0) {
- diff = Integer.MAX_VALUE - o.getVirtualCores();
+ private void initResourceMap() {
+ ResourceInformation[] types = ResourceUtils.getResourceTypesArray();
+ if (types != null) {
+ resources = new ResourceInformation[types.length];
+ for (int index = 0; index < types.length; index++) {
+ resources[index] = ResourceInformation.newInstance(types[index]);
+ resources[index].setValue(resourceValue);
+ }
}
- return Long.signum(diff);
}
-
- };
+ }
public static Resource createResource(int memory) {
return createResource(memory, (memory > 0) ? 1 : 0);
@@ -125,6 +145,11 @@ public static Resource createResource(int memory, int cores) {
return Resource.newInstance(memory, cores);
}
+ private static final Resource UNBOUNDED =
+ new FixedValueResource("UNBOUNDED", Long.MAX_VALUE);
+
+ private static final Resource NONE = new FixedValueResource("NONE", 0L);
+
public static Resource createResource(long memory) {
return createResource(memory, (memory > 0) ? 1 : 0);
}
@@ -152,12 +177,26 @@ public static Resource unbounded() {
}
public static Resource clone(Resource res) {
- return createResource(res.getMemorySize(), res.getVirtualCores());
+ return Resource.newInstance(res);
}
public static Resource addTo(Resource lhs, Resource rhs) {
- lhs.setMemorySize(lhs.getMemorySize() + rhs.getMemorySize());
- lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores());
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ try {
+ ResourceInformation rhsValue = rhs.getResourceInformation(i);
+ ResourceInformation lhsValue = lhs.getResourceInformation(i);
+
+ long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits()))
+ ? rhsValue.getValue()
+ : UnitsConversionUtil.convert(rhsValue.getUnits(),
+ lhsValue.getUnits(), rhsValue.getValue());
+ lhs.setResourceValue(i, lhsValue.getValue() + convertedRhs);
+ } catch (ResourceNotFoundException ye) {
+ LOG.warn("Resource is missing:" + ye.getMessage());
+ continue;
+ }
+ }
return lhs;
}
@@ -166,8 +205,22 @@ public static Resource add(Resource lhs, Resource rhs) {
}
public static Resource subtractFrom(Resource lhs, Resource rhs) {
- lhs.setMemorySize(lhs.getMemorySize() - rhs.getMemorySize());
- lhs.setVirtualCores(lhs.getVirtualCores() - rhs.getVirtualCores());
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ try {
+ ResourceInformation rhsValue = rhs.getResourceInformation(i);
+ ResourceInformation lhsValue = lhs.getResourceInformation(i);
+
+ long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits()))
+ ? rhsValue.getValue()
+ : UnitsConversionUtil.convert(rhsValue.getUnits(),
+ lhsValue.getUnits(), rhsValue.getValue());
+ lhs.setResourceValue(i, lhsValue.getValue() - convertedRhs);
+ } catch (ResourceNotFoundException ye) {
+ LOG.warn("Resource is missing:" + ye.getMessage());
+ continue;
+ }
+ }
return lhs;
}
@@ -198,8 +251,16 @@ public static Resource negate(Resource resource) {
}
public static Resource multiplyTo(Resource lhs, double by) {
- lhs.setMemorySize((long)(lhs.getMemorySize() * by));
- lhs.setVirtualCores((int)(lhs.getVirtualCores() * by));
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ try {
+ ResourceInformation lhsValue = lhs.getResourceInformation(i);
+ lhs.setResourceValue(i, (long) (lhsValue.getValue() * by));
+ } catch (ResourceNotFoundException ye) {
+ LOG.warn("Resource is missing:" + ye.getMessage());
+ continue;
+ }
+ }
return lhs;
}
@@ -213,9 +274,24 @@ public static Resource multiply(Resource lhs, double by) {
*/
public static Resource multiplyAndAddTo(
Resource lhs, Resource rhs, double by) {
- lhs.setMemorySize(lhs.getMemorySize() + (long)(rhs.getMemorySize() * by));
- lhs.setVirtualCores(lhs.getVirtualCores()
- + (int)(rhs.getVirtualCores() * by));
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ try {
+ ResourceInformation rhsValue = rhs.getResourceInformation(i);
+ ResourceInformation lhsValue = lhs.getResourceInformation(i);
+
+ long convertedRhs = (long) (((rhsValue.getUnits()
+ .equals(lhsValue.getUnits()))
+ ? rhsValue.getValue()
+ : UnitsConversionUtil.convert(rhsValue.getUnits(),
+ lhsValue.getUnits(), rhsValue.getValue()))
+ * by);
+ lhs.setResourceValue(i, lhsValue.getValue() + convertedRhs);
+ } catch (ResourceNotFoundException ye) {
+ LOG.warn("Resource is missing:" + ye.getMessage());
+ continue;
+ }
+ }
return lhs;
}
@@ -231,8 +307,16 @@ public static Resource multiplyAndNormalizeDown(
public static Resource multiplyAndRoundDown(Resource lhs, double by) {
Resource out = clone(lhs);
- out.setMemorySize((long)(lhs.getMemorySize() * by));
- out.setVirtualCores((int)(lhs.getVirtualCores() * by));
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ try {
+ ResourceInformation lhsValue = lhs.getResourceInformation(i);
+ out.setResourceValue(i, (long) (lhsValue.getValue() * by));
+ } catch (ResourceNotFoundException ye) {
+ LOG.warn("Resource is missing:" + ye.getMessage());
+ continue;
+ }
+ }
return out;
}
@@ -332,23 +416,78 @@ public static Resource max(
}
public static boolean fitsIn(Resource smaller, Resource bigger) {
- return smaller.getMemorySize() <= bigger.getMemorySize() &&
- smaller.getVirtualCores() <= bigger.getVirtualCores();
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ try {
+ ResourceInformation rhsValue = bigger.getResourceInformation(i);
+ ResourceInformation lhsValue = smaller.getResourceInformation(i);
+
+ long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits()))
+ ? rhsValue.getValue()
+ : UnitsConversionUtil.convert(rhsValue.getUnits(),
+ lhsValue.getUnits(), rhsValue.getValue());
+ if (lhsValue.getValue() > convertedRhs) {
+ return false;
+ }
+ } catch (ResourceNotFoundException ye) {
+ LOG.warn("Resource is missing:" + ye.getMessage());
+ continue;
+ }
+ }
+ return true;
}
- public static boolean fitsIn(ResourceCalculator rc, Resource cluster,
+ public static boolean fitsIn(ResourceCalculator rc,
Resource smaller, Resource bigger) {
- return rc.fitsIn(cluster, smaller, bigger);
+ return rc.fitsIn(smaller, bigger);
}
public static Resource componentwiseMin(Resource lhs, Resource rhs) {
- return createResource(Math.min(lhs.getMemorySize(), rhs.getMemorySize()),
- Math.min(lhs.getVirtualCores(), rhs.getVirtualCores()));
+ Resource ret = createResource(0);
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ try {
+ ResourceInformation rhsValue = rhs.getResourceInformation(i);
+ ResourceInformation lhsValue = lhs.getResourceInformation(i);
+
+ long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits()))
+ ? rhsValue.getValue()
+ : UnitsConversionUtil.convert(rhsValue.getUnits(),
+ lhsValue.getUnits(), rhsValue.getValue());
+ ResourceInformation outInfo = lhsValue.getValue() < convertedRhs
+ ? lhsValue
+ : rhsValue;
+ ret.setResourceInformation(i, outInfo);
+ } catch (ResourceNotFoundException ye) {
+ LOG.warn("Resource is missing:" + ye.getMessage());
+ continue;
+ }
+ }
+ return ret;
}
public static Resource componentwiseMax(Resource lhs, Resource rhs) {
- return createResource(Math.max(lhs.getMemorySize(), rhs.getMemorySize()),
- Math.max(lhs.getVirtualCores(), rhs.getVirtualCores()));
+ Resource ret = createResource(0);
+ int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
+ for (int i = 0; i < maxLength; i++) {
+ try {
+ ResourceInformation rhsValue = rhs.getResourceInformation(i);
+ ResourceInformation lhsValue = lhs.getResourceInformation(i);
+
+ long convertedRhs = (rhsValue.getUnits().equals(lhsValue.getUnits()))
+ ? rhsValue.getValue()
+ : UnitsConversionUtil.convert(rhsValue.getUnits(),
+ lhsValue.getUnits(), rhsValue.getValue());
+ ResourceInformation outInfo = lhsValue.getValue() > convertedRhs
+ ? lhsValue
+ : rhsValue;
+ ret.setResourceInformation(i, outInfo);
+ } catch (ResourceNotFoundException ye) {
+ LOG.warn("Resource is missing:" + ye.getMessage());
+ continue;
+ }
+ }
+ return ret;
}
public static boolean isAnyMajorResourceZero(ResourceCalculator rc,
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index ccfe10a6100..4c5c0ea0e25 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3404,4 +3404,14 @@
yarn.scheduler.configuration.zk-store.parent-path
/confstore
  </property>
+
+  <property>
+    <name>yarn.resource-types</name>
+    <value></value>
+    <description>
+    The resource types to be used for scheduling. Use resource-types.xml
+    to specify details about the individual resource types.
+    </description>
+  </property>
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
index 82170b31342..86946518db3 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/BasePBImplRecordsTest.java
@@ -37,6 +37,9 @@
@SuppressWarnings("checkstyle:visibilitymodifier")
protected static HashMap<Type, Object> typeValueCache =
new HashMap<Type, Object>();
+ @SuppressWarnings("checkstyle:visibilitymodifier")
+ protected static HashMap<Class<?>, List<String>> excludedPropertiesMap =
+ new HashMap<>();
private static Random rand = new Random();
private static byte [] bytes = new byte[] {'1', '2', '3', '4'};
@@ -167,6 +170,10 @@ public String toString() {
private Map<String, GetSetPair> getGetSetPairs(Class recordClass)
throws Exception {
Map<String, GetSetPair> ret = new HashMap<String, GetSetPair>();
+ List<String> excluded = null;
+ if (excludedPropertiesMap.containsKey(recordClass)) {
+ excluded = excludedPropertiesMap.get(recordClass);
+ }
Method [] methods = recordClass.getDeclaredMethods();
// get all get methods
for (int i = 0; i < methods.length; i++) {
@@ -224,6 +231,11 @@ public String toString() {
(gsp.setMethod == null)) {
LOG.info(String.format("Exclude potential property: %s\n", gsp.propertyName));
itr.remove();
+ } else if ((excluded != null && excluded.contains(gsp.propertyName))) {
+ LOG.info(String.format(
+ "Excluding potential property(present in exclusion list): %s\n",
+ gsp.propertyName));
+ itr.remove();
} else {
LOG.info(String.format("New property: %s type: %s", gsp.toString(), gsp.type));
gsp.testValue = genTypeValue(gsp.type);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index a3f5491cdc0..8c6312e51bd 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -42,6 +42,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoRequestPBImpl;
+import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetAllResourceTypeInfoResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptReportResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationAttemptsRequestPBImpl;
@@ -139,9 +141,11 @@
import org.apache.hadoop.yarn.api.records.ReservationRequests;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceAllocationRequest;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
import org.apache.hadoop.yarn.api.records.ResourceUtilization;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
@@ -180,12 +184,14 @@
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceOptionPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ResourceRequestPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourceTypeInfoPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.SerializedExceptionPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.StrictPreemptionContractPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.URLPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.UpdateContainerRequestPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.YarnClusterMetricsPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
@@ -332,6 +338,7 @@
import org.junit.Test;
import com.google.common.collect.ImmutableSet;
+import java.util.Arrays;
/**
* Test class for YARN API protocol records.
@@ -346,6 +353,8 @@ public static void setup() throws Exception {
typeValueCache.put(SerializedException.class,
SerializedException.newInstance(new IOException("exception for test")));
generateByNewInstance(ExecutionTypeRequest.class);
+ typeValueCache.put(ResourceInformation.class, ResourceInformation
+ .newInstance("localhost.test/sample", 1L));
generateByNewInstance(LogAggregationContext.class);
generateByNewInstance(ApplicationId.class);
generateByNewInstance(ApplicationAttemptId.class);
@@ -408,6 +417,7 @@ public static void setup() throws Exception {
generateByNewInstance(ApplicationTimeout.class);
generateByNewInstance(QueueConfigurations.class);
generateByNewInstance(CollectorInfo.class);
+ generateByNewInstance(ResourceTypeInfo.class);
}
@Test
@@ -731,6 +741,8 @@ public void testApplicationReportPBImpl() throws Exception {
@Test
public void testApplicationResourceUsageReportPBImpl() throws Exception {
+ excludedPropertiesMap.put(ApplicationResourceUsageReportPBImpl.class,
+ Arrays.asList("PreemptedResourceSecondsMap", "ResourceSecondsMap"));
validatePBImplRecord(ApplicationResourceUsageReportPBImpl.class,
ApplicationResourceUsageReportProto.class);
}
@@ -1153,4 +1165,22 @@ public void testExecutionTypeRequestPBImpl() throws Exception {
validatePBImplRecord(ExecutionTypeRequestPBImpl.class,
ExecutionTypeRequestProto.class);
}
+
+ @Test
+ public void testResourceTypesInfoPBImpl() throws Exception {
+ validatePBImplRecord(ResourceTypeInfoPBImpl.class,
+ YarnProtos.ResourceTypeInfoProto.class);
+ }
+
+ @Test
+ public void testGetAllResourceTypesInfoRequestPBImpl() throws Exception {
+ validatePBImplRecord(GetAllResourceTypeInfoRequestPBImpl.class,
+ YarnServiceProtos.GetAllResourceTypeInfoRequestProto.class);
+ }
+
+ @Test
+ public void testGetAllResourceTypesInfoResponsePBImpl() throws Exception {
+ validatePBImplRecord(GetAllResourceTypeInfoResponsePBImpl.class,
+ YarnServiceProtos.GetAllResourceTypeInfoResponseProto.class);
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourcePBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourcePBImpl.java
new file mode 100644
index 00000000000..569a7b74f8b
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestResourcePBImpl.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test class to handle various proto related tests for resources.
+ */
+public class TestResourcePBImpl {
+ @Test
+ public void testEmptyResourcePBInit() throws Exception {
+ Resource res = new ResourcePBImpl();
+ // Assert to check it sets resource value and unit to default.
+ Assert.assertEquals(0, res.getMemorySize());
+ Assert.assertEquals(ResourceInformation.MEMORY_MB.getUnits(),
+ res.getResourceInformation(ResourceInformation.MEMORY_MB.getName())
+ .getUnits());
+ Assert.assertEquals(ResourceInformation.VCORES.getUnits(),
+ res.getResourceInformation(ResourceInformation.VCORES.getName())
+ .getUnits());
+ }
+
+ @Test
+ public void testResourcePBInitFromOldPB() throws Exception {
+ YarnProtos.ResourceProto proto =
+ YarnProtos.ResourceProto.newBuilder().setMemory(1024).setVirtualCores(3)
+ .build();
+ // Assert to check it sets resource value and unit to default.
+ Resource res = new ResourcePBImpl(proto);
+ Assert.assertEquals(1024, res.getMemorySize());
+ Assert.assertEquals(3, res.getVirtualCores());
+ Assert.assertEquals(ResourceInformation.MEMORY_MB.getUnits(),
+ res.getResourceInformation(ResourceInformation.MEMORY_MB.getName())
+ .getUnits());
+ Assert.assertEquals(ResourceInformation.VCORES.getUnits(),
+ res.getResourceInformation(ResourceInformation.VCORES.getName())
+ .getUnits());
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
index b123b0520d4..5f3ed196048 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceCalculator.java
@@ -21,15 +21,20 @@
import java.util.Arrays;
import java.util.Collection;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Assert;
+import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
+import static org.junit.Assert.assertEquals;
+
@RunWith(Parameterized.class)
public class TestResourceCalculator {
- private ResourceCalculator resourceCalculator;
+ private final ResourceCalculator resourceCalculator;
@Parameterized.Parameters
public static Collection getParameters() {
@@ -38,41 +43,199 @@
{ new DominantResourceCalculator() } });
}
+ @Before
+ public void setupNoExtraResource() {
+ // This has to run before each test because we don't know when
+ // setupExtraResource() might be called
+ ResourceUtils.resetResourceTypes(new Configuration());
+ }
+
+ private static void setupExtraResource() {
+ Configuration conf = new Configuration();
+
+ conf.set(YarnConfiguration.RESOURCE_TYPES, "test");
+ ResourceUtils.resetResourceTypes(conf);
+ }
+
public TestResourceCalculator(ResourceCalculator rs) {
this.resourceCalculator = rs;
}
@Test(timeout = 10000)
public void testFitsIn() {
- Resource cluster = Resource.newInstance(1024, 1);
if (resourceCalculator instanceof DefaultResourceCalculator) {
- Assert.assertTrue(resourceCalculator.fitsIn(cluster,
+ Assert.assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(2, 1)));
- Assert.assertTrue(resourceCalculator.fitsIn(cluster,
+ Assert.assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(2, 2)));
- Assert.assertTrue(resourceCalculator.fitsIn(cluster,
+ Assert.assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(1, 2)));
- Assert.assertTrue(resourceCalculator.fitsIn(cluster,
+ Assert.assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(1, 1)));
- Assert.assertFalse(resourceCalculator.fitsIn(cluster,
+ Assert.assertFalse(resourceCalculator.fitsIn(
Resource.newInstance(2, 1), Resource.newInstance(1, 2)));
} else if (resourceCalculator instanceof DominantResourceCalculator) {
- Assert.assertFalse(resourceCalculator.fitsIn(cluster,
+ Assert.assertFalse(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(2, 1)));
- Assert.assertTrue(resourceCalculator.fitsIn(cluster,
+ Assert.assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(2, 2)));
- Assert.assertTrue(resourceCalculator.fitsIn(cluster,
+ Assert.assertTrue(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(1, 2)));
- Assert.assertFalse(resourceCalculator.fitsIn(cluster,
+ Assert.assertFalse(resourceCalculator.fitsIn(
Resource.newInstance(1, 2), Resource.newInstance(1, 1)));
- Assert.assertFalse(resourceCalculator.fitsIn(cluster,
+ Assert.assertFalse(resourceCalculator.fitsIn(
Resource.newInstance(2, 1), Resource.newInstance(1, 2)));
}
}
+ private Resource newResource(long memory, int cpu) {
+ Resource res = Resource.newInstance(memory, cpu);
+
+ return res;
+ }
+
+ private Resource newResource(long memory, int cpu, int test) {
+ Resource res = newResource(memory, cpu);
+
+ res.setResourceValue("test", test);
+
+ return res;
+ }
+
+ /**
+ * Test that the compare() method returns the expected result (0, -1, or 1).
+ * If the expected result is not 0, this method will also test the resources
+ * in the opposite order and check for the negative of the expected result.
+ *
+ * @param cluster the cluster resource
+ * @param res1 the LHS resource
+ * @param res2 the RHS resource
+ * @param expected the expected result
+ */
+ private void assertComparison(Resource cluster, Resource res1, Resource res2,
+ int expected) {
+ int actual = resourceCalculator.compare(cluster, res1, res2);
+
+ assertEquals(String.format("Resource comparison did not give the expected "
+ + "result for %s v/s %s", res1.toString(), res2.toString()),
+ expected, actual);
+
+ if (expected != 0) {
+ // Try again with args in the opposite order and the negative of the
+ // expected result.
+ actual = resourceCalculator.compare(cluster, res2, res1);
+ assertEquals(String.format("Resource comparison did not give the "
+ + "expected result for %s v/s %s", res2.toString(), res1.toString()),
+ expected * -1, actual);
+ }
+ }
+
+ @Test
+ public void testCompareWithOnlyMandatory() {
+ // This test is necessary because there are optimizations that are only
+ // triggered when only the mandatory resources are configured.
+
+ // Keep cluster resources even so that the numbers are easy to understand
+ Resource cluster = newResource(4, 4);
+
+ assertComparison(cluster, newResource(1, 1), newResource(1, 1), 0);
+ assertComparison(cluster, newResource(0, 0), newResource(0, 0), 0);
+ assertComparison(cluster, newResource(2, 2), newResource(1, 1), 1);
+ assertComparison(cluster, newResource(2, 2), newResource(0, 0), 1);
+
+ if (resourceCalculator instanceof DefaultResourceCalculator) {
+ testCompareDefaultWithOnlyMandatory(cluster);
+ } else if (resourceCalculator instanceof DominantResourceCalculator) {
+ testCompareDominantWithOnlyMandatory(cluster);
+ }
+ }
+
+ private void testCompareDefaultWithOnlyMandatory(Resource cluster) {
+ assertComparison(cluster, newResource(1, 1), newResource(1, 1), 0);
+ assertComparison(cluster, newResource(1, 2), newResource(1, 1), 0);
+ assertComparison(cluster, newResource(1, 1), newResource(1, 0), 0);
+ assertComparison(cluster, newResource(2, 1), newResource(1, 1), 1);
+ assertComparison(cluster, newResource(2, 1), newResource(1, 2), 1);
+ assertComparison(cluster, newResource(2, 1), newResource(1, 0), 1);
+ }
+
+ private void testCompareDominantWithOnlyMandatory(Resource cluster) {
+ assertComparison(cluster, newResource(2, 1), newResource(2, 1), 0);
+ assertComparison(cluster, newResource(2, 1), newResource(1, 2), 0);
+ assertComparison(cluster, newResource(2, 1), newResource(1, 1), 1);
+ assertComparison(cluster, newResource(2, 2), newResource(2, 1), 1);
+ assertComparison(cluster, newResource(2, 2), newResource(1, 2), 1);
+ assertComparison(cluster, newResource(3, 1), newResource(3, 0), 1);
+ }
+
+ @Test
+ public void testCompare() {
+ // Test with 3 resources
+ setupExtraResource();
+
+ // Keep cluster resources even so that the numbers are easy to understand
+ Resource cluster = newResource(4L, 4, 4);
+
+ assertComparison(cluster, newResource(1, 1, 1), newResource(1, 1, 1), 0);
+ assertComparison(cluster, newResource(0, 0, 0), newResource(0, 0, 0), 0);
+ assertComparison(cluster, newResource(2, 2, 2), newResource(1, 1, 1), 1);
+ assertComparison(cluster, newResource(2, 2, 2), newResource(0, 0, 0), 1);
+
+ if (resourceCalculator instanceof DefaultResourceCalculator) {
+ testCompareDefault(cluster);
+ } else if (resourceCalculator instanceof DominantResourceCalculator) {
+ testCompareDominant(cluster);
+ }
+ }
+
+ private void testCompareDefault(Resource cluster) {
+ assertComparison(cluster, newResource(1, 1, 2), newResource(1, 1, 1), 0);
+ assertComparison(cluster, newResource(1, 2, 1), newResource(1, 1, 1), 0);
+ assertComparison(cluster, newResource(1, 2, 2), newResource(1, 1, 1), 0);
+ assertComparison(cluster, newResource(1, 2, 2), newResource(1, 0, 0), 0);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 1), 1);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 1), 1);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 2), 1);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 2), 1);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 0, 0), 1);
+ }
+
+ private void testCompareDominant(Resource cluster) {
+ assertComparison(cluster, newResource(2, 1, 1), newResource(2, 1, 1), 0);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 2, 1), 0);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 2), 0);
+ assertComparison(cluster, newResource(2, 1, 0), newResource(0, 1, 2), 0);
+ assertComparison(cluster, newResource(2, 2, 1), newResource(1, 2, 2), 0);
+ assertComparison(cluster, newResource(2, 2, 1), newResource(2, 1, 2), 0);
+ assertComparison(cluster, newResource(2, 2, 1), newResource(2, 2, 1), 0);
+ assertComparison(cluster, newResource(2, 2, 0), newResource(2, 0, 2), 0);
+ assertComparison(cluster, newResource(3, 2, 1), newResource(3, 2, 1), 0);
+ assertComparison(cluster, newResource(3, 2, 1), newResource(3, 1, 2), 0);
+ assertComparison(cluster, newResource(3, 2, 1), newResource(1, 2, 3), 0);
+ assertComparison(cluster, newResource(3, 2, 1), newResource(1, 3, 2), 0);
+ assertComparison(cluster, newResource(3, 2, 1), newResource(2, 1, 3), 0);
+ assertComparison(cluster, newResource(3, 2, 1), newResource(2, 3, 1), 0);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 1), 1);
+ assertComparison(cluster, newResource(2, 1, 1), newResource(1, 1, 0), 1);
+ assertComparison(cluster, newResource(2, 2, 1), newResource(2, 1, 1), 1);
+ assertComparison(cluster, newResource(2, 2, 1), newResource(1, 2, 1), 1);
+ assertComparison(cluster, newResource(2, 2, 1), newResource(1, 1, 2), 1);
+ assertComparison(cluster, newResource(2, 2, 1), newResource(0, 2, 2), 1);
+ assertComparison(cluster, newResource(2, 2, 2), newResource(2, 1, 1), 1);
+ assertComparison(cluster, newResource(2, 2, 2), newResource(1, 2, 1), 1);
+ assertComparison(cluster, newResource(2, 2, 2), newResource(1, 1, 2), 1);
+ assertComparison(cluster, newResource(2, 2, 2), newResource(2, 2, 1), 1);
+ assertComparison(cluster, newResource(2, 2, 2), newResource(2, 1, 2), 1);
+ assertComparison(cluster, newResource(2, 2, 2), newResource(1, 2, 2), 1);
+ assertComparison(cluster, newResource(3, 2, 1), newResource(2, 2, 2), 1);
+ assertComparison(cluster, newResource(3, 1, 1), newResource(2, 2, 2), 1);
+ assertComparison(cluster, newResource(3, 1, 1), newResource(3, 1, 0), 1);
+ assertComparison(cluster, newResource(3, 1, 1), newResource(3, 0, 0), 1);
+ }
+
@Test(timeout = 10000)
- public void testResourceCalculatorCompareMethod() {
+ public void testCompareWithEmptyCluster() {
Resource clusterResource = Resource.newInstance(0, 0);
// For lhs == rhs
@@ -126,27 +289,27 @@ private void assertResourcesOperations(Resource clusterResource,
boolean greaterThan, boolean greaterThanOrEqual, Resource max,
Resource min) {
- Assert.assertEquals("Less Than operation is wrongly calculated.", lessThan,
+ assertEquals("Less Than operation is wrongly calculated.", lessThan,
Resources.lessThan(resourceCalculator, clusterResource, lhs, rhs));
- Assert.assertEquals(
+ assertEquals(
"Less Than Or Equal To operation is wrongly calculated.",
lessThanOrEqual, Resources.lessThanOrEqual(resourceCalculator,
clusterResource, lhs, rhs));
- Assert.assertEquals("Greater Than operation is wrongly calculated.",
+ assertEquals("Greater Than operation is wrongly calculated.",
greaterThan,
Resources.greaterThan(resourceCalculator, clusterResource, lhs, rhs));
- Assert.assertEquals(
+ assertEquals(
"Greater Than Or Equal To operation is wrongly calculated.",
greaterThanOrEqual, Resources.greaterThanOrEqual(resourceCalculator,
clusterResource, lhs, rhs));
- Assert.assertEquals("Max(value) Operation wrongly calculated.", max,
+ assertEquals("Max(value) Operation wrongly calculated.", max,
Resources.max(resourceCalculator, clusterResource, lhs, rhs));
- Assert.assertEquals("Min(value) operation is wrongly calculated.", min,
+ assertEquals("Min(value) operation is wrongly calculated.", min,
Resources.min(resourceCalculator, clusterResource, lhs, rhs));
}
@@ -164,13 +327,13 @@ public void testNormalize() {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
- Assert.assertEquals(2 * 1024, result.getMemorySize());
+ assertEquals(2 * 1024, result.getMemorySize());
} else if (resourceCalculator instanceof DominantResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
- Assert.assertEquals(2 * 1024, result.getMemorySize());
- Assert.assertEquals(4, result.getVirtualCores());
+ assertEquals(2 * 1024, result.getMemorySize());
+ assertEquals(4, result.getVirtualCores());
}
// if resources asked are less than minimum resource, then normalize it to
@@ -183,13 +346,13 @@ public void testNormalize() {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
- Assert.assertEquals(2 * 1024, result.getMemorySize());
+ assertEquals(2 * 1024, result.getMemorySize());
} else if (resourceCalculator instanceof DominantResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
- Assert.assertEquals(2 * 1024, result.getMemorySize());
- Assert.assertEquals(2, result.getVirtualCores());
+ assertEquals(2 * 1024, result.getMemorySize());
+ assertEquals(2, result.getVirtualCores());
}
// if resources asked are larger than maximum resource, then normalize it to
@@ -202,13 +365,13 @@ public void testNormalize() {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
- Assert.assertEquals(8 * 1024, result.getMemorySize());
+ assertEquals(8 * 1024, result.getMemorySize());
} else if (resourceCalculator instanceof DominantResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
- Assert.assertEquals(8 * 1024, result.getMemorySize());
- Assert.assertEquals(8, result.getVirtualCores());
+ assertEquals(8 * 1024, result.getMemorySize());
+ assertEquals(8, result.getVirtualCores());
}
// if increment is 0, use minimum resource as the increment resource.
@@ -220,13 +383,13 @@ public void testNormalize() {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
- Assert.assertEquals(2 * 1024, result.getMemorySize());
+ assertEquals(2 * 1024, result.getMemorySize());
} else if (resourceCalculator instanceof DominantResourceCalculator) {
Resource result = Resources.normalize(resourceCalculator,
ask, min, max, increment);
- Assert.assertEquals(2 * 1024, result.getMemorySize());
- Assert.assertEquals(2, result.getVirtualCores());
+ assertEquals(2 * 1024, result.getMemorySize());
+ assertEquals(2, result.getVirtualCores());
}
}
}
\ No newline at end of file
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
new file mode 100644
index 00000000000..a5550a70f22
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -0,0 +1,306 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util.resource;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Test class to verify all resource utility methods.
+ */
+public class TestResourceUtils {
+
+ static class ResourceFileInformation {
+ String filename;
+ int resourceCount;
+ Map<String, String> resourceNameUnitsMap;
+
+ public ResourceFileInformation(String name, int count) {
+ filename = name;
+ resourceCount = count;
+ resourceNameUnitsMap = new HashMap<>();
+ }
+ }
+
+ @Before
+ public void setup() {
+ ResourceUtils.resetResourceTypes();
+ }
+
+ @After
+ public void teardown() {
+ Configuration conf = new YarnConfiguration();
+ File source = new File(
+ conf.getClassLoader().getResource("resource-types-1.xml").getFile());
+ File dest = new File(source.getParent(), "resource-types.xml");
+ if (dest.exists()) {
+ dest.delete();
+ }
+ }
+
+ private void testMemoryAndVcores(Map<String, ResourceInformation> res) {
+ String memory = ResourceInformation.MEMORY_MB.getName();
+ String vcores = ResourceInformation.VCORES.getName();
+ Assert.assertTrue("Resource 'memory' missing", res.containsKey(memory));
+ Assert.assertEquals("'memory' units incorrect",
+ ResourceInformation.MEMORY_MB.getUnits(), res.get(memory).getUnits());
+ Assert.assertEquals("'memory' types incorrect",
+ ResourceInformation.MEMORY_MB.getResourceType(),
+ res.get(memory).getResourceType());
+ Assert.assertTrue("Resource 'vcores' missing", res.containsKey(vcores));
+ Assert.assertEquals("'vcores' units incorrect",
+ ResourceInformation.VCORES.getUnits(), res.get(vcores).getUnits());
+ Assert.assertEquals("'vcores' type incorrect",
+ ResourceInformation.VCORES.getResourceType(),
+ res.get(vcores).getResourceType());
+ }
+
+ @Test
+ public void testGetResourceTypes() throws Exception {
+
+ Map<String, ResourceInformation> res = ResourceUtils.getResourceTypes();
+ Assert.assertEquals(2, res.size());
+ testMemoryAndVcores(res);
+ }
+
+ @Test
+ public void testGetResourceTypesConfigs() throws Exception {
+
+ Configuration conf = new YarnConfiguration();
+
+ ResourceFileInformation testFile1 =
+ new ResourceFileInformation("resource-types-1.xml", 2);
+ ResourceFileInformation testFile2 =
+ new ResourceFileInformation("resource-types-2.xml", 3);
+ testFile2.resourceNameUnitsMap.put("resource1", "G");
+ ResourceFileInformation testFile3 =
+ new ResourceFileInformation("resource-types-3.xml", 3);
+ testFile3.resourceNameUnitsMap.put("resource2", "");
+ ResourceFileInformation testFile4 =
+ new ResourceFileInformation("resource-types-4.xml", 4);
+ testFile4.resourceNameUnitsMap.put("resource1", "G");
+ testFile4.resourceNameUnitsMap.put("resource2", "m");
+
+ ResourceFileInformation[] tests = {testFile1, testFile2, testFile3,
+ testFile4};
+ Map<String, ResourceInformation> res;
+ for (ResourceFileInformation testInformation : tests) {
+ ResourceUtils.resetResourceTypes();
+ File source = new File(
+ conf.getClassLoader().getResource(testInformation.filename)
+ .getFile());
+ File dest = new File(source.getParent(), "resource-types.xml");
+ FileUtils.copyFile(source, dest);
+ res = ResourceUtils.getResourceTypes();
+ testMemoryAndVcores(res);
+ Assert.assertEquals(testInformation.resourceCount, res.size());
+ for (Map.Entry<String, String> entry : testInformation.resourceNameUnitsMap
+ .entrySet()) {
+ String resourceName = entry.getKey();
+ Assert.assertTrue("Missing key " + resourceName,
+ res.containsKey(resourceName));
+ Assert.assertEquals(entry.getValue(), res.get(resourceName).getUnits());
+ }
+ dest.delete();
+ }
+ }
+
+ @Test
+ public void testGetResourceTypesConfigErrors() throws Exception {
+ Configuration conf = new YarnConfiguration();
+
+ String[] resourceFiles = {"resource-types-error-1.xml",
+ "resource-types-error-2.xml", "resource-types-error-3.xml",
+ "resource-types-error-4.xml"};
+ for (String resourceFile : resourceFiles) {
+ ResourceUtils.resetResourceTypes();
+ File dest = null;
+ try {
+ File source =
+ new File(conf.getClassLoader().getResource(resourceFile).getFile());
+ dest = new File(source.getParent(), "resource-types.xml");
+ FileUtils.copyFile(source, dest);
+ ResourceUtils.getResourceTypes();
+ Assert.fail("Expected error with file " + resourceFile);
+ } catch (NullPointerException ne) {
+ throw ne;
+ } catch (Exception e) {
+ if (dest != null) {
+ dest.delete();
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testInitializeResourcesMap() throws Exception {
+ String[] empty = {"", ""};
+ String[] res1 = {"resource1", "m"};
+ String[] res2 = {"resource2", "G"};
+ String[][] test1 = {empty};
+ String[][] test2 = {res1};
+ String[][] test3 = {res2};
+ String[][] test4 = {res1, res2};
+
+ String[][][] allTests = {test1, test2, test3, test4};
+
+ for (String[][] test : allTests) {
+
+ Configuration conf = new YarnConfiguration();
+ String resSt = "";
+ for (String[] resources : test) {
+ resSt += (resources[0] + ",");
+ }
+ resSt = resSt.substring(0, resSt.length() - 1);
+ conf.set(YarnConfiguration.RESOURCE_TYPES, resSt);
+ for (String[] resources : test) {
+ String name =
+ YarnConfiguration.RESOURCE_TYPES + "." + resources[0] + ".units";
+ conf.set(name, resources[1]);
+ }
+ Map<String, ResourceInformation> ret =
+ ResourceUtils.resetResourceTypes(conf);
+
+ // for test1, 4 - length will be 1, 4
+ // for the others, len will be 3
+ int len = 3;
+ if (test == test1) {
+ len = 2;
+ } else if (test == test4) {
+ len = 4;
+ }
+
+ Assert.assertEquals(len, ret.size());
+ for (String[] resources : test) {
+ if (resources[0].length() == 0) {
+ continue;
+ }
+ Assert.assertTrue(ret.containsKey(resources[0]));
+ ResourceInformation resInfo = ret.get(resources[0]);
+ Assert.assertEquals(resources[1], resInfo.getUnits());
+ Assert.assertEquals(ResourceTypes.COUNTABLE, resInfo.getResourceType());
+ }
+ // we must always have memory and vcores with their fixed units
+ Assert.assertTrue(ret.containsKey("memory-mb"));
+ ResourceInformation memInfo = ret.get("memory-mb");
+ Assert.assertEquals("Mi", memInfo.getUnits());
+ Assert.assertEquals(ResourceTypes.COUNTABLE, memInfo.getResourceType());
+ Assert.assertTrue(ret.containsKey("vcores"));
+ ResourceInformation vcoresInfo = ret.get("vcores");
+ Assert.assertEquals("", vcoresInfo.getUnits());
+ Assert
+ .assertEquals(ResourceTypes.COUNTABLE, vcoresInfo.getResourceType());
+ }
+ }
+
+ @Test
+ public void testInitializeResourcesMapErrors() throws Exception {
+
+ String[] mem1 = {"memory-mb", ""};
+ String[] vcores1 = {"vcores", "M"};
+
+ String[] mem2 = {"memory-mb", "m"};
+ String[] vcores2 = {"vcores", "G"};
+
+ String[] mem3 = {"memory", ""};
+
+ String[][] test1 = {mem1, vcores1};
+ String[][] test2 = {mem2, vcores2};
+ String[][] test3 = {mem3};
+
+ String[][][] allTests = {test1, test2, test3};
+
+ for (String[][] test : allTests) {
+
+ Configuration conf = new YarnConfiguration();
+ String resSt = "";
+ for (String[] resources : test) {
+ resSt += (resources[0] + ",");
+ }
+ resSt = resSt.substring(0, resSt.length() - 1);
+ conf.set(YarnConfiguration.RESOURCE_TYPES, resSt);
+ for (String[] resources : test) {
+ String name =
+ YarnConfiguration.RESOURCE_TYPES + "." + resources[0] + ".units";
+ conf.set(name, resources[1]);
+ }
+ try {
+ ResourceUtils.initializeResourcesMap(conf);
+ Assert.fail("resource map initialization should fail");
+ } catch (Exception e) {
+ // do nothing
+ }
+ }
+ }
+
+ @Test
+ public void testGetResourceInformation() throws Exception {
+
+ Configuration conf = new YarnConfiguration();
+ Map<String, Resource> testRun = new HashMap<>();
+ setupResourceTypes(conf, "resource-types-4.xml");
+ // testRun.put("node-resources-1.xml", Resource.newInstance(1024, 1));
+ Resource test3Resources = Resource.newInstance(1024, 1);
+ test3Resources.setResourceInformation("resource1",
+ ResourceInformation.newInstance("resource1", "Gi", 5L));
+ test3Resources.setResourceInformation("resource2",
+ ResourceInformation.newInstance("resource2", "m", 2L));
+ testRun.put("node-resources-2.xml", test3Resources);
+
+ for (Map.Entry<String, Resource> entry : testRun.entrySet()) {
+ String resourceFile = entry.getKey();
+ ResourceUtils.resetNodeResources();
+ File dest;
+ File source = new File(
+ conf.getClassLoader().getResource(resourceFile).getFile());
+ dest = new File(source.getParent(), "node-resources.xml");
+ FileUtils.copyFile(source, dest);
+ Map<String, ResourceInformation> actual = ResourceUtils
+ .getNodeResourceInformation(conf);
+ Assert.assertEquals(entry.getValue().getResources().length,
+ actual.size());
+ for (ResourceInformation resInfo : entry.getValue().getResources()) {
+ Assert.assertEquals(resInfo, actual.get(resInfo.getName()));
+ }
+ dest.delete();
+ }
+ }
+
+ public static String setupResourceTypes(Configuration conf, String filename)
+ throws Exception {
+ File source = new File(
+ conf.getClassLoader().getResource(filename).getFile());
+ File dest = new File(source.getParent(), "resource-types.xml");
+ FileUtils.copyFile(source, dest);
+ ResourceUtils.getResourceTypes();
+ return dest.getAbsolutePath();
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
index d79179ac0d9..a8404fbaee7 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java
@@ -18,35 +18,102 @@
package org.apache.hadoop.yarn.util.resource;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
+import java.io.File;
+
+import static org.apache.hadoop.yarn.util.resource.Resources.componentwiseMin;
+import static org.apache.hadoop.yarn.util.resource.Resources.componentwiseMax;
+import static org.apache.hadoop.yarn.util.resource.Resources.add;
+import static org.apache.hadoop.yarn.util.resource.Resources.subtract;
+import static org.apache.hadoop.yarn.util.resource.Resources.multiply;
+import static org.apache.hadoop.yarn.util.resource.Resources.multiplyAndAddTo;
+import static org.apache.hadoop.yarn.util.resource.Resources.multiplyAndRoundDown;
+import static org.apache.hadoop.yarn.util.resource.Resources.fitsIn;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class TestResources {
-
+
+ static class ExtendedResources extends Resources {
+ public static Resource unbounded() {
+ return new FixedValueResource("UNBOUNDED", Long.MAX_VALUE);
+ }
+
+ public static Resource none() {
+ return new FixedValueResource("NONE", 0L);
+ }
+ }
+
+ private static final String EXTRA_RESOURCE_TYPE = "resource2";
+ private String resourceTypesFile;
+
+ private void setupExtraResourceType() throws Exception {
+ Configuration conf = new YarnConfiguration();
+ resourceTypesFile =
+ TestResourceUtils.setupResourceTypes(conf, "resource-types-3.xml");
+ }
+
+ private void unsetExtraResourceType() {
+ deleteResourceTypesFile();
+ ResourceUtils.resetResourceTypes();
+ }
+
+ private void deleteResourceTypesFile() {
+ if (resourceTypesFile != null && !resourceTypesFile.isEmpty()) {
+ File resourceFile = new File(resourceTypesFile);
+ resourceFile.delete();
+ }
+ }
+
+ @Before
+ public void setup() throws Exception {
+ setupExtraResourceType();
+ }
+
+ @After
+ public void teardown() {
+ deleteResourceTypesFile();
+ }
+
  /** Creates a resource carrying only the mandatory memory and vcores. */
  public Resource createResource(long memory, int vCores) {
    return Resource.newInstance(memory, vCores);
  }

  /**
   * Creates a resource that additionally carries a value for the extra
   * "resource2" type registered in {@link #setup()}.
   */
  public Resource createResource(long memory, int vCores, long resource2) {
    Resource ret = Resource.newInstance(memory, vCores);
    ret.setResourceInformation(EXTRA_RESOURCE_TYPE,
        ResourceInformation.newInstance(EXTRA_RESOURCE_TYPE, resource2));
    return ret;
  }
+
+ @Test(timeout = 10000)
public void testCompareToWithUnboundedResource() {
- assertTrue(Resources.unbounded().compareTo(
- createResource(Long.MAX_VALUE, Integer.MAX_VALUE)) == 0);
- assertTrue(Resources.unbounded().compareTo(
- createResource(Long.MAX_VALUE, 0)) > 0);
- assertTrue(Resources.unbounded().compareTo(
- createResource(0, Integer.MAX_VALUE)) > 0);
+ unsetExtraResourceType();
+ Resource unboundedClone = Resources.clone(ExtendedResources.unbounded());
+ assertTrue(unboundedClone
+ .compareTo(createResource(Long.MAX_VALUE, Integer.MAX_VALUE)) == 0);
+ assertTrue(unboundedClone.compareTo(createResource(Long.MAX_VALUE, 0)) > 0);
+ assertTrue(
+ unboundedClone.compareTo(createResource(0, Integer.MAX_VALUE)) > 0);
}
- @Test(timeout=10000)
+ @Test(timeout = 10000)
public void testCompareToWithNoneResource() {
assertTrue(Resources.none().compareTo(createResource(0, 0)) == 0);
- assertTrue(Resources.none().compareTo(
- createResource(1, 0)) < 0);
- assertTrue(Resources.none().compareTo(
- createResource(0, 1)) < 0);
+ assertTrue(Resources.none().compareTo(createResource(1, 0)) < 0);
+ assertTrue(Resources.none().compareTo(createResource(0, 1)) < 0);
+ assertTrue(Resources.none().compareTo(createResource(0, 0, 0)) == 0);
+ assertTrue(Resources.none().compareTo(createResource(1, 0, 0)) < 0);
+ assertTrue(Resources.none().compareTo(createResource(0, 1, 0)) < 0);
+ assertTrue(Resources.none().compareTo(createResource(0, 0, 1)) < 0);
}
@Test(timeout=10000)
@@ -69,4 +136,131 @@ public void testMultipleRoundUp() {
assertEquals(memoryErrorMsg, result.getMemorySize(), 0);
assertEquals(vcoreErrorMsg, result.getVirtualCores(), 0);
}
+
+ @Test(timeout = 1000)
+ public void testFitsIn() {
+ assertTrue(fitsIn(createResource(1, 1), createResource(2, 2)));
+ assertTrue(fitsIn(createResource(2, 2), createResource(2, 2)));
+ assertFalse(fitsIn(createResource(2, 2), createResource(1, 1)));
+ assertFalse(fitsIn(createResource(1, 2), createResource(2, 1)));
+ assertFalse(fitsIn(createResource(2, 1), createResource(1, 2)));
+ assertTrue(fitsIn(createResource(1, 1, 1), createResource(2, 2, 2)));
+ assertTrue(fitsIn(createResource(1, 1, 0), createResource(2, 2, 0)));
+ assertTrue(fitsIn(createResource(1, 1, 1), createResource(2, 2, 2)));
+ }
+
  @Test(timeout = 1000)
  public void testComponentwiseMin() {
    // Minimum is taken per component; a type absent from one operand is
    // treated as zero (see the (2,2,2) vs (1,1) case yielding (1,1,0)).
    assertEquals(createResource(1, 1),
        componentwiseMin(createResource(1, 1), createResource(2, 2)));
    assertEquals(createResource(1, 1),
        componentwiseMin(createResource(2, 2), createResource(1, 1)));
    assertEquals(createResource(1, 1),
        componentwiseMin(createResource(1, 2), createResource(2, 1)));
    assertEquals(createResource(1, 1, 1),
        componentwiseMin(createResource(1, 1, 1), createResource(2, 2, 2)));
    assertEquals(createResource(1, 1, 0),
        componentwiseMin(createResource(2, 2, 2), createResource(1, 1)));
    assertEquals(createResource(1, 1, 2),
        componentwiseMin(createResource(1, 2, 2), createResource(2, 1, 3)));
  }
+
  @Test
  public void testComponentwiseMax() {
    // Maximum is taken per component; a type absent from one operand is
    // treated as zero (see the (2,2,2) vs (1,1) case yielding (2,2,2)).
    assertEquals(createResource(2, 2),
        componentwiseMax(createResource(1, 1), createResource(2, 2)));
    assertEquals(createResource(2, 2),
        componentwiseMax(createResource(2, 2), createResource(1, 1)));
    assertEquals(createResource(2, 2),
        componentwiseMax(createResource(1, 2), createResource(2, 1)));
    assertEquals(createResource(2, 2, 2),
        componentwiseMax(createResource(1, 1, 1), createResource(2, 2, 2)));
    assertEquals(createResource(2, 2, 2),
        componentwiseMax(createResource(2, 2, 2), createResource(1, 1)));
    assertEquals(createResource(2, 2, 3),
        componentwiseMax(createResource(1, 2, 2), createResource(2, 1, 3)));
    assertEquals(createResource(2, 2, 1),
        componentwiseMax(createResource(2, 2, 0), createResource(2, 1, 1)));
  }
+
  @Test
  public void testAdd() {
    // Addition is per component, including the extended resource type.
    assertEquals(createResource(2, 3),
        add(createResource(1, 1), createResource(1, 2)));
    assertEquals(createResource(3, 2),
        add(createResource(1, 1), createResource(2, 1)));
    assertEquals(createResource(2, 2, 0),
        add(createResource(1, 1, 0), createResource(1, 1, 0)));
    assertEquals(createResource(2, 2, 3),
        add(createResource(1, 1, 1), createResource(1, 1, 2)));
  }
+
  @Test
  public void testSubtract() {
    // Subtraction is per component, including the extended resource type.
    assertEquals(createResource(1, 0),
        subtract(createResource(2, 1), createResource(1, 1)));
    assertEquals(createResource(0, 1),
        subtract(createResource(1, 2), createResource(1, 1)));
    assertEquals(createResource(2, 2, 0),
        subtract(createResource(3, 3, 0), createResource(1, 1, 0)));
    assertEquals(createResource(1, 1, 2),
        subtract(createResource(2, 2, 3), createResource(1, 1, 1)));
  }
+
  @Test
  public void testClone() {
    // A clone compares equal whether the extended type is materialized
    // explicitly with value 0 or left absent entirely.
    assertEquals(createResource(1, 1), Resources.clone(createResource(1, 1)));
    assertEquals(createResource(1, 1, 0),
        Resources.clone(createResource(1, 1)));
    assertEquals(createResource(1, 1),
        Resources.clone(createResource(1, 1, 0)));
    assertEquals(createResource(1, 1, 2),
        Resources.clone(createResource(1, 1, 2)));
  }
+
  @Test
  public void testMultiply() {
    // Multiplication scales every component by the same integer factor.
    assertEquals(createResource(4, 2), multiply(createResource(2, 1), 2));
    assertEquals(createResource(4, 2, 0), multiply(createResource(2, 1), 2));
    assertEquals(createResource(2, 4), multiply(createResource(1, 2), 2));
    assertEquals(createResource(2, 4, 0), multiply(createResource(1, 2), 2));
    assertEquals(createResource(6, 6, 0), multiply(createResource(3, 3, 0), 2));
    assertEquals(createResource(4, 4, 6), multiply(createResource(2, 2, 3), 2));
  }
+
  @Test
  public void testMultiplyAndRoundDown() {
    // Fractional per-component products are truncated towards zero
    // (3 * 1.5 = 4.5 -> 4; 3 * 2.5 = 7.5 -> 7).
    assertEquals(createResource(4, 1),
        multiplyAndRoundDown(createResource(3, 1), 1.5));
    assertEquals(createResource(4, 1, 0),
        multiplyAndRoundDown(createResource(3, 1), 1.5));
    assertEquals(createResource(1, 4),
        multiplyAndRoundDown(createResource(1, 3), 1.5));
    assertEquals(createResource(1, 4, 0),
        multiplyAndRoundDown(createResource(1, 3), 1.5));
    assertEquals(createResource(7, 7, 0),
        multiplyAndRoundDown(createResource(3, 3, 0), 2.5));
    assertEquals(createResource(2, 2, 7),
        multiplyAndRoundDown(createResource(1, 1, 3), 2.5));
  }
+
  @Test
  public void testMultiplyAndAddTo() throws Exception {
    // NOTE(review): presumably re-registers the extra resource type in case
    // an earlier test reset the static registry — confirm ordering intent.
    unsetExtraResourceType();
    setupExtraResourceType();
    // multiplyAndAddTo(a, b, f) yields a + b * f per component.
    assertEquals(createResource(6, 4),
        multiplyAndAddTo(createResource(3, 1), createResource(2, 2), 1.5));
    assertEquals(createResource(6, 4, 0),
        multiplyAndAddTo(createResource(3, 1), createResource(2, 2), 1.5));
    assertEquals(createResource(4, 7),
        multiplyAndAddTo(createResource(1, 1), createResource(2, 4), 1.5));
    assertEquals(createResource(4, 7, 0),
        multiplyAndAddTo(createResource(1, 1), createResource(2, 4), 1.5));
    assertEquals(createResource(6, 4, 0),
        multiplyAndAddTo(createResource(3, 1, 0), createResource(2, 2, 0),
            1.5));
    assertEquals(createResource(6, 4, 6),
        multiplyAndAddTo(createResource(3, 1, 2), createResource(2, 2, 3),
            1.5));
  }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-1.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-1.xml
new file mode 100644
index 00000000000..f00573e3077
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-1.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+ yarn.nodemanager.resource.memory-mb
+ 1024
+
+
+
+ yarn.nodemanager.resource.vcores
+ 1
+
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml
new file mode 100644
index 00000000000..9d9b3dc65c8
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/node-resources-2.xml
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+
+ yarn.nodemanager.resource-type.memory-mb
+ 1024Mi
+
+
+
+ yarn.nodemanager.resource-type.vcores
+ 1
+
+
+
+ yarn.nodemanager.resource-type.resource1
+ 5Gi
+
+
+
+ yarn.nodemanager.resource-type.resource2
+ 2m
+
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-1.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-1.xml
new file mode 100644
index 00000000000..3ec106dfbb2
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-1.xml
@@ -0,0 +1,18 @@
+
+
+
+
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-2.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-2.xml
new file mode 100644
index 00000000000..6e5885ed7d7
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-2.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+ yarn.resource-types
+ resource1
+
+
+
+ yarn.resource-types.resource1.units
+ G
+
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-3.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-3.xml
new file mode 100644
index 00000000000..8fd6fefa8f1
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-3.xml
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+  <property>
+    <name>yarn.resource-types</name>
+    <value>resource2</value>
+  </property>
+
+</configuration>
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-4.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-4.xml
new file mode 100644
index 00000000000..c84316a536e
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-4.xml
@@ -0,0 +1,34 @@
+
+
+
+
+
+
+
+ yarn.resource-types
+ resource1,resource2
+
+
+
+ yarn.resource-types.resource1.units
+ G
+
+
+
+ yarn.resource-types.resource2.units
+ m
+
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-1.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-1.xml
new file mode 100644
index 00000000000..d1942f2c97f
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-1.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+ yarn.resource-types
+ memory-mb,resource1
+
+
+
+ yarn.resource-types.resource1.calculator-units
+ G
+
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-2.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-2.xml
new file mode 100644
index 00000000000..fa43b6c14ef
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-2.xml
@@ -0,0 +1,33 @@
+
+
+
+
+
+
+
+ yarn.resource-types
+ vcores,resource1
+
+
+
+ yarn.resource-types.resource1.calculator-units
+ G
+
+
+
+ yarn.resource-types.vcores.units
+ Az
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-3.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-3.xml
new file mode 100644
index 00000000000..539d657692e
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-3.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+ yarn.resource-types
+ resource1,resource1
+
+
+
+ yarn.resource-types.resource1.calculator-units
+ A
+
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-4.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-4.xml
new file mode 100644
index 00000000000..c8eb7662097
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-4.xml
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+ yarn.resource-types
+ memory,resource1
+
+
+
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 9240ed872e0..0b57717c29f 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
@@ -338,9 +339,20 @@ private static ApplicationReportExt convertToApplicationReport(
ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS);
long preemptedVcoreSeconds = parseLong(entityInfo,
ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS);
- appResources = ApplicationResourceUsageReport.newInstance(0, 0, null,
- null, null, memorySeconds, vcoreSeconds, 0, 0,
- preemptedMemorySeconds, preemptedVcoreSeconds);
+ Map resourceSecondsMap = new HashMap<>();
+ Map preemptedResoureSecondsMap = new HashMap<>();
+ resourceSecondsMap
+ .put(ResourceInformation.MEMORY_MB.getName(), memorySeconds);
+ resourceSecondsMap
+ .put(ResourceInformation.VCORES.getName(), vcoreSeconds);
+ preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
+ preemptedMemorySeconds);
+ preemptedResoureSecondsMap
+ .put(ResourceInformation.VCORES.getName(), preemptedVcoreSeconds);
+
+ appResources = ApplicationResourceUsageReport
+ .newInstance(0, 0, null, null, null, resourceSecondsMap, 0, 0,
+ preemptedResoureSecondsMap);
}
if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
index e7f47af2647..3b37abdee03 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
@@ -65,8 +65,6 @@
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
@@ -447,12 +445,12 @@ public static ApplicationSubmissionContext newApplicationSubmissionContext(
queue, priority, amContainer, isUnmanagedAM, cancelTokensWhenComplete,
maxAppAttempts, resource, null);
}
-
+
public static ApplicationResourceUsageReport newApplicationResourceUsageReport(
int numUsedContainers, int numReservedContainers, Resource usedResources,
- Resource reservedResources, Resource neededResources, long memorySeconds,
- long vcoreSeconds, long preemptedMemorySeconds,
- long preemptedVcoreSeconds) {
+ Resource reservedResources, Resource neededResources,
+ Map resourceSecondsMap,
+ Map preemptedResourceSecondsMap) {
ApplicationResourceUsageReport report =
recordFactory.newRecordInstance(ApplicationResourceUsageReport.class);
report.setNumUsedContainers(numUsedContainers);
@@ -460,10 +458,8 @@ public static ApplicationResourceUsageReport newApplicationResourceUsageReport(
report.setUsedResources(usedResources);
report.setReservedResources(reservedResources);
report.setNeededResources(neededResources);
- report.setMemorySeconds(memorySeconds);
- report.setVcoreSeconds(vcoreSeconds);
- report.setPreemptedMemorySeconds(preemptedMemorySeconds);
- report.setPreemptedVcoreSeconds(preemptedVcoreSeconds);
+ report.setResourceSecondsMap(resourceSecondsMap);
+ report.setPreemptedResourceSecondsMap(preemptedResourceSecondsMap);
return report;
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index bc5751de935..e771e4fe325 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -41,6 +41,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -852,4 +854,10 @@ public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
return new String[0];
}
+
  /** Mock implementation; resource-type info is not simulated. */
  @Override
  public GetAllResourceTypeInfoResponse getResourceTypeInfo(
      GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
    return null;
  }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 35b7cb0e5f1..3efe0bc1e18 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -174,27 +174,28 @@ public NodeStatusUpdaterImpl(Context context, Dispatcher dispatcher,
@Override
protected void serviceInit(Configuration conf) throws Exception {
- int memoryMb = NodeManagerHardwareUtils.getContainerMemoryMB(conf);
+ this.totalResource = NodeManagerHardwareUtils.getNodeResources(conf);
+ long memoryMb = totalResource.getMemorySize();
float vMemToPMem =
conf.getFloat(
YarnConfiguration.NM_VMEM_PMEM_RATIO,
YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
- int virtualMemoryMb = (int)Math.ceil(memoryMb * vMemToPMem);
+ long virtualMemoryMb = (long)Math.ceil(memoryMb * vMemToPMem);
- int virtualCores = NodeManagerHardwareUtils.getVCores(conf);
+ int virtualCores = totalResource.getVirtualCores();
LOG.info("Nodemanager resources: memory set to " + memoryMb + "MB.");
LOG.info("Nodemanager resources: vcores set to " + virtualCores + ".");
+ LOG.info("Nodemanager resources: " + totalResource);
- this.totalResource = Resource.newInstance(memoryMb, virtualCores);
metrics.addResource(totalResource);
// Get actual node physical resources
- int physicalMemoryMb = memoryMb;
+ long physicalMemoryMb = memoryMb;
int physicalCores = virtualCores;
ResourceCalculatorPlugin rcp =
ResourceCalculatorPlugin.getNodeResourceMonitorPlugin(conf);
if (rcp != null) {
- physicalMemoryMb = (int) (rcp.getPhysicalMemorySize() / (1024 * 1024));
+ physicalMemoryMb = rcp.getPhysicalMemorySize() / (1024 * 1024);
physicalCores = rcp.getNumProcessors();
}
this.physicalResource =
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
index 32f73c85a0c..6fe5bbe73fd 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
@@ -21,10 +21,16 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+
+import java.util.Map;
/**
* Helper class to determine hardware related characteristics such as the
@@ -240,8 +246,8 @@ private static int getVCoresInternal(ResourceCalculatorPlugin plugin,
return cores;
}
- private static int getConfiguredMemoryMB(Configuration conf) {
- int memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB,
+ private static long getConfiguredMemoryMB(Configuration conf) {
+ long memoryMb = conf.getLong(YarnConfiguration.NM_PMEM_MB,
YarnConfiguration.DEFAULT_NM_PMEM_MB);
if (memoryMb == -1) {
memoryMb = YarnConfiguration.DEFAULT_NM_PMEM_MB;
@@ -264,7 +270,7 @@ private static int getConfiguredMemoryMB(Configuration conf) {
* - the configuration for the NodeManager
* @return the amount of memory that will be used for YARN containers in MB.
*/
- public static int getContainerMemoryMB(Configuration conf) {
+ public static long getContainerMemoryMB(Configuration conf) {
if (!isHardwareDetectionEnabled(conf)) {
return getConfiguredMemoryMB(conf);
}
@@ -293,7 +299,7 @@ public static int getContainerMemoryMB(Configuration conf) {
* - the configuration for the NodeManager
* @return the amount of memory that will be used for YARN containers in MB.
*/
- public static int getContainerMemoryMB(ResourceCalculatorPlugin plugin,
+ public static long getContainerMemoryMB(ResourceCalculatorPlugin plugin,
Configuration conf) {
if (!isHardwareDetectionEnabled(conf) || plugin == null) {
return getConfiguredMemoryMB(conf);
@@ -301,26 +307,24 @@ public static int getContainerMemoryMB(ResourceCalculatorPlugin plugin,
return getContainerMemoryMBInternal(plugin, conf);
}
- private static int getContainerMemoryMBInternal(ResourceCalculatorPlugin plugin,
+ private static long getContainerMemoryMBInternal(ResourceCalculatorPlugin plugin,
Configuration conf) {
- int memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, -1);
+ long memoryMb = conf.getInt(YarnConfiguration.NM_PMEM_MB, -1);
if (memoryMb == -1) {
- int physicalMemoryMB =
- (int) (plugin.getPhysicalMemorySize() / (1024 * 1024));
- int hadoopHeapSizeMB =
- (int) (Runtime.getRuntime().maxMemory() / (1024 * 1024));
- int containerPhysicalMemoryMB =
- (int) (0.8f * (physicalMemoryMB - (2 * hadoopHeapSizeMB)));
- int reservedMemoryMB =
- conf.getInt(YarnConfiguration.NM_SYSTEM_RESERVED_PMEM_MB, -1);
+ long physicalMemoryMB = (plugin.getPhysicalMemorySize() / (1024 * 1024));
+ long hadoopHeapSizeMB = (Runtime.getRuntime().maxMemory()
+ / (1024 * 1024));
+ long containerPhysicalMemoryMB = (long) (0.8f
+ * (physicalMemoryMB - (2 * hadoopHeapSizeMB)));
+ long reservedMemoryMB = conf
+ .getInt(YarnConfiguration.NM_SYSTEM_RESERVED_PMEM_MB, -1);
if (reservedMemoryMB != -1) {
containerPhysicalMemoryMB = physicalMemoryMB - reservedMemoryMB;
}
- if(containerPhysicalMemoryMB <= 0) {
+ if (containerPhysicalMemoryMB <= 0) {
LOG.error("Calculated memory for YARN containers is too low."
+ " Node memory is " + physicalMemoryMB
- + " MB, system reserved memory is "
- + reservedMemoryMB + " MB.");
+ + " MB, system reserved memory is " + reservedMemoryMB + " MB.");
}
containerPhysicalMemoryMB = Math.max(containerPhysicalMemoryMB, 0);
memoryMb = containerPhysicalMemoryMB;
@@ -332,4 +336,50 @@ private static int getContainerMemoryMBInternal(ResourceCalculatorPlugin plugin,
}
return memoryMb;
}
+
+ /**
+ * Get the resources for the node.
+ * @param configuration configuration file
+ * @return the resources for the node
+ */
+ public static Resource getNodeResources(Configuration configuration) {
+ Configuration conf = new Configuration(configuration);
+ String memory = ResourceInformation.MEMORY_MB.getName();
+ String vcores = ResourceInformation.VCORES.getName();
+
+ Resource ret = Resource.newInstance(0, 0);
+ Map resourceInformation =
+ ResourceUtils.getNodeResourceInformation(conf);
+ for (Map.Entry entry : resourceInformation
+ .entrySet()) {
+ ret.setResourceInformation(entry.getKey(), entry.getValue());
+ LOG.debug("Setting key " + entry.getKey() + " to " + entry.getValue());
+ }
+ if (resourceInformation.containsKey(memory)) {
+ Long value = resourceInformation.get(memory).getValue();
+ if (value > Integer.MAX_VALUE) {
+ throw new YarnRuntimeException("Value '" + value
+ + "' for resource memory is more than the maximum for an integer.");
+ }
+ ResourceInformation memResInfo = resourceInformation.get(memory);
+ if(memResInfo.getValue() == 0) {
+ ret.setMemorySize(getContainerMemoryMB(conf));
+ LOG.debug("Set memory to " + ret.getMemorySize());
+ }
+ }
+ if (resourceInformation.containsKey(vcores)) {
+ Long value = resourceInformation.get(vcores).getValue();
+ if (value > Integer.MAX_VALUE) {
+ throw new YarnRuntimeException("Value '" + value
+ + "' for resource vcores is more than the maximum for an integer.");
+ }
+ ResourceInformation vcoresResInfo = resourceInformation.get(vcores);
+ if(vcoresResInfo.getValue() == 0) {
+ ret.setVirtualCores(getVCores(conf));
+ LOG.debug("Set vcores to " + ret.getVirtualCores());
+ }
+ }
+ LOG.debug("Node resource information map is " + ret);
+ return ret;
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
index 4add586bbf1..767c308aeb6 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
@@ -172,7 +172,7 @@ public void testGetContainerMemoryMB() throws Exception {
YarnConfiguration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_ENABLE_HARDWARE_CAPABILITY_DETECTION,
true);
- int mem = NodeManagerHardwareUtils.getContainerMemoryMB(null, conf);
+ long mem = NodeManagerHardwareUtils.getContainerMemoryMB(null, conf);
Assert.assertEquals(YarnConfiguration.DEFAULT_NM_PMEM_MB, mem);
mem = NodeManagerHardwareUtils.getContainerMemoryMB(plugin, conf);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java
index 931b1c8b7d5..7ae23e7bb63 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AMSProcessingChain.java
@@ -82,7 +82,7 @@ public synchronized void addProcessor(
  /**
   * Forwards AM registration to the head of the processor chain; each
   * processor in turn populates {@code resp}.
   *
   * @param applicationAttemptId attempt registering its application master
   * @param request registration request from the AM
   * @param resp response to be filled in by the processors
   * @throws IOException if a processor fails on I/O
   * @throws YarnException if a processor rejects the registration
   */
  public void registerApplicationMaster(
      ApplicationAttemptId applicationAttemptId,
      RegisterApplicationMasterRequest request,
      RegisterApplicationMasterResponse resp) throws IOException, YarnException {
    this.head.registerApplicationMaster(applicationAttemptId, request, resp);
  }
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index d08005e64e0..ce4c4e1a519 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -112,6 +112,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse;
import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
@@ -175,6 +177,7 @@
import org.apache.hadoop.yarn.util.UTCClock;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
@@ -1783,4 +1786,12 @@ public void setDisplayPerUserApps(boolean displayPerUserApps) {
this.displayPerUserApps = displayPerUserApps;
}
+ @Override
+ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
+ GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
+ GetAllResourceTypeInfoResponse response =
+ GetAllResourceTypeInfoResponse.newInstance();
+ response.setResourceTypeInfo(ResourceUtils.getResourcesTypeInfo());
+ return response;
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index d5444b48091..a68bf3166f4 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -111,7 +111,8 @@ public void init(ApplicationMasterServiceContext amsContext,
public void registerApplicationMaster(
ApplicationAttemptId applicationAttemptId,
RegisterApplicationMasterRequest request,
- RegisterApplicationMasterResponse response) throws IOException {
+ RegisterApplicationMasterResponse response)
+ throws IOException, YarnException {
RMApp app = getRmContext().getRMApps().get(
applicationAttemptId.getApplicationId());
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index 98944af6e92..ce425dfaac4 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -127,7 +127,8 @@ public void init(ApplicationMasterServiceContext amsContext,
public void registerApplicationMaster(
ApplicationAttemptId applicationAttemptId,
RegisterApplicationMasterRequest request,
- RegisterApplicationMasterResponse response) throws IOException {
+ RegisterApplicationMasterResponse response)
+ throws IOException, YarnException {
SchedulerApplicationAttempt appAttempt = ((AbstractYarnScheduler)
getScheduler()).getApplicationAttempt(applicationAttemptId);
if (appAttempt.getOpportunisticContainerContext() == null) {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index cb2828f05ad..d0425907f6a 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -70,6 +70,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.SettableFuture;
+import org.apache.hadoop.yarn.util.StringHelper;
/**
* This class manages the list of applications for the resource manager.
@@ -189,7 +190,12 @@ public static SummaryBuilder createAppSummary(RMApp app) {
.add("preemptedAMContainers", metrics.getNumAMContainersPreempted())
.add("preemptedNonAMContainers", metrics.getNumNonAMContainersPreempted())
.add("preemptedResources", metrics.getResourcePreempted())
- .add("applicationType", app.getApplicationType());
+ .add("applicationType", app.getApplicationType())
+ .add("resourceSeconds", StringHelper
+ .getResourceSecondsString(metrics.getResourceSecondsMap()))
+ .add("preemptedResourceSeconds", StringHelper
+ .getResourceSecondsString(
+ metrics.getPreemptedResourceSecondsMap()));
return summary;
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 35b0c983fac..e045f9a1b67 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -295,8 +295,7 @@ public static void checkSchedContainerChangeRequest(
// Target resource of the increase request is more than NM can offer
ResourceScheduler scheduler = rmContext.getScheduler();
RMNode rmNode = request.getSchedulerNode().getRMNode();
- if (!Resources.fitsIn(scheduler.getResourceCalculator(),
- scheduler.getClusterResource(), targetResource,
+ if (!Resources.fitsIn(scheduler.getResourceCalculator(), targetResource,
rmNode.getTotalCapability())) {
String msg = "Target resource=" + targetResource + " of containerId="
+ containerId + " is more than node's total resource="
@@ -478,7 +477,7 @@ public static YarnApplicationAttemptState createApplicationAttemptState(
DUMMY_APPLICATION_RESOURCE_USAGE_REPORT =
BuilderUtils.newApplicationResourceUsageReport(-1, -1,
Resources.createResource(-1, -1), Resources.createResource(-1, -1),
- Resources.createResource(-1, -1), 0, 0, 0, 0);
+ Resources.createResource(-1, -1), new HashMap<>(), new HashMap<>());
/**
@@ -622,4 +621,12 @@ public static int getApplicableNodeCountForAM(RMContext rmContext,
return labelsToNodes.get(label);
}
}
+
+ public static Long getOrDefault(Map<String, Long> map, String key,
+ Long defaultValue) {
+ if (map.containsKey(key)) {
+ return map.get(key);
+ }
+ return defaultValue;
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index cc47e02cb19..a42d0533c52 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -376,10 +376,11 @@ public RegisterNodeManagerResponse registerNodeManager(
// Check if this node has minimum allocations
if (capability.getMemorySize() < minAllocMb
|| capability.getVirtualCores() < minAllocVcores) {
- String message =
- "NodeManager from " + host
- + " doesn't satisfy minimum allocations, Sending SHUTDOWN"
- + " signal to the NodeManager.";
+ String message = "NodeManager from " + host
+ + " doesn't satisfy minimum allocations, Sending SHUTDOWN"
+ + " signal to the NodeManager. Node capabilities are " + capability
+ + "; minimums are " + minAllocMb + "mb and " + minAllocVcores
+ + " vcores";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
index 0ae3ef01340..f097e9c6291 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
@@ -156,8 +156,8 @@ public static boolean tryPreemptContainerAndDeductResToObtain(
if (null != toObtainByPartition
&& Resources.greaterThan(rc, clusterResource, toObtainByPartition,
Resources.none())
- && Resources.fitsIn(rc, clusterResource,
- rmContainer.getAllocatedResource(), totalPreemptionAllowed)
+ && Resources.fitsIn(rc, rmContainer.getAllocatedResource(),
+ totalPreemptionAllowed)
&& !Resources.isAnyMajorResourceZero(rc, toObtainByPartition)) {
Resources.subtractFrom(toObtainByPartition,
rmContainer.getAllocatedResource());
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
index c730a2d88ea..7b7404caf3d 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
@@ -229,8 +229,7 @@ private boolean canPreemptEnoughResourceForAsked(Resource requiredResource,
// If we already can allocate the reserved container after preemption,
// skip following steps
- if (Resources.fitsIn(rc, clusterResource, lacking,
- Resources.none())) {
+ if (Resources.fitsIn(rc, lacking, Resources.none())) {
return true;
}
@@ -270,7 +269,7 @@ private boolean canPreemptEnoughResourceForAsked(Resource requiredResource,
}
// Lacking <= 0 means we can allocate the reserved container
- if (Resources.fitsIn(rc, clusterResource, lacking, Resources.none())) {
+ if (Resources.fitsIn(rc, lacking, Resources.none())) {
return true;
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
index de23d0a291c..ff100d9a6ec 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
@@ -87,8 +87,8 @@ public NodeForPreemption(float preemptionCost,
// Get list of nodes for preemption, ordered by preemption cost
List<NodeForPreemption> nodesForPreemption = getNodesForPreemption(
- clusterResource, queueToPreemptableResourceByPartition,
- selectedCandidates, totalPreemptedResourceAllowed);
+ queueToPreemptableResourceByPartition, selectedCandidates,
+ totalPreemptedResourceAllowed);
for (NodeForPreemption nfp : nodesForPreemption) {
RMContainer reservedContainer = nfp.schedulerNode.getReservedContainer();
@@ -97,9 +97,8 @@ public NodeForPreemption(float preemptionCost,
}
NodeForPreemption preemptionResult = getPreemptionCandidatesOnNode(
- nfp.schedulerNode, clusterResource,
- queueToPreemptableResourceByPartition, selectedCandidates,
- totalPreemptedResourceAllowed, false);
+ nfp.schedulerNode, queueToPreemptableResourceByPartition,
+ selectedCandidates, totalPreemptedResourceAllowed, false);
if (null != preemptionResult) {
for (RMContainer c : preemptionResult.selectedContainers) {
ApplicationAttemptId appId = c.getApplicationAttemptId();
@@ -135,8 +134,7 @@ private Resource getPreemptableResource(String queueName,
return preemptable;
}
- private boolean tryToPreemptFromQueue(Resource cluster, String queueName,
- String partitionName,
+ private boolean tryToPreemptFromQueue(String queueName, String partitionName,
Map<String, Map<String, Resource>> queueToPreemptableResourceByPartition,
Resource required, Resource totalPreemptionAllowed, boolean readOnly) {
Resource preemptable = getPreemptableResource(queueName, partitionName,
@@ -145,11 +143,11 @@ private boolean tryToPreemptFromQueue(Resource cluster, String queueName,
return false;
}
- if (!Resources.fitsIn(rc, cluster, required, preemptable)) {
+ if (!Resources.fitsIn(rc, required, preemptable)) {
return false;
}
- if (!Resources.fitsIn(rc, cluster, required, totalPreemptionAllowed)) {
+ if (!Resources.fitsIn(rc, required, totalPreemptionAllowed)) {
return false;
}
@@ -165,7 +163,6 @@ private boolean tryToPreemptFromQueue(Resource cluster, String queueName,
/**
* Try to check if we can preempt resources for reserved container in given node
* @param node
- * @param cluster
* @param queueToPreemptableResourceByPartition it's a map of
* <queueName, <partitionName, preemptable-resource>>
* @param readOnly do we want to modify preemptable resource after we selected
@@ -174,7 +171,7 @@ private boolean tryToPreemptFromQueue(Resource cluster, String queueName,
* to satisfy reserved resource
*/
private NodeForPreemption getPreemptionCandidatesOnNode(
- FiCaSchedulerNode node, Resource cluster,
+ FiCaSchedulerNode node,
Map<String, Map<String, Resource>> queueToPreemptableResourceByPartition,
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
Resource totalPreemptionAllowed, boolean readOnly) {
@@ -204,8 +201,7 @@ private NodeForPreemption getPreemptionCandidatesOnNode(
String partition = node.getPartition();
// Avoid preempt any container if required <= available + killable
- if (Resources.fitsIn(rc, cluster, reservedContainer.getReservedResource(),
- cur)) {
+ if (Resources.fitsIn(rc, reservedContainer.getReservedResource(), cur)) {
return null;
}
@@ -232,9 +228,9 @@ private NodeForPreemption getPreemptionCandidatesOnNode(
// Can we preempt container c?
// Check if we have quota to preempt this container
- boolean canPreempt = tryToPreemptFromQueue(cluster, containerQueueName,
- partition, queueToPreemptableResourceByPartition,
- c.getAllocatedResource(), totalPreemptionAllowed, readOnly);
+ boolean canPreempt = tryToPreemptFromQueue(containerQueueName, partition,
+ queueToPreemptableResourceByPartition, c.getAllocatedResource(),
+ totalPreemptionAllowed, readOnly);
// If we can, add to selected container, and change resource accordingly.
if (canPreempt) {
@@ -246,7 +242,7 @@ private NodeForPreemption getPreemptionCandidatesOnNode(
Resources.addTo(totalSelected, c.getAllocatedResource());
}
Resources.addTo(cur, c.getAllocatedResource());
- if (Resources.fitsIn(rc, cluster,
+ if (Resources.fitsIn(rc,
reservedContainer.getReservedResource(), cur)) {
canAllocateReservedContainer = true;
break;
@@ -282,7 +278,7 @@ private NodeForPreemption getPreemptionCandidatesOnNode(
return nfp;
}
- private List<NodeForPreemption> getNodesForPreemption(Resource cluster,
+ private List<NodeForPreemption> getNodesForPreemption(
Map<String, Map<String, Resource>> queueToPreemptableResourceByPartition,
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
Resource totalPreemptionAllowed) {
@@ -292,7 +288,7 @@ private NodeForPreemption getPreemptionCandidatesOnNode(
for (FiCaSchedulerNode node : preemptionContext.getScheduler()
.getAllNodes()) {
if (node.getReservedContainer() != null) {
- NodeForPreemption nfp = getPreemptionCandidatesOnNode(node, cluster,
+ NodeForPreemption nfp = getPreemptionCandidatesOnNode(node,
queueToPreemptableResourceByPartition, selectedCandidates,
totalPreemptionAllowed, true);
if (null != nfp) {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index 35340e62a22..00ef39fdd98 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -851,11 +851,8 @@ public void storeNewApplicationAttempt(RMAppAttempt appAttempt) {
appAttempt.getAppAttemptId(),
appAttempt.getMasterContainer(),
credentials, appAttempt.getStartTime(),
- resUsage.getMemorySeconds(),
- resUsage.getVcoreSeconds(),
- attempMetrics.getPreemptedMemory(),
- attempMetrics.getPreemptedVcore()
- );
+ resUsage.getResourceUsageSecondsMap(),
+ attempMetrics.getPreemptedResourceSecondsMap());
getRMStateStoreEventHandler().handle(
new RMStateStoreAppAttemptEvent(attemptState));
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java
index 67aaf947127..2de071ad2ec 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/ApplicationAttemptStateData.java
@@ -25,23 +25,28 @@
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.ApplicationAttemptStateDataProto;
+import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.util.Records;
+import java.util.Map;
+
/*
* Contains the state data that needs to be persisted for an ApplicationAttempt
*/
@Public
@Unstable
public abstract class ApplicationAttemptStateData {
+
public static ApplicationAttemptStateData newInstance(
ApplicationAttemptId attemptId, Container container,
Credentials attemptTokens, long startTime, RMAppAttemptState finalState,
String finalTrackingUrl, String diagnostics,
FinalApplicationStatus amUnregisteredFinalStatus, int exitStatus,
- long finishTime, long memorySeconds, long vcoreSeconds,
- long preemptedMemorySeconds, long preemptedVcoreSeconds) {
+ long finishTime, Map<String, Long> resourceSecondsMap,
+ Map<String, Long> preemptedResourceSecondsMap) {
ApplicationAttemptStateData attemptStateData =
Records.newRecord(ApplicationAttemptStateData.class);
attemptStateData.setAttemptId(attemptId);
@@ -54,23 +59,33 @@ public static ApplicationAttemptStateData newInstance(
attemptStateData.setFinalApplicationStatus(amUnregisteredFinalStatus);
attemptStateData.setAMContainerExitStatus(exitStatus);
attemptStateData.setFinishTime(finishTime);
- attemptStateData.setMemorySeconds(memorySeconds);
- attemptStateData.setVcoreSeconds(vcoreSeconds);
- attemptStateData.setPreemptedMemorySeconds(preemptedMemorySeconds);
- attemptStateData.setPreemptedVcoreSeconds(preemptedVcoreSeconds);
+ attemptStateData.setMemorySeconds(RMServerUtils
+ .getOrDefault(resourceSecondsMap,
+ ResourceInformation.MEMORY_MB.getName(), 0L));
+ attemptStateData.setVcoreSeconds(RMServerUtils
+ .getOrDefault(resourceSecondsMap, ResourceInformation.VCORES.getName(),
+ 0L));
+ attemptStateData.setPreemptedMemorySeconds(RMServerUtils
+ .getOrDefault(preemptedResourceSecondsMap,
+ ResourceInformation.MEMORY_MB.getName(), 0L));
+ attemptStateData.setPreemptedVcoreSeconds(RMServerUtils
+ .getOrDefault(preemptedResourceSecondsMap,
+ ResourceInformation.VCORES.getName(), 0L));
+ attemptStateData.setResourceSecondsMap(resourceSecondsMap);
+ attemptStateData
+ .setPreemptedResourceSecondsMap(preemptedResourceSecondsMap);
return attemptStateData;
}
public static ApplicationAttemptStateData newInstance(
ApplicationAttemptId attemptId, Container masterContainer,
- Credentials attemptTokens, long startTime, long memorySeconds,
- long vcoreSeconds, long preemptedMemorySeconds,
- long preemptedVcoreSeconds) {
- return newInstance(attemptId, masterContainer, attemptTokens,
- startTime, null, "N/A", "", null, ContainerExitStatus.INVALID, 0,
- memorySeconds, vcoreSeconds,
- preemptedMemorySeconds, preemptedVcoreSeconds);
- }
+ Credentials attemptTokens, long startTime,
+ Map<String, Long> resourceSecondsMap,
+ Map<String, Long> preemptedResourceSecondsMap) {
+ return newInstance(attemptId, masterContainer, attemptTokens, startTime,
+ null, "N/A", "", null, ContainerExitStatus.INVALID, 0,
+ resourceSecondsMap, preemptedResourceSecondsMap);
+ }
public abstract ApplicationAttemptStateDataProto getProto();
@@ -215,4 +230,50 @@ public abstract void setFinalApplicationStatus(
@Public
@Unstable
public abstract void setPreemptedVcoreSeconds(long vcoreSeconds);
+
+ /**
+ * Get the aggregated number of resources that the application has
+ * allocated times the number of seconds the application has been running.
+ *
+ * @return map containing the resource name and aggregated
+ * resource-seconds
+ */
+ @Public
+ @Unstable
+ public abstract Map<String, Long> getResourceSecondsMap();
+
+ /**
+ * Set the aggregated number of resources that the application has
+ * allocated times the number of seconds the application has been running.
+ *
+ * @param resourceSecondsMap map containing the resource name and aggregated
+ * resource-seconds
+ */
+ @Public
+ @Unstable
+ public abstract void setResourceSecondsMap(
+ Map<String, Long> resourceSecondsMap);
+
+ /**
+ * Get the aggregated number of resources preempted that the application has
+ * allocated times the number of seconds the application has been running.
+ *
+ * @return map containing the resource name and aggregated preempted
+ * resource-seconds
+ */
+ @Public
+ @Unstable
+ public abstract Map<String, Long> getPreemptedResourceSecondsMap();
+
+ /**
+ * Set the aggregated number of resources preempted that the application has
+ * allocated times the number of seconds the application has been running.
+ *
+ * @param preemptedResourceSecondsMap map containing the resource name and
+ * aggregated preempted resource-seconds
+ */
+ @Public
+ @Unstable
+ public abstract void setPreemptedResourceSecondsMap(
+ Map<String, Long> preemptedResourceSecondsMap);
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
index e89726f91ad..ed71ea2f016 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -55,6 +56,9 @@
private Container masterContainer = null;
private ByteBuffer appAttemptTokens = null;
+ private Map<String, Long> resourceSecondsMap;
+ private Map<String, Long> preemptedResourceSecondsMap;
+
public ApplicationAttemptStateDataPBImpl() {
builder = ApplicationAttemptStateDataProto.newBuilder();
}
@@ -404,4 +408,50 @@ private static Credentials convertCredentialsFromByteBuffer(
IOUtils.closeStream(dibb);
}
}
+
+ @Override
+ public Map<String, Long> getResourceSecondsMap() {
+ if (this.resourceSecondsMap != null) {
+ return this.resourceSecondsMap;
+ }
+ ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
+ this.resourceSecondsMap = ProtoUtils.convertStringLongMapProtoListToMap(
+ p.getApplicationResourceUsageMapList());
+ return this.resourceSecondsMap;
+ }
+
+ @Override
+ public void setResourceSecondsMap(Map<String, Long> resourceSecondsMap) {
+ maybeInitBuilder();
+ builder.clearApplicationResourceUsageMap();
+ this.resourceSecondsMap = resourceSecondsMap;
+ if (resourceSecondsMap != null) {
+ builder.addAllApplicationResourceUsageMap(
+ ProtoUtils.convertMapToStringLongMapProtoList(resourceSecondsMap));
+ }
+ }
+
+ @Override
+ public Map<String, Long> getPreemptedResourceSecondsMap() {
+ if (this.preemptedResourceSecondsMap != null) {
+ return this.preemptedResourceSecondsMap;
+ }
+ ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
+ this.preemptedResourceSecondsMap = ProtoUtils
+ .convertStringLongMapProtoListToMap(
+ p.getPreemptedResourceUsageMapList());
+ return this.preemptedResourceSecondsMap;
+ }
+
+ @Override
+ public void setPreemptedResourceSecondsMap(
+ Map<String, Long> preemptedResourceSecondsMap) {
+ maybeInitBuilder();
+ builder.clearPreemptedResourceUsageMap();
+ this.preemptedResourceSecondsMap = preemptedResourceSecondsMap;
+ if (preemptedResourceSecondsMap != null) {
+ builder.addAllPreemptedResourceUsageMap(ProtoUtils
+ .convertMapToStringLongMapProtoList(preemptedResourceSecondsMap));
+ }
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
deleted file mode 100644
index b66a5d0d467..00000000000
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.resource;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-import org.apache.hadoop.util.StringUtils;
-
-@Private
-@Evolving
-public class ResourceWeights {
- public static final ResourceWeights NEUTRAL = new ResourceWeights(1.0f);
-
- private final float[] weights = new float[ResourceType.values().length];
-
- public ResourceWeights(float memoryWeight, float cpuWeight) {
- weights[ResourceType.MEMORY.ordinal()] = memoryWeight;
- weights[ResourceType.CPU.ordinal()] = cpuWeight;
- }
-
- public ResourceWeights(float weight) {
- setWeight(weight);
- }
-
- public ResourceWeights() { }
-
- public final void setWeight(float weight) {
- for (int i = 0; i < weights.length; i++) {
- weights[i] = weight;
- }
- }
-
- public void setWeight(ResourceType resourceType, float weight) {
- weights[resourceType.ordinal()] = weight;
- }
-
- public float getWeight(ResourceType resourceType) {
- return weights[resourceType.ordinal()];
- }
-
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append("<");
- for (int i = 0; i < ResourceType.values().length; i++) {
- if (i != 0) {
- sb.append(", ");
- }
- ResourceType resourceType = ResourceType.values()[i];
- sb.append(StringUtils.toLowerCase(resourceType.name()));
- sb.append(StringUtils.format(" weight=%.1f", getWeight(resourceType)));
- }
- sb.append(">");
- return sb.toString();
- }
-}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 7526ea3c611..cfb8a74f59a 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -744,14 +744,10 @@ public ApplicationReport createAndGetApplicationReport(String clientUserName,
}
RMAppMetrics rmAppMetrics = getRMAppMetrics();
- appUsageReport.setMemorySeconds(rmAppMetrics.getMemorySeconds());
- appUsageReport.setVcoreSeconds(rmAppMetrics.getVcoreSeconds());
- appUsageReport.
- setPreemptedMemorySeconds(rmAppMetrics.
- getPreemptedMemorySeconds());
- appUsageReport.
- setPreemptedVcoreSeconds(rmAppMetrics.
- getPreemptedVcoreSeconds());
+ appUsageReport
+ .setResourceSecondsMap(rmAppMetrics.getResourceSecondsMap());
+ appUsageReport.setPreemptedResourceSecondsMap(
+ rmAppMetrics.getPreemptedResourceSecondsMap());
}
if (currentApplicationAttemptId == null) {
@@ -1612,10 +1608,9 @@ public RMAppMetrics getRMAppMetrics() {
Resource resourcePreempted = Resource.newInstance(0, 0);
int numAMContainerPreempted = 0;
int numNonAMContainerPreempted = 0;
- long memorySeconds = 0;
- long vcoreSeconds = 0;
- long preemptedMemorySeconds = 0;
- long preemptedVcoreSeconds = 0;
+ Map<String, Long> resourceSecondsMap = new HashMap<>();
+ Map<String, Long> preemptedSecondsMap = new HashMap<>();
+
for (RMAppAttempt attempt : attempts.values()) {
if (null != attempt) {
RMAppAttemptMetrics attemptMetrics =
@@ -1629,17 +1624,25 @@ public RMAppMetrics getRMAppMetrics() {
// for both running and finished containers.
AggregateAppResourceUsage resUsage =
attempt.getRMAppAttemptMetrics().getAggregateAppResourceUsage();
- memorySeconds += resUsage.getMemorySeconds();
- vcoreSeconds += resUsage.getVcoreSeconds();
- preemptedMemorySeconds += attemptMetrics.getPreemptedMemory();
- preemptedVcoreSeconds += attemptMetrics.getPreemptedVcore();
+ for (Map.Entry entry : resUsage
+ .getResourceUsageSecondsMap().entrySet()) {
+ long value = RMServerUtils
+ .getOrDefault(resourceSecondsMap, entry.getKey(), 0L);
+ value += entry.getValue();
+ resourceSecondsMap.put(entry.getKey(), value);
+ }
+ for (Map.Entry entry : attemptMetrics
+ .getPreemptedResourceSecondsMap().entrySet()) {
+ long value = RMServerUtils
+ .getOrDefault(preemptedSecondsMap, entry.getKey(), 0L);
+ value += entry.getValue();
+ preemptedSecondsMap.put(entry.getKey(), value);
+ }
}
}
- return new RMAppMetrics(resourcePreempted,
- numNonAMContainerPreempted, numAMContainerPreempted,
- memorySeconds, vcoreSeconds,
- preemptedMemorySeconds, preemptedVcoreSeconds);
+ return new RMAppMetrics(resourcePreempted, numNonAMContainerPreempted,
+ numAMContainerPreempted, resourceSecondsMap, preemptedSecondsMap);
}
@Private
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppMetrics.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppMetrics.java
index fa068ea2d88..2bb7fd1ae10 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppMetrics.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppMetrics.java
@@ -19,27 +19,27 @@
package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
+
+import java.util.Map;
public class RMAppMetrics {
final Resource resourcePreempted;
final int numNonAMContainersPreempted;
final int numAMContainersPreempted;
- final long memorySeconds;
- final long vcoreSeconds;
- private final long preemptedMemorySeconds;
- private final long preemptedVcoreSeconds;
+ private final Map resourceSecondsMap;
+ private final Map preemptedResourceSecondsMap;
public RMAppMetrics(Resource resourcePreempted,
int numNonAMContainersPreempted, int numAMContainersPreempted,
- long memorySeconds, long vcoreSeconds, long preemptedMemorySeconds,
- long preemptedVcoreSeconds) {
+ Map resourceSecondsMap,
+ Map preemptedResourceSecondsMap) {
this.resourcePreempted = resourcePreempted;
this.numNonAMContainersPreempted = numNonAMContainersPreempted;
this.numAMContainersPreempted = numAMContainersPreempted;
- this.memorySeconds = memorySeconds;
- this.vcoreSeconds = vcoreSeconds;
- this.preemptedMemorySeconds = preemptedMemorySeconds;
- this.preemptedVcoreSeconds = preemptedVcoreSeconds;
+ this.resourceSecondsMap = resourceSecondsMap;
+ this.preemptedResourceSecondsMap = preemptedResourceSecondsMap;
}
public Resource getResourcePreempted() {
@@ -55,19 +55,32 @@ public int getNumAMContainersPreempted() {
}
public long getMemorySeconds() {
- return memorySeconds;
+ return RMServerUtils.getOrDefault(resourceSecondsMap,
+ ResourceInformation.MEMORY_MB.getName(), 0L);
}
public long getVcoreSeconds() {
- return vcoreSeconds;
+ return RMServerUtils
+ .getOrDefault(resourceSecondsMap, ResourceInformation.VCORES.getName(),
+ 0L);
}
public long getPreemptedMemorySeconds() {
- return preemptedMemorySeconds;
+ return RMServerUtils.getOrDefault(preemptedResourceSecondsMap,
+ ResourceInformation.MEMORY_MB.getName(), 0L);
}
public long getPreemptedVcoreSeconds() {
- return preemptedVcoreSeconds;
+ return RMServerUtils.getOrDefault(preemptedResourceSecondsMap,
+ ResourceInformation.VCORES.getName(), 0L);
+ }
+
+ public Map getResourceSecondsMap() {
+ return resourceSecondsMap;
+ }
+
+ public Map getPreemptedResourceSecondsMap() {
+ return preemptedResourceSecondsMap;
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AggregateAppResourceUsage.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AggregateAppResourceUsage.java
index f0c2b348c32..b858712f7d6 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AggregateAppResourceUsage.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/AggregateAppResourceUsage.java
@@ -19,42 +19,38 @@
package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
+
+import java.util.HashMap;
+import java.util.Map;
@Private
public class AggregateAppResourceUsage {
- long memorySeconds;
- long vcoreSeconds;
+ private Map resourceSecondsMap = new HashMap<>();
- public AggregateAppResourceUsage(long memorySeconds, long vcoreSeconds) {
- this.memorySeconds = memorySeconds;
- this.vcoreSeconds = vcoreSeconds;
+ public AggregateAppResourceUsage(Map resourceSecondsMap) {
+ this.resourceSecondsMap.putAll(resourceSecondsMap);
}
/**
* @return the memorySeconds
*/
public long getMemorySeconds() {
- return memorySeconds;
- }
-
- /**
- * @param memorySeconds the memorySeconds to set
- */
- public void setMemorySeconds(long memorySeconds) {
- this.memorySeconds = memorySeconds;
+ return RMServerUtils.getOrDefault(resourceSecondsMap,
+ ResourceInformation.MEMORY_MB.getName(), 0L);
}
/**
* @return the vcoreSeconds
*/
public long getVcoreSeconds() {
- return vcoreSeconds;
+ return RMServerUtils
+ .getOrDefault(resourceSecondsMap, ResourceInformation.VCORES.getName(),
+ 0L);
}
- /**
- * @param vcoreSeconds the vcoreSeconds to set
- */
- public void setVcoreSeconds(long vcoreSeconds) {
- this.vcoreSeconds = vcoreSeconds;
+ public Map getResourceUsageSecondsMap() {
+ return resourceSecondsMap;
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index a8d92bc0e78..4997bc60f3e 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -937,12 +937,9 @@ public ApplicationResourceUsageReport getApplicationResourceUsageReport() {
}
AggregateAppResourceUsage resUsage =
this.attemptMetrics.getAggregateAppResourceUsage();
- report.setMemorySeconds(resUsage.getMemorySeconds());
- report.setVcoreSeconds(resUsage.getVcoreSeconds());
- report.setPreemptedMemorySeconds(
- this.attemptMetrics.getPreemptedMemory());
- report.setPreemptedVcoreSeconds(
- this.attemptMetrics.getPreemptedVcore());
+ report.setResourceSecondsMap(resUsage.getResourceUsageSecondsMap());
+ report.setPreemptedResourceSecondsMap(
+ this.attemptMetrics.getPreemptedResourceSecondsMap());
return report;
} finally {
this.readLock.unlock();
@@ -979,11 +976,10 @@ public void recover(RMState state) {
this.finalStatus = attemptState.getFinalApplicationStatus();
this.startTime = attemptState.getStartTime();
this.finishTime = attemptState.getFinishTime();
- this.attemptMetrics.updateAggregateAppResourceUsage(
- attemptState.getMemorySeconds(), attemptState.getVcoreSeconds());
+ this.attemptMetrics
+ .updateAggregateAppResourceUsage(attemptState.getResourceSecondsMap());
this.attemptMetrics.updateAggregatePreemptedAppResourceUsage(
- attemptState.getPreemptedMemorySeconds(),
- attemptState.getPreemptedVcoreSeconds());
+ attemptState.getPreemptedResourceSecondsMap());
}
public void transferStateFromAttempt(RMAppAttempt attempt) {
@@ -1358,16 +1354,12 @@ private void rememberTargetTransitionsAndStoreState(RMAppAttemptEvent event,
RMStateStore rmStore = rmContext.getStateStore();
setFinishTime(System.currentTimeMillis());
- ApplicationAttemptStateData attemptState =
- ApplicationAttemptStateData.newInstance(
- applicationAttemptId, getMasterContainer(),
- rmStore.getCredentialsFromAppAttempt(this),
- startTime, stateToBeStored, finalTrackingUrl, diags.toString(),
- finalStatus, exitStatus,
- getFinishTime(), resUsage.getMemorySeconds(),
- resUsage.getVcoreSeconds(),
- this.attemptMetrics.getPreemptedMemory(),
- this.attemptMetrics.getPreemptedVcore());
+ ApplicationAttemptStateData attemptState = ApplicationAttemptStateData
+ .newInstance(applicationAttemptId, getMasterContainer(),
+ rmStore.getCredentialsFromAppAttempt(this), startTime,
+ stateToBeStored, finalTrackingUrl, diags.toString(), finalStatus, exitStatus,
+ getFinishTime(), resUsage.getResourceUsageSecondsMap(),
+ this.attemptMetrics.getPreemptedResourceSecondsMap());
LOG.info("Updating application attempt " + applicationAttemptId
+ " with final state: " + targetedFinalState + ", and exit status: "
+ exitStatus);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
index 0655609a893..0982ef93434 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptMetrics.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
+import java.util.HashMap;
+import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
@@ -25,11 +27,13 @@
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+import org.apache.commons.lang.time.DateUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
@@ -49,10 +53,8 @@
private ReadLock readLock;
private WriteLock writeLock;
- private AtomicLong finishedMemorySeconds = new AtomicLong(0);
- private AtomicLong finishedVcoreSeconds = new AtomicLong(0);
- private AtomicLong preemptedMemorySeconds = new AtomicLong(0);
- private AtomicLong preemptedVcoreSeconds = new AtomicLong(0);
+ private Map resourceUsageMap = new HashMap<>();
+ private Map preemptedResourceMap = new HashMap<>();
private RMContext rmContext;
private int[][] localityStatistics =
@@ -102,11 +104,16 @@ public Resource getResourcePreempted() {
}
public long getPreemptedMemory() {
- return preemptedMemorySeconds.get();
+ return preemptedResourceMap.get(ResourceInformation.MEMORY_MB.getName())
+ .get();
}
public long getPreemptedVcore() {
- return preemptedVcoreSeconds.get();
+ return preemptedResourceMap.get(ResourceInformation.VCORES.getName()).get();
+ }
+
+ public Map getPreemptedResourceSecondsMap() {
+ return convertAtomicLongMaptoLongMap(preemptedResourceMap);
}
public int getNumNonAMContainersPreempted() {
@@ -122,35 +129,89 @@ public boolean getIsPreempted() {
}
public AggregateAppResourceUsage getAggregateAppResourceUsage() {
- long memorySeconds = finishedMemorySeconds.get();
- long vcoreSeconds = finishedVcoreSeconds.get();
+ Map resourcesUsed =
+ convertAtomicLongMaptoLongMap(resourceUsageMap);
// Only add in the running containers if this is the active attempt.
RMApp rmApp = rmContext.getRMApps().get(attemptId.getApplicationId());
- if (null != rmApp) {
- RMAppAttempt currentAttempt = rmApp.getCurrentAppAttempt();
+ if (rmApp != null) {
+ RMAppAttempt currentAttempt = rmContext.getRMApps().get(attemptId.getApplicationId()).getCurrentAppAttempt();
if (currentAttempt.getAppAttemptId().equals(attemptId)) {
- ApplicationResourceUsageReport appResUsageReport = rmContext
- .getScheduler().getAppResourceUsageReport(attemptId);
+ ApplicationResourceUsageReport appResUsageReport =
+ rmContext.getScheduler().getAppResourceUsageReport(attemptId);
if (appResUsageReport != null) {
- memorySeconds += appResUsageReport.getMemorySeconds();
- vcoreSeconds += appResUsageReport.getVcoreSeconds();
+ Map tmp = appResUsageReport.getResourceSecondsMap();
+ for (Map.Entry entry : tmp.entrySet()) {
+ if (resourcesUsed.containsKey(entry.getKey())) {
+ Long value = resourcesUsed.get(entry.getKey());
+ value += entry.getValue();
+ resourcesUsed.put(entry.getKey(), value);
+ } else{
+ resourcesUsed.put(entry.getKey(), entry.getValue());
+ }
+ }
}
}
}
- return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds);
+ return new AggregateAppResourceUsage(resourcesUsed);
+ }
+
+ public void updateAggregateAppResourceUsage(Resource allocated,
+ long deltaUsedMillis) {
+ updateUsageMap(allocated, deltaUsedMillis, resourceUsageMap);
+ }
+
+ public void updateAggregatePreemptedAppResourceUsage(Resource allocated,
+ long deltaUsedMillis) {
+ updateUsageMap(allocated, deltaUsedMillis, preemptedResourceMap);
}
- public void updateAggregateAppResourceUsage(long finishedMemorySeconds,
- long finishedVcoreSeconds) {
- this.finishedMemorySeconds.addAndGet(finishedMemorySeconds);
- this.finishedVcoreSeconds.addAndGet(finishedVcoreSeconds);
+ public void updateAggregateAppResourceUsage(
+ Map resourceSecondsMap) {
+ updateUsageMap(resourceSecondsMap, resourceUsageMap);
}
public void updateAggregatePreemptedAppResourceUsage(
- long preemptedMemorySeconds, long preemptedVcoreSeconds) {
- this.preemptedMemorySeconds.addAndGet(preemptedMemorySeconds);
- this.preemptedVcoreSeconds.addAndGet(preemptedVcoreSeconds);
+ Map preemptedResourceSecondsMap) {
+ updateUsageMap(preemptedResourceSecondsMap, preemptedResourceMap);
+ }
+
+ private void updateUsageMap(Resource allocated, long deltaUsedMillis,
+ Map targetMap) {
+ for (ResourceInformation entry : allocated.getResources()) {
+ AtomicLong resourceUsed;
+ if (!targetMap.containsKey(entry.getName())) {
+ resourceUsed = new AtomicLong(0);
+ targetMap.put(entry.getName(), resourceUsed);
+
+ }
+ resourceUsed = targetMap.get(entry.getName());
+ resourceUsed.addAndGet((entry.getValue() * deltaUsedMillis)
+ / DateUtils.MILLIS_PER_SECOND);
+ }
+ }
+
+ private void updateUsageMap(Map sourceMap,
+ Map targetMap) {
+ for (Map.Entry entry : sourceMap.entrySet()) {
+ AtomicLong resourceUsed;
+ if (!targetMap.containsKey(entry.getKey())) {
+ resourceUsed = new AtomicLong(0);
+ targetMap.put(entry.getKey(), resourceUsed);
+
+ }
+ resourceUsed = targetMap.get(entry.getKey());
+ resourceUsed.set(entry.getValue());
+ }
+ }
+
+ private Map convertAtomicLongMaptoLongMap(
+ Map source) {
+ Map ret = new HashMap<>();
+ for (Map.Entry entry : source.entrySet()) {
+ ret.put(entry.getKey(), entry.getValue().get());
+ }
+ return ret;
}
public void incNumAllocatedContainers(NodeType containerType,
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 5ed6a7537cd..a43459cfbb5 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -25,7 +25,6 @@
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
-import org.apache.commons.lang.time.DateUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -715,20 +714,15 @@ private static void updateAttemptMetrics(RMContainerImpl container) {
if (rmAttempt != null) {
long usedMillis = container.finishTime - container.creationTime;
- long memorySeconds = resource.getMemorySize()
- * usedMillis / DateUtils.MILLIS_PER_SECOND;
- long vcoreSeconds = resource.getVirtualCores()
- * usedMillis / DateUtils.MILLIS_PER_SECOND;
rmAttempt.getRMAppAttemptMetrics()
- .updateAggregateAppResourceUsage(memorySeconds,vcoreSeconds);
+ .updateAggregateAppResourceUsage(resource, usedMillis);
// If this is a preempted container, update preemption metrics
if (ContainerExitStatus.PREEMPTED == container.finishedStatus
- .getExitStatus()) {
- rmAttempt.getRMAppAttemptMetrics().updatePreemptionInfo(resource,
- container);
+ .getExitStatus()) {
rmAttempt.getRMAppAttemptMetrics()
- .updateAggregatePreemptedAppResourceUsage(memorySeconds,
- vcoreSeconds);
+ .updatePreemptionInfo(resource, container);
+ rmAttempt.getRMAppAttemptMetrics()
+ .updateAggregatePreemptedAppResourceUsage(resource, usedMillis);
}
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 5b6fdc65feb..abe5bc69c19 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -58,6 +58,7 @@
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent;
@@ -1321,8 +1322,8 @@ protected void rollbackContainerUpdate(
* @param container Container.
*/
public void asyncContainerRelease(RMContainer container) {
- this.rmContext.getDispatcher().getEventHandler()
- .handle(new ReleaseContainerEvent(container));
+ this.rmContext.getDispatcher().getEventHandler().handle(
+ new ReleaseContainerEvent(container));
}
@Override
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index c807590c22c..db63cd868d0 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -55,11 +55,13 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.UpdateContainerError;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.apache.hadoop.yarn.server.api.ContainerType;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage;
@@ -107,9 +109,7 @@
private static final long MEM_AGGREGATE_ALLOCATION_CACHE_MSECS = 3000;
protected long lastMemoryAggregateAllocationUpdateTime = 0;
- private long lastMemorySeconds = 0;
- private long lastVcoreSeconds = 0;
-
+ private Map lastResourceSecondsMap = new HashMap<>();
protected final AppSchedulingInfo appSchedulingInfo;
protected ApplicationAttemptId attemptId;
protected Map liveContainers =
@@ -1002,22 +1002,23 @@ private AggregateAppResourceUsage getRunningAggregateAppResourceUsage() {
// recently.
if ((currentTimeMillis - lastMemoryAggregateAllocationUpdateTime)
> MEM_AGGREGATE_ALLOCATION_CACHE_MSECS) {
- long memorySeconds = 0;
- long vcoreSeconds = 0;
+ Map resourceSecondsMap = new HashMap<>();
for (RMContainer rmContainer : this.liveContainers.values()) {
long usedMillis = currentTimeMillis - rmContainer.getCreationTime();
Resource resource = rmContainer.getContainer().getResource();
- memorySeconds += resource.getMemorySize() * usedMillis /
- DateUtils.MILLIS_PER_SECOND;
- vcoreSeconds += resource.getVirtualCores() * usedMillis
- / DateUtils.MILLIS_PER_SECOND;
+ for (ResourceInformation entry : resource.getResources()) {
+ long value = RMServerUtils
+ .getOrDefault(resourceSecondsMap, entry.getName(), 0L);
+ value += entry.getValue() * usedMillis
+ / DateUtils.MILLIS_PER_SECOND;
+ resourceSecondsMap.put(entry.getName(), value);
+ }
}
lastMemoryAggregateAllocationUpdateTime = currentTimeMillis;
- lastMemorySeconds = memorySeconds;
- lastVcoreSeconds = vcoreSeconds;
+ lastResourceSecondsMap = resourceSecondsMap;
}
- return new AggregateAppResourceUsage(lastMemorySeconds, lastVcoreSeconds);
+ return new AggregateAppResourceUsage(lastResourceSecondsMap);
}
public ApplicationResourceUsageReport getResourceUsageReport() {
@@ -1032,6 +1033,11 @@ public ApplicationResourceUsageReport getResourceUsageReport() {
Resource cluster = rmContext.getScheduler().getClusterResource();
ResourceCalculator calc =
rmContext.getScheduler().getResourceCalculator();
+ Map preemptedResourceSecondsMaps = new HashMap<>();
+ preemptedResourceSecondsMaps
+ .put(ResourceInformation.MEMORY_MB.getName(), 0L);
+ preemptedResourceSecondsMaps
+ .put(ResourceInformation.VCORES.getName(), 0L);
float queueUsagePerc = 0.0f;
float clusterUsagePerc = 0.0f;
if (!calc.isInvalidDivisor(cluster)) {
@@ -1041,15 +1047,15 @@ public ApplicationResourceUsageReport getResourceUsageReport() {
queueUsagePerc = calc.divide(cluster, usedResourceClone,
Resources.multiply(cluster, queueCapacityPerc)) * 100;
}
- clusterUsagePerc = calc.divide(cluster, usedResourceClone, cluster)
- * 100;
+ clusterUsagePerc =
+ calc.divide(cluster, usedResourceClone, cluster) * 100;
}
- return ApplicationResourceUsageReport.newInstance(liveContainers.size(),
- reservedContainers.size(), usedResourceClone, reservedResourceClone,
- Resources.add(usedResourceClone, reservedResourceClone),
- runningResourceUsage.getMemorySeconds(),
- runningResourceUsage.getVcoreSeconds(), queueUsagePerc,
- clusterUsagePerc, 0, 0);
+ return ApplicationResourceUsageReport
+ .newInstance(liveContainers.size(), reservedContainers.size(),
+ usedResourceClone, reservedResourceClone,
+ Resources.add(usedResourceClone, reservedResourceClone),
+ runningResourceUsage.getResourceUsageSecondsMap(), queueUsagePerc,
+ clusterUsagePerc, preemptedResourceSecondsMaps);
} finally {
writeLock.unlock();
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index d7c452a1ffc..250f4e6b9a7 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -909,7 +909,7 @@ public boolean accept(Resource cluster,
maxResourceLimit = labelManager.getResourceByLabel(
schedulerContainer.getNodePartition(), cluster);
}
- if (!Resources.fitsIn(resourceCalculator, cluster,
+ if (!Resources.fitsIn(resourceCalculator,
Resources.add(queueUsage.getUsed(partition), netAllocated),
maxResourceLimit)) {
if (LOG.isDebugEnabled()) {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index f753d31fdbf..72dfbdd6dfb 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -548,10 +548,7 @@ private ContainerAllocation assignContainer(Resource clusterResource,
toKillContainers.add(killableContainer);
Resources.addTo(availableAndKillable,
killableContainer.getAllocatedResource());
- if (Resources.fitsIn(rc,
- clusterResource,
- capability,
- availableAndKillable)) {
+ if (Resources.fitsIn(rc, capability, availableAndKillable)) {
// Stop if we find enough spaces
availableContainers = 1;
break;
@@ -579,8 +576,7 @@ private ContainerAllocation assignContainer(Resource clusterResource,
// under the limit.
resourceNeedToUnReserve = capability;
}
- unreservedContainer =
- application.findNodeToUnreserve(clusterResource, node,
+ unreservedContainer = application.findNodeToUnreserve(node,
schedulerKey, resourceNeedToUnReserve);
// When (minimum-unreserved-resource > 0 OR we cannot allocate
// new/reserved
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 17bb104605d..a12c5ec7f68 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -314,7 +314,6 @@ private boolean anyContainerInFinalState(
}
private boolean commonCheckContainerAllocation(
- Resource cluster,
ContainerAllocationProposal allocation,
SchedulerContainer schedulerContainer) {
// Make sure node is not reserved by anyone else
@@ -355,8 +354,7 @@ private boolean commonCheckContainerAllocation(
}
}
}
- if (!Resources.fitsIn(rc, cluster,
- allocation.getAllocatedOrReservedResource(),
+ if (!Resources.fitsIn(rc, allocation.getAllocatedOrReservedResource(),
availableResource)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Node doesn't have enough available resource, asked="
@@ -419,8 +417,7 @@ public boolean accept(Resource cluster,
// Common part of check container allocation regardless if it is a
// increase container or regular container
- commonCheckContainerAllocation(cluster, allocation,
- schedulerContainer);
+ commonCheckContainerAllocation(allocation, schedulerContainer);
} else {
// Container reserved first time will be NEW, after the container
// accepted & confirmed, it will become RESERVED state
@@ -721,9 +718,8 @@ public Allocation getAllocation(ResourceCalculator resourceCalculator,
}
@VisibleForTesting
- public NodeId getNodeIdToUnreserve(
- SchedulerRequestKey schedulerKey, Resource resourceNeedUnreserve,
- ResourceCalculator rc, Resource clusterResource) {
+ public NodeId getNodeIdToUnreserve(SchedulerRequestKey schedulerKey,
+ Resource resourceNeedUnreserve, ResourceCalculator resourceCalculator) {
// first go around make this algorithm simple and just grab first
// reservation that has enough resources
Map reservedContainers = this.reservedContainers.get(
@@ -738,7 +734,7 @@ public NodeId getNodeIdToUnreserve(
// make sure we unreserve one with at least the same amount of
// resources, otherwise could affect capacity limits
- if (Resources.fitsIn(rc, clusterResource, resourceNeedUnreserve,
+ if (Resources.fitsIn(resourceCalculator, resourceNeedUnreserve,
reservedResource)) {
if (LOG.isDebugEnabled()) {
LOG.debug(
@@ -806,14 +802,13 @@ public void reserve(SchedulerRequestKey schedulerKey, FiCaSchedulerNode node,
}
@VisibleForTesting
- public RMContainer findNodeToUnreserve(Resource clusterResource,
- FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
- Resource minimumUnreservedResource) {
+ public RMContainer findNodeToUnreserve(FiCaSchedulerNode node,
+ SchedulerRequestKey schedulerKey, Resource minimumUnreservedResource) {
try {
readLock.lock();
// need to unreserve some other container first
NodeId idToUnreserve = getNodeIdToUnreserve(schedulerKey,
- minimumUnreservedResource, rc, clusterResource);
+ minimumUnreservedResource, rc);
if (idToUnreserve == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("checked to see if could unreserve for app but nothing "
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index 352b58d17b2..724e449632d 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -30,7 +30,6 @@
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.security.AccessType;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
@@ -47,7 +46,7 @@
// Maximum amount of resources for each queue's ad hoc children
private final Map maxChildQueueResources;
// Sharing weights for each queue
- private final Map<String, ResourceWeights> queueWeights;
+ private final Map<String, Float> queueWeights;
// Max concurrent running applications for each queue and for each user; in addition,
// for users that have no max specified, we use the userMaxJobsDefault.
@@ -109,7 +108,7 @@ public AllocationConfiguration(Map minQueueResources,
Map maxQueueResources,
Map maxChildQueueResources,
Map queueMaxApps, Map userMaxApps,
- Map<String, ResourceWeights> queueWeights,
+ Map<String, Float> queueWeights,
Map queueMaxAMShares, int userMaxAppsDefault,
int queueMaxAppsDefault, ConfigurableResource queueMaxResourcesDefault,
float queueMaxAMShareDefault,
@@ -249,9 +248,9 @@ public boolean isPreemptable(String queueName) {
return !nonPreemptableQueues.contains(queueName);
}
- private ResourceWeights getQueueWeight(String queue) {
- ResourceWeights weight = queueWeights.get(queue);
- return (weight == null) ? ResourceWeights.NEUTRAL : weight;
+ private float getQueueWeight(String queue) {
+ Float weight = queueWeights.get(queue);
+ return (weight == null) ? 1.0f : weight;
}
public int getUserMaxApps(String user) {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 16374e41c84..597af9411c0 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -45,7 +45,6 @@
import org.apache.hadoop.yarn.security.Permission;
import org.apache.hadoop.yarn.security.PrivilegedEntity;
import org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.util.Clock;
@@ -232,7 +231,7 @@ public synchronized void reloadAllocations() throws IOException,
Map queueMaxApps = new HashMap<>();
Map userMaxApps = new HashMap<>();
Map queueMaxAMShares = new HashMap<>();
- Map<String, ResourceWeights> queueWeights = new HashMap<>();
+ Map<String, Float> queueWeights = new HashMap<>();
Map queuePolicies = new HashMap<>();
Map minSharePreemptionTimeouts = new HashMap<>();
Map fairSharePreemptionTimeouts = new HashMap<>();
@@ -455,7 +454,7 @@ private void loadQueue(String parentName, Element element,
Map queueMaxApps,
Map userMaxApps,
Map queueMaxAMShares,
- Map<String, ResourceWeights> queueWeights,
+ Map<String, Float> queueWeights,
Map queuePolicies,
Map minSharePreemptionTimeouts,
Map fairSharePreemptionTimeouts,
@@ -523,7 +522,7 @@ private void loadQueue(String parentName, Element element,
} else if ("weight".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
double val = Double.parseDouble(text);
- queueWeights.put(queueName, new ResourceWeights((float)val));
+ queueWeights.put(queueName, (float)val);
} else if ("minSharePreemptionTimeout".equals(field.getTagName())) {
String text = ((Text)field.getFirstChild()).getData().trim();
long val = Long.parseLong(text) * 1000L;
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 309dff488f0..30245586a0d 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -43,7 +43,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger;
import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
@@ -75,7 +74,6 @@
private final long startTime;
private final Priority appPriority;
- private final ResourceWeights resourceWeights;
private Resource demand = Resources.createResource(0);
private final FairScheduler scheduler;
private Resource fairShare = Resources.createResource(0, 0);
@@ -120,11 +118,6 @@ public FSAppAttempt(FairScheduler scheduler,
this.startTime = scheduler.getClock().getTime();
this.lastTimeAtFairShare = this.startTime;
this.appPriority = Priority.newInstance(1);
- this.resourceWeights = new ResourceWeights();
- }
-
- ResourceWeights getResourceWeights() {
- return resourceWeights;
}
/**
@@ -1281,7 +1274,7 @@ public Resource getResourceUsage() {
}
@Override
- public ResourceWeights getWeights() {
+ public float getWeight() {
return scheduler.getAppWeight(this);
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 301147c3763..d0f3d8e846f 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -37,7 +37,6 @@
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
@@ -553,7 +552,7 @@ public void recoverContainer(Resource clusterResource,
* @param weight queue weight
*/
public void setWeights(float weight) {
- this.weights = new ResourceWeights(weight);
+ this.weights = weight;
}
/**
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 9d07c796e58..e0df480dbd1 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -42,7 +42,6 @@
import org.apache.hadoop.yarn.security.PrivilegedEntity;
import org.apache.hadoop.yarn.security.PrivilegedEntity.EntityType;
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
@@ -70,7 +69,7 @@
protected SchedulingPolicy policy = SchedulingPolicy.DEFAULT_POLICY;
- protected ResourceWeights weights;
+ protected float weights;
protected Resource minShare;
private ConfigurableResource maxShare;
protected int maxRunningApps;
@@ -140,12 +139,12 @@ public void setPolicy(SchedulingPolicy policy) {
this.policy = policy;
}
- public void setWeights(ResourceWeights weights){
+ public void setWeights(float weights) {
this.weights = weights;
}
@Override
- public ResourceWeights getWeights() {
+ public float getWeight() {
return weights;
}
@@ -448,7 +447,7 @@ public boolean isActive() {
@Override
public String toString() {
return String.format("[%s, demand=%s, running=%s, share=%s, w=%s]",
- getName(), getDemand(), getResourceUsage(), fairShare, getWeights());
+ getName(), getDemand(), getResourceUsage(), fairShare, getWeight());
}
@Override
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 81f6f9dd86b..f9bc1fc5c37 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -54,7 +54,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.RMCriticalThreadUncaughtExceptionHandler;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationConstants;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
@@ -369,7 +368,7 @@ public void update() {
return rmContext.getContainerTokenSecretManager();
}
- public ResourceWeights getAppWeight(FSAppAttempt app) {
+ public float getAppWeight(FSAppAttempt app) {
try {
readLock.lock();
double weight = 1.0;
@@ -377,14 +376,10 @@ public ResourceWeights getAppWeight(FSAppAttempt app) {
// Set weight based on current memory demand
weight = Math.log1p(app.getDemand().getMemorySize()) / Math.log(2);
}
- weight *= app.getPriority().getPriority();
- ResourceWeights resourceWeights = app.getResourceWeights();
- resourceWeights.setWeight((float) weight);
- return resourceWeights;
+ return (float)weight * app.getPriority().getPriority();
} finally {
readLock.unlock();
}
-
}
public Resource getIncrementResourceCapability() {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
index fcdc056577e..4d6af982ae3 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
@@ -22,7 +22,6 @@
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
/**
* A Schedulable represents an entity that can be scheduled such as an
@@ -72,8 +71,15 @@
/** Maximum Resource share assigned to the schedulable. */
Resource getMaxShare();
- /** Job/queue weight in fair sharing. */
- ResourceWeights getWeights();
+ /**
+ * Job/queue weight in fair sharing. Weights are only meaningful when
+ * compared. A weight of 2.0f has twice the weight of a weight of 1.0f,
+ * which has twice the weight of a weight of 0.5f. A weight of 1.0f is
+ * considered unweighted or a neutral weight. A weight of 0 is no weight.
+ *
+ * @return the weight
+ */
+ float getWeight();
/** Start time for jobs in FIFO queues; meaningless for QueueSchedulables.*/
long getStartTime();
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
index 440c73cefdd..0a21b026714 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
@@ -21,7 +21,6 @@
import java.util.Collection;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
@@ -47,7 +46,7 @@
*/
public static void computeShares(
Collection<? extends Schedulable> schedulables, Resource totalResources,
- ResourceType type) {
+ String type) {
computeSharesInternal(schedulables, totalResources, type, false);
}
@@ -62,7 +61,7 @@ public static void computeShares(
*/
public static void computeSteadyShares(
Collection<? extends FSQueue> queues, Resource totalResources,
- ResourceType type) {
+ String type) {
computeSharesInternal(queues, totalResources, type, true);
}
@@ -110,9 +109,9 @@ public static void computeSteadyShares(
*/
private static void computeSharesInternal(
Collection<? extends Schedulable> allSchedulables,
- Resource totalResources, ResourceType type, boolean isSteadyShare) {
+ Resource totalResources, String type, boolean isSteadyShare) {
- Collection<Schedulable> schedulables = new ArrayList<Schedulable>();
+ Collection<Schedulable> schedulables = new ArrayList<>();
int takenResources = handleFixedFairShares(
allSchedulables, schedulables, isSteadyShare, type);
@@ -124,7 +123,7 @@ private static void computeSharesInternal(
// have met all Schedulables' max shares.
int totalMaxShare = 0;
for (Schedulable sched : schedulables) {
- long maxShare = getResourceValue(sched.getMaxShare(), type);
+ long maxShare = sched.getMaxShare().getResourceValue(type);
totalMaxShare = (int) Math.min(maxShare + (long)totalMaxShare,
Integer.MAX_VALUE);
if (totalMaxShare == Integer.MAX_VALUE) {
@@ -132,7 +131,7 @@ private static void computeSharesInternal(
}
}
- long totalResource = Math.max((getResourceValue(totalResources, type) -
+ long totalResource = Math.max((totalResources.getResourceValue(type) -
takenResources), 0);
totalResource = Math.min(totalMaxShare, totalResource);
@@ -159,13 +158,15 @@ private static void computeSharesInternal(
}
// Set the fair shares based on the value of R we've converged to
for (Schedulable sched : schedulables) {
+ Resource target;
+
if (isSteadyShare) {
- setResourceValue(computeShare(sched, right, type),
- ((FSQueue) sched).getSteadyFairShare(), type);
+ target = ((FSQueue) sched).getSteadyFairShare();
} else {
- setResourceValue(
- computeShare(sched, right, type), sched.getFairShare(), type);
+ target = sched.getFairShare();
}
+
+ target.setResourceValue(type, (long)computeShare(sched, right, type));
}
}
@@ -174,7 +175,7 @@ private static void computeSharesInternal(
* w2rRatio, for use in the computeFairShares algorithm as described in #
*/
private static int resourceUsedWithWeightToResourceRatio(double w2rRatio,
- Collection<? extends Schedulable> schedulables, ResourceType type) {
+ Collection<? extends Schedulable> schedulables, String type) {
int resourcesTaken = 0;
for (Schedulable sched : schedulables) {
int share = computeShare(sched, w2rRatio, type);
@@ -188,10 +189,10 @@ private static int resourceUsedWithWeightToResourceRatio(double w2rRatio,
* weight-to-resource ratio w2rRatio.
*/
private static int computeShare(Schedulable sched, double w2rRatio,
- ResourceType type) {
- double share = sched.getWeights().getWeight(type) * w2rRatio;
- share = Math.max(share, getResourceValue(sched.getMinShare(), type));
- share = Math.min(share, getResourceValue(sched.getMaxShare(), type));
+ String type) {
+ double share = sched.getWeight() * w2rRatio;
+ share = Math.max(share, sched.getMinShare().getResourceValue(type));
+ share = Math.min(share, sched.getMaxShare().getResourceValue(type));
return (int) share;
}
@@ -203,7 +204,7 @@ private static int computeShare(Schedulable sched, double w2rRatio,
private static int handleFixedFairShares(
Collection<? extends Schedulable> schedulables,
Collection<Schedulable> nonFixedSchedulables,
- boolean isSteadyShare, ResourceType type) {
+ boolean isSteadyShare, String type) {
int totalResource = 0;
for (Schedulable sched : schedulables) {
@@ -211,11 +212,15 @@ private static int handleFixedFairShares(
if (fixedShare < 0) {
nonFixedSchedulables.add(sched);
} else {
- setResourceValue(fixedShare,
- isSteadyShare
- ? ((FSQueue)sched).getSteadyFairShare()
- : sched.getFairShare(),
- type);
+ Resource target;
+
+ if (isSteadyShare) {
+ target = ((FSQueue)sched).getSteadyFairShare();
+ } else {
+ target = sched.getFairShare();
+ }
+
+ target.setResourceValue(type, fixedShare);
totalResource = (int) Math.min((long)totalResource + (long)fixedShare,
Integer.MAX_VALUE);
}
@@ -230,10 +235,10 @@ private static int handleFixedFairShares(
* or the Schedulable is not active for instantaneous fairshare.
*/
private static long getFairShareIfFixed(Schedulable sched,
- boolean isSteadyShare, ResourceType type) {
+ boolean isSteadyShare, String type) {
// Check if maxShare is 0
- if (getResourceValue(sched.getMaxShare(), type) <= 0) {
+ if (sched.getMaxShare().getResourceValue(type) <= 0) {
return 0;
}
@@ -244,35 +249,11 @@ private static long getFairShareIfFixed(Schedulable sched,
}
// Check if weight is 0
- if (sched.getWeights().getWeight(type) <= 0) {
- long minShare = getResourceValue(sched.getMinShare(), type);
+ if (sched.getWeight() <= 0) {
+ long minShare = sched.getMinShare().getResourceValue(type);
return (minShare <= 0) ? 0 : minShare;
}
return -1;
}
-
- private static long getResourceValue(Resource resource, ResourceType type) {
- switch (type) {
- case MEMORY:
- return resource.getMemorySize();
- case CPU:
- return resource.getVirtualCores();
- default:
- throw new IllegalArgumentException("Invalid resource");
- }
- }
-
- private static void setResourceValue(long val, Resource resource, ResourceType type) {
- switch (type) {
- case MEMORY:
- resource.setMemorySize(val);
- break;
- case CPU:
- resource.setVirtualCores((int)val);
- break;
- default:
- throw new IllegalArgumentException("Invalid resource");
- }
- }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index 72377b0c096..e58b3572968 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
@@ -25,18 +26,15 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
-
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;
-
-import static org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType.*;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
/**
* Makes scheduling decisions by trying to equalize dominant resource usage.
@@ -72,16 +70,18 @@ public ResourceCalculator getResourceCalculator() {
@Override
public void computeShares(Collection<? extends Schedulable> schedulables,
Resource totalResources) {
- for (ResourceType type : ResourceType.values()) {
- ComputeFairShares.computeShares(schedulables, totalResources, type);
+ for (ResourceInformation info: ResourceUtils.getResourceTypesArray()) {
+ ComputeFairShares.computeShares(schedulables, totalResources,
+ info.getName());
}
}
@Override
public void computeSteadyShares(Collection<? extends FSQueue> queues,
Resource totalResources) {
- for (ResourceType type : ResourceType.values()) {
- ComputeFairShares.computeSteadyShares(queues, totalResources, type);
+ for (ResourceInformation info: ResourceUtils.getResourceTypesArray()) {
+ ComputeFairShares.computeSteadyShares(queues, totalResources,
+ info.getName());
}
}
@@ -110,9 +110,13 @@ public void initialize(FSContext fsContext) {
COMPARATOR.setFSContext(fsContext);
}
- public static class DominantResourceFairnessComparator implements Comparator<Schedulable> {
- private static final int NUM_RESOURCES = ResourceType.values().length;
-
+ /**
+ * This class compares two {@link Schedulable} instances according to the
+ * DRF policy. If neither instance is below min share, approximate fair share
+ * ratios are compared.
+ */
+ public static class DominantResourceFairnessComparator
+ implements Comparator<Schedulable> {
private FSContext fsContext;
public void setFSContext(FSContext fsContext) {
@@ -121,89 +125,199 @@ public void setFSContext(FSContext fsContext) {
@Override
public int compare(Schedulable s1, Schedulable s2) {
- ResourceWeights sharesOfCluster1 = new ResourceWeights();
- ResourceWeights sharesOfCluster2 = new ResourceWeights();
- ResourceWeights sharesOfMinShare1 = new ResourceWeights();
- ResourceWeights sharesOfMinShare2 = new ResourceWeights();
- ResourceType[] resourceOrder1 = new ResourceType[NUM_RESOURCES];
- ResourceType[] resourceOrder2 = new ResourceType[NUM_RESOURCES];
+ ResourceInformation[] info = ResourceUtils.getResourceTypesArray();
+ Resource usage1 = s1.getResourceUsage();
+ Resource usage2 = s2.getResourceUsage();
+ Resource minShare1 = s1.getMinShare();
+ Resource minShare2 = s2.getMinShare();
Resource clusterCapacity = fsContext.getClusterResource();
- // Calculate shares of the cluster for each resource both schedulables.
- calculateShares(s1.getResourceUsage(),
- clusterCapacity, sharesOfCluster1, resourceOrder1, s1.getWeights());
- calculateShares(s1.getResourceUsage(),
- s1.getMinShare(), sharesOfMinShare1, null, ResourceWeights.NEUTRAL);
- calculateShares(s2.getResourceUsage(),
- clusterCapacity, sharesOfCluster2, resourceOrder2, s2.getWeights());
- calculateShares(s2.getResourceUsage(),
- s2.getMinShare(), sharesOfMinShare2, null, ResourceWeights.NEUTRAL);
-
+ // These arrays hold the usage, fair, and min share ratios for each
+ // resource type. ratios[0][x] are the usage ratios, ratios[1][x] are
+ // the fair share ratios, and ratios[2][x] are the min share ratios.
+ float[][] ratios1 = new float[info.length][3];
+ float[][] ratios2 = new float[info.length][3];
+
+ // Calculate cluster shares and approximate fair shares for each
+ // resource type of both schedulables.
+ int dominant1 = calculateClusterAndFairRatios(usage1, clusterCapacity,
+ ratios1, s1.getWeight());
+ int dominant2 = calculateClusterAndFairRatios(usage2, clusterCapacity,
+ ratios2, s2.getWeight());
+
// A queue is needy for its min share if its dominant resource
- // (with respect to the cluster capacity) is below its configured min share
- // for that resource
- boolean s1Needy = sharesOfMinShare1.getWeight(resourceOrder1[0]) < 1.0f;
- boolean s2Needy = sharesOfMinShare2.getWeight(resourceOrder2[0]) < 1.0f;
+ // (with respect to the cluster capacity) is below its configured min
+ // share for that resource
+ boolean s1Needy =
+ usage1.getResources()[dominant1].getValue() <
+ minShare1.getResources()[dominant1].getValue();
+ boolean s2Needy =
+ usage2.getResources()[dominant2].getValue() <
+ minShare2.getResources()[dominant2].getValue();
int res = 0;
+
if (!s2Needy && !s1Needy) {
- res = compareShares(sharesOfCluster1, sharesOfCluster2,
- resourceOrder1, resourceOrder2);
+ // Sort shares by usage ratio and compare them by approximate fair share
+ // ratio
+ sortRatios(ratios1, ratios2);
+ res = compareRatios(ratios1, ratios2, 1);
} else if (s1Needy && !s2Needy) {
res = -1;
} else if (s2Needy && !s1Needy) {
res = 1;
} else { // both are needy below min share
- res = compareShares(sharesOfMinShare1, sharesOfMinShare2,
- resourceOrder1, resourceOrder2);
+ // Calculate the min share ratios, then sort by usage ratio, and compare
+ // by min share ratio
+ calculateMinShareRatios(usage1, minShare1, ratios1);
+ calculateMinShareRatios(usage2, minShare2, ratios2);
+ sortRatios(ratios1, ratios2);
+ res = compareRatios(ratios1, ratios2, 2);
}
+
if (res == 0) {
// Apps are tied in fairness ratio. Break the tie by submit time and job
// name to get a deterministic ordering, which is useful for unit tests.
res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
+
if (res == 0) {
res = s1.getName().compareTo(s2.getName());
}
}
+
return res;
}
-
+
+ /**
+ * Sort both ratios arrays according to the usage ratios (the
+ * first index of the inner arrays, e.g. {@code ratios1[x][0]}).
+ *
+ * @param ratios1 the first ratios array
+ * @param ratios2 the second ratios array
+ */
+ @VisibleForTesting
+ void sortRatios(float[][] ratios1, float[][]ratios2) {
+ // sort order descending by resource share
+ Arrays.sort(ratios1, (float[] o1, float[] o2) ->
+ (int) Math.signum(o2[0] - o1[0]));
+ Arrays.sort(ratios2, (float[] o1, float[] o2) ->
+ (int) Math.signum(o2[0] - o1[0]));
+ }
+
/**
- * Calculates and orders a resource's share of a pool in terms of two vectors.
- * The shares vector contains, for each resource, the fraction of the pool that
- * it takes up. The resourceOrder vector contains an ordering of resources
- * by largest share. So if resource=<10 MB, 5 CPU>, and pool=<100 MB, 10 CPU>,
- * shares will be [.1, .5] and resourceOrder will be [CPU, MEMORY].
+ * Calculate a resource's usage ratio and approximate fair share ratio.
+ * The {@code shares} array will be populated with both the usage ratio
+ * and the approximate fair share ratio for each resource type. The usage
+ * ratio is calculated as {@code resource} divided by {@code cluster}.
+ * The approximate fair share ratio is calculated as the usage ratio
+ * divided by {@code weight}. If the cluster's resources are 100MB and
+ * 10 vcores, and the usage ({@code resource}) is 10 MB and 5 CPU, the
+ * usage ratios will be 0.1 and 0.5. If the weights are 2, the fair
+ * share ratios will be 0.05 and 0.25.
+ *
+ * The approximate fair share ratio is the usage divided by the
+ * approximate fair share, i.e. the cluster resources times the weight.
+ * The approximate fair share is an acceptable proxy for the fair share
+ * because when comparing resources, the resource with the higher weight
+ * will be assigned by the scheduler a proportionally higher fair share.
+ *
+ * The {@code shares} array must be at least n x 2, where n
+ * is the number of resource types. Only the first and second indices of
+ * the inner arrays in the {@code shares} array will be used, e.g.
+ * {@code shares[x][0]} and {@code shares[x][1]}.
+ *
+ * The return value will be the index of the dominant resource type in the
+ * {@code shares} array. The dominant resource is the resource type for
+ * which {@code resource} has the largest usage ratio.
+ *
+ * @param resource the resource for which to calculate ratios
+ * @param cluster the total cluster resources
+ * @param ratios the shares array to populate
+ * @param weight the resource weight
+ * @return the index of the resource type with the largest cluster share
*/
@VisibleForTesting
- void calculateShares(Resource resource, Resource pool,
- ResourceWeights shares, ResourceType[] resourceOrder, ResourceWeights weights) {
- shares.setWeight(MEMORY, (float)resource.getMemorySize() /
- (pool.getMemorySize() * weights.getWeight(MEMORY)));
- shares.setWeight(CPU, (float)resource.getVirtualCores() /
- (pool.getVirtualCores() * weights.getWeight(CPU)));
- // sort order vector by resource share
- if (resourceOrder != null) {
- if (shares.getWeight(MEMORY) > shares.getWeight(CPU)) {
- resourceOrder[0] = MEMORY;
- resourceOrder[1] = CPU;
- } else {
- resourceOrder[0] = CPU;
- resourceOrder[1] = MEMORY;
+ int calculateClusterAndFairRatios(Resource resource, Resource cluster,
+ float[][] ratios, float weight) {
+ ResourceInformation[] resourceInfo = resource.getResources();
+ ResourceInformation[] clusterInfo = cluster.getResources();
+ int max = 0;
+
+ for (int i = 0; i < clusterInfo.length; i++) {
+ // First calculate the cluster share
+ ratios[i][0] =
+ resourceInfo[i].getValue() / (float) clusterInfo[i].getValue();
+
+ // Use the cluster share to find the dominant resource
+ if (ratios[i][0] > ratios[max][0]) {
+ max = i;
}
+
+ // Now divide by the weight to get the approximate fair share.
+ // It's OK if the weight is zero, because the floating point division
+ // will yield Infinity, i.e. this Schedulable will lose out to any
+ // other Schedulable with non-zero weight.
+ ratios[i][1] = ratios[i][0] / weight;
}
+
+ return max;
}
- private int compareShares(ResourceWeights shares1, ResourceWeights shares2,
- ResourceType[] resourceOrder1, ResourceType[] resourceOrder2) {
- for (int i = 0; i < resourceOrder1.length; i++) {
- int ret = (int)Math.signum(shares1.getWeight(resourceOrder1[i])
- - shares2.getWeight(resourceOrder2[i]));
+ /**
+ * Calculate a resource's min share ratios. The {@code ratios} array will be
+ * populated with the {@code resource} divided by {@code minShare} for each
+ * resource type. If the min shares are 5 MB and 10 vcores, and the usage
+ * ({@code resource}) is 10 MB and 5 CPU, the ratios will be 2 and 0.5.
+ *
+ * The {@code ratios} array must be n x 3, where n is the
+ * number of resource types. Only the third index of the inner arrays in
+ * the {@code ratios} array will be used, e.g. {@code ratios[x][2]}.
+ *
+ * @param resource the resource for which to calculate min shares
+ * @param minShare the min share
+ * @param ratios the shares array to populate
+ */
+ @VisibleForTesting
+ void calculateMinShareRatios(Resource resource, Resource minShare,
+ float[][] ratios) {
+ ResourceInformation[] resourceInfo = resource.getResources();
+ ResourceInformation[] minShareInfo = minShare.getResources();
+
+ for (int i = 0; i < minShareInfo.length; i++) {
+ ratios[i][2] =
+ resourceInfo[i].getValue() / (float) minShareInfo[i].getValue();
+ }
+ }
+
+ /**
+ * Compare the two ratios arrays and return -1, 0, or 1 if the first array
+ * is less than, equal to, or greater than the second array, respectively.
+ * The {@code index} parameter determines which index of the inner arrays
+ * will be used for the comparisons. 0 is for usage ratios, 1 is for
+ * fair share ratios, and 2 is for the min share ratios. The ratios arrays
+ * are assumed to be sorted in descending order by usage ratio.
+ *
+ * @param ratios1 the first shares array
+ * @param ratios2 the second shares array
+ * @param index the inner index of the ratios arrays to compare. 0 is for
+ * usage ratios, 1 is for approximate fair share ratios, and 2 is for min
+ * share ratios
+ * @return -1, 0, or 1 if the first array is less than, equal to, or
+ * greater than the second array, respectively
+ */
+ @VisibleForTesting
+ int compareRatios(float[][] ratios1, float[][] ratios2, int index) {
+ int ret = 0;
+
+ for (int i = 0; i < ratios1.length; i++) {
+ ret = (int) Math.signum(ratios1[i][index] - ratios2[i][index]);
+
if (ret != 0) {
- return ret;
+ break;
}
}
- return 0;
+
+ return ret;
}
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index 0ef90a1d72f..8179aa75033 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
@@ -42,9 +42,10 @@
@Private
@Unstable
public class FairSharePolicy extends SchedulingPolicy {
- private static final Log LOG = LogFactory.getLog(FairSharePolicy.class);
@VisibleForTesting
public static final String NAME = "fair";
+ private static final Log LOG = LogFactory.getLog(FairSharePolicy.class);
+ private static final String MEMORY = ResourceInformation.MEMORY_MB.getName();
private static final DefaultResourceCalculator RESOURCE_CALCULATOR =
new DefaultResourceCalculator();
private static final FairShareComparator COMPARATOR =
@@ -164,10 +165,11 @@ private int compareMinShareUsage(Schedulable s1, Schedulable s2,
*/
private int compareFairShareUsage(Schedulable s1, Schedulable s2,
Resource resourceUsage1, Resource resourceUsage2) {
- double weight1 = s1.getWeights().getWeight(ResourceType.MEMORY);
- double weight2 = s2.getWeights().getWeight(ResourceType.MEMORY);
+ double weight1 = s1.getWeight();
+ double weight2 = s2.getWeight();
double useToWeightRatio1;
double useToWeightRatio2;
+
if (weight1 > 0.0 && weight2 > 0.0) {
useToWeightRatio1 = resourceUsage1.getMemorySize() / weight1;
useToWeightRatio2 = resourceUsage2.getMemorySize() / weight2;
@@ -213,14 +215,13 @@ public Resource getHeadroom(Resource queueFairShare,
@Override
public void computeShares(Collection<? extends Schedulable> schedulables,
Resource totalResources) {
- ComputeFairShares.computeShares(schedulables, totalResources, ResourceType.MEMORY);
+ ComputeFairShares.computeShares(schedulables, totalResources, MEMORY);
}
@Override
public void computeSteadyShares(Collection<? extends FSQueue> queues,
Resource totalResources) {
- ComputeFairShares.computeSteadyShares(queues, totalResources,
- ResourceType.MEMORY);
+ ComputeFairShares.computeSteadyShares(queues, totalResources, MEMORY);
}
@Override
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
index f6b1a943a60..806b6364099 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java
@@ -19,17 +19,21 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import com.google.inject.Inject;
+import java.util.Arrays;
+
/**
* Provides an table with an overview of many cluster wide metrics and if
* per user metrics are enabled it will show an overview of what the
@@ -168,8 +172,8 @@ protected void render(Block html) {
}
}
-
- SchedulerInfo schedulerInfo=new SchedulerInfo(this.rm);
+
+ SchedulerInfo schedulerInfo = new SchedulerInfo(this.rm);
div.h3("Scheduler Metrics").
table("#schedulermetricsoverview").
@@ -186,7 +190,8 @@ protected void render(Block html) {
tbody().$class("ui-widget-content").
tr().
td(String.valueOf(schedulerInfo.getSchedulerType())).
- td(String.valueOf(schedulerInfo.getSchedulerResourceTypes())).
+ td(String.valueOf(Arrays.toString(ResourceUtils.getResourcesTypeInfo()
+ .toArray(new ResourceTypeInfo[0])))).
td(schedulerInfo.getMinAllocation().toString()).
td(schedulerInfo.getMaxAllocation().toString()).
td(String.valueOf(schedulerInfo.getMaxClusterLevelAppPriority())).
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
index e5e31e0640b..8553d8cde28 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.util.StringHelper;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
@@ -106,15 +107,12 @@ protected void createApplicationMetricsTable(Block html){
attemptResourcePreempted)
.__("Number of Non-AM Containers Preempted from Current Attempt:",
attemptNumNonAMContainerPreempted)
- .__("Aggregate Resource Allocation:",
- String.format("%d MB-seconds, %d vcore-seconds",
- appMetrics == null ? "N/A" : appMetrics.getMemorySeconds(),
- appMetrics == null ? "N/A" : appMetrics.getVcoreSeconds()))
+ .__("Aggregate Resource Allocation:", appMetrics == null ? "N/A" :
+ StringHelper
+ .getResourceSecondsString(appMetrics.getResourceSecondsMap()))
.__("Aggregate Preempted Resource Allocation:",
- String.format("%d MB-seconds, %d vcore-seconds",
- appMetrics == null ? "N/A" : appMetrics.getPreemptedMemorySeconds(),
- appMetrics == null ? "N/A" :
- appMetrics.getPreemptedVcoreSeconds()));
+ appMetrics == null ? "N/A" : StringHelper.getResourceSecondsString(
+ appMetrics.getPreemptedResourceSecondsMap()));
pdiv.__();
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 9fb8fb5858d..236c4677653 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -101,6 +101,7 @@
private long vcoreSeconds;
protected float queueUsagePercentage;
protected float clusterUsagePercentage;
+ protected Map<String, Long> resourceSecondsMap;
// preemption info fields
private long preemptedResourceMB;
@@ -109,6 +110,7 @@
private int numAMContainerPreempted;
private long preemptedMemorySeconds;
private long preemptedVcoreSeconds;
+ protected Map<String, Long> preemptedResourceSecondsMap;
// list of resource requests
@XmlElement(name = "resourceRequests")
@@ -236,8 +238,10 @@ public AppInfo(ResourceManager rm, RMApp app, Boolean hasAccess,
appMetrics.getResourcePreempted().getVirtualCores();
memorySeconds = appMetrics.getMemorySeconds();
vcoreSeconds = appMetrics.getVcoreSeconds();
+ resourceSecondsMap = appMetrics.getResourceSecondsMap();
preemptedMemorySeconds = appMetrics.getPreemptedMemorySeconds();
preemptedVcoreSeconds = appMetrics.getPreemptedVcoreSeconds();
+ preemptedResourceSecondsMap = appMetrics.getPreemptedResourceSecondsMap();
ApplicationSubmissionContext appSubmissionContext =
app.getApplicationSubmissionContext();
unmanagedApplication = appSubmissionContext.getUnmanagedAM();
@@ -415,6 +419,22 @@ public long getReservedVCores() {
return this.reservedVCores;
}
+ public long getPreemptedMB() {
+ return preemptedResourceMB;
+ }
+
+ public long getPreemptedVCores() {
+ return preemptedResourceVCores;
+ }
+
+ public int getNumNonAMContainersPreempted() {
+ return numNonAMContainerPreempted;
+ }
+
+ public int getNumAMContainersPreempted() {
+ return numAMContainerPreempted;
+ }
+
public long getMemorySeconds() {
return memorySeconds;
}
@@ -423,6 +443,10 @@ public long getVcoreSeconds() {
return vcoreSeconds;
}
+ public Map<String, Long> getResourceSecondsMap() {
+ return resourceSecondsMap;
+ }
+
public long getPreemptedMemorySeconds() {
return preemptedMemorySeconds;
}
@@ -431,6 +455,10 @@ public long getPreemptedVcoreSeconds() {
return preemptedVcoreSeconds;
}
+ public Map<String, Long> getPreemptedResourceSecondsMap() {
+ return preemptedResourceSecondsMap;
+ }
+
public List getResourceRequests() {
return this.resourceRequests;
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
index 5083943b65a..e13980afc39 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
@@ -20,46 +20,68 @@
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.resource.Resources;
@XmlRootElement
-@XmlAccessorType(XmlAccessType.FIELD)
+@XmlAccessorType(XmlAccessType.NONE)
public class ResourceInfo {
+
+ @XmlElement
long memory;
+ @XmlElement
int vCores;
-
+
+ private Resource resources;
+
public ResourceInfo() {
}
public ResourceInfo(Resource res) {
memory = res.getMemorySize();
vCores = res.getVirtualCores();
+ resources = Resources.clone(res);
}
public long getMemorySize() {
- return memory;
+ if (resources == null) {
+ resources = Resource.newInstance(memory, vCores);
+ }
+ return resources.getMemorySize();
}
public int getvCores() {
- return vCores;
+ if (resources == null) {
+ resources = Resource.newInstance(memory, vCores);
+ }
+ return resources.getVirtualCores();
}
-
+
@Override
public String toString() {
- return "";
+ return resources.toString();
}
public void setMemory(int memory) {
+ if (resources == null) {
+ resources = Resource.newInstance(memory, vCores);
+ }
this.memory = memory;
+ resources.setMemorySize(memory);
}
public void setvCores(int vCores) {
+ if (resources == null) {
+ resources = Resource.newInstance(memory, vCores);
+ }
this.vCores = vCores;
+ resources.setVirtualCores(vCores);
}
public Resource getResource() {
- return Resource.newInstance(memory, vCores);
+ return Resource.newInstance(resources);
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index cf93edd2c1e..81491b14ce1 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+import java.util.Arrays;
import java.util.EnumSet;
import javax.xml.bind.annotation.XmlRootElement;
@@ -73,7 +74,7 @@ public ResourceInfo getMaxAllocation() {
}
public String getSchedulerResourceTypes() {
- return this.schedulingResourceTypes.toString();
+ return Arrays.toString(minAllocResource.getResource().getResources());
}
public int getMaxClusterLevelAppPriority() {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
index 247cd2195d9..39a56a811a3 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/proto/yarn_server_resourcemanager_recovery.proto
@@ -87,6 +87,8 @@ message ApplicationAttemptStateDataProto {
optional int64 finish_time = 12;
optional int64 preempted_memory_seconds = 13;
optional int64 preempted_vcore_seconds = 14;
+ repeated StringLongMapProto application_resource_usage_map = 15;
+ repeated StringLongMapProto preempted_resource_usage_map = 16;
}
message EpochProto {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index db26a875990..b24a309fc10 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -35,6 +35,7 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.logging.Log;
@@ -56,6 +57,7 @@
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -247,6 +249,8 @@ public void setUp() {
ResourceScheduler scheduler = mockResourceScheduler();
((RMContextImpl)rmContext).setScheduler(scheduler);
Configuration conf = new Configuration();
+ conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+ ((RMContextImpl) rmContext).setYarnConfiguration(conf);
ApplicationMasterService masterService =
new ApplicationMasterService(rmContext, scheduler);
appMonitor = new TestRMAppManager(rmContext,
@@ -827,9 +831,12 @@ public void testEscapeApplicationSummary() {
when(app.getState()).thenReturn(RMAppState.RUNNING);
when(app.getApplicationType()).thenReturn("MAPREDUCE");
when(app.getSubmitTime()).thenReturn(1000L);
+ Map<String, Long> resourceSecondsMap = new HashMap<>();
+ resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), 16384L);
+ resourceSecondsMap.put(ResourceInformation.VCORES.getName(), 64L);
RMAppMetrics metrics =
new RMAppMetrics(Resource.newInstance(1234, 56),
- 10, 1, 16384, 64, 0, 0);
+ 10, 1, resourceSecondsMap, new HashMap<>());
when(app.getRMAppMetrics()).thenReturn(metrics);
RMAppManager.ApplicationSummary.SummaryBuilder summary =
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
index e684f3c1ad3..421ddbc29b6 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationMasterService.java
@@ -102,9 +102,11 @@ public void init(ApplicationMasterServiceContext amsContext,
}
@Override
- public void registerApplicationMaster(ApplicationAttemptId
- applicationAttemptId, RegisterApplicationMasterRequest request,
- RegisterApplicationMasterResponse response) throws IOException {
+ public void registerApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse response)
+ throws IOException, YarnException {
nextProcessor.registerApplicationMaster(
applicationAttemptId, request, response);
}
@@ -144,7 +146,8 @@ public void init(ApplicationMasterServiceContext amsContext,
public void registerApplicationMaster(
ApplicationAttemptId applicationAttemptId,
RegisterApplicationMasterRequest request,
- RegisterApplicationMasterResponse response) throws IOException {
+ RegisterApplicationMasterResponse response)
+ throws IOException, YarnException {
beforeRegCount.incrementAndGet();
nextProcessor.registerApplicationMaster(applicationAttemptId,
request, response);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 48251c2003f..f8607b6812c 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -56,6 +56,8 @@
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -116,6 +118,7 @@
import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.ReservationRequests;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -2143,4 +2146,46 @@ public void handle(Event event) {
rmService.getApplications(request).getApplicationList().size());
rmService.setDisplayPerUserApps(false);
}
+
+ public void testGetResourceTypesInfoWhenResourceProfileDisabled()
+ throws Exception {
+ YarnConfiguration conf = new YarnConfiguration();
+ MockRM rm = new MockRM(conf) {
+ protected ClientRMService createClientRMService() {
+ return new ClientRMService(this.rmContext, scheduler,
+ this.rmAppManager, this.applicationACLsManager, this.queueACLsManager,
+ this.getRMContext().getRMDelegationTokenSecretManager());
+ }
+ };
+ rm.start();
+
+ YarnRPC rpc = YarnRPC.create(conf);
+ InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
+ LOG.info("Connecting to ResourceManager at " + rmAddress);
+ ApplicationClientProtocol client =
+ (ApplicationClientProtocol) rpc
+ .getProxy(ApplicationClientProtocol.class, rmAddress, conf);
+
+ // Make call
+ GetAllResourceTypeInfoRequest request =
+ GetAllResourceTypeInfoRequest.newInstance();
+ GetAllResourceTypeInfoResponse response = client.getResourceTypeInfo(request);
+
+ Assert.assertEquals(2, response.getResourceTypeInfo().size());
+
+ // Check memory
+ Assert.assertEquals(ResourceInformation.MEMORY_MB.getName(),
+ response.getResourceTypeInfo().get(0).getName());
+ Assert.assertEquals(ResourceInformation.MEMORY_MB.getUnits(),
+ response.getResourceTypeInfo().get(0).getDefaultUnit());
+
+ // Check vcores
+ Assert.assertEquals(ResourceInformation.VCORES.getName(),
+ response.getResourceTypeInfo().get(1).getName());
+ Assert.assertEquals(ResourceInformation.VCORES.getUnits(),
+ response.getResourceTypeInfo().get(1).getDefaultUnit());
+
+ rm.stop();
+ rpc.stopProxy(client, conf);
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
index 11fe0561769..3508ab4760c 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
@@ -23,6 +23,7 @@
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import org.apache.commons.lang.time.DateUtils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -32,6 +33,7 @@
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
@@ -424,6 +426,9 @@ private AggregateAppResourceUsage calculateContainerResourceMetrics(
* usedMillis / DateUtils.MILLIS_PER_SECOND;
long vcoreSeconds = resource.getVirtualCores()
* usedMillis / DateUtils.MILLIS_PER_SECOND;
- return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds);
+ Map<String, Long> map = new HashMap<>();
+ map.put(ResourceInformation.MEMORY_MB.getName(), memorySeconds);
+ map.put(ResourceInformation.VCORES.getName(), vcoreSeconds);
+ return new AggregateAppResourceUsage(map);
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index f826631a21d..399df02465e 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager;
import java.util.Collection;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -189,7 +190,8 @@ public YarnApplicationState createApplicationState() {
@Override
public RMAppMetrics getRMAppMetrics() {
- return new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, 0, 0, 0, 0);
+ return new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, new HashMap<>(),
+ new HashMap<>());
}
@Override
@@ -337,8 +339,9 @@ public int getMaxAppAttempts() {
public ApplicationReport createAndGetApplicationReport(
String clientUserName, boolean allowAccess) {
ApplicationResourceUsageReport usageReport =
- ApplicationResourceUsageReport.newInstance(0, 0, null, null, null,
- 0, 0, 0, 0, 0, 0);
+ ApplicationResourceUsageReport
+ .newInstance(0, 0, null, null, null, new HashMap<>(), 0, 0,
+ new HashMap<>());
ApplicationReport report = ApplicationReport.newInstance(
getApplicationId(), appAttemptId, getUser(), getQueue(),
getName(), null, 0, null, null, getDiagnostics().toString(),
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index 7005bca6585..2287617b215 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -24,6 +24,7 @@
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
@@ -40,6 +41,7 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
@@ -506,9 +508,16 @@ private static RMApp createRMApp(ApplicationId appId) {
when(app.getCurrentAppAttempt()).thenReturn(appAttempt);
when(app.getFinalApplicationStatus()).thenReturn(
FinalApplicationStatus.UNDEFINED);
- when(app.getRMAppMetrics()).thenReturn(
- new RMAppMetrics(null, 0, 0, Integer.MAX_VALUE, Long.MAX_VALUE,
- Integer.MAX_VALUE, Long.MAX_VALUE));
+ Map<String, Long> resourceMap = new HashMap<>();
+ resourceMap
+ .put(ResourceInformation.MEMORY_MB.getName(), (long) Integer.MAX_VALUE);
+ resourceMap.put(ResourceInformation.VCORES.getName(), Long.MAX_VALUE);
+ Map<String, Long> preemptedMap = new HashMap<>();
+ preemptedMap
+ .put(ResourceInformation.MEMORY_MB.getName(), (long) Integer.MAX_VALUE);
+ preemptedMap.put(ResourceInformation.VCORES.getName(), Long.MAX_VALUE);
+ when(app.getRMAppMetrics())
+ .thenReturn(new RMAppMetrics(null, 0, 0, resourceMap, preemptedMap));
Set<String> appTags = new HashSet<String>();
appTags.add("test");
appTags.add("tags");
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
index c6bfcc71b23..68bb325a620 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisherForV2.java
@@ -29,6 +29,8 @@
import java.io.FileReader;
import java.io.IOException;
import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
@@ -46,6 +48,7 @@
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
@@ -357,15 +360,20 @@ private static RMApp createRMApp(ApplicationId appId) {
when(app.getDiagnostics()).thenReturn(
new StringBuilder("test diagnostics info"));
RMAppAttempt appAttempt = mock(RMAppAttempt.class);
- when(appAttempt.getAppAttemptId()).thenReturn(
- ApplicationAttemptId.newInstance(appId, 1));
+ when(appAttempt.getAppAttemptId())
+ .thenReturn(ApplicationAttemptId.newInstance(appId, 1));
when(app.getCurrentAppAttempt()).thenReturn(appAttempt);
- when(app.getFinalApplicationStatus()).thenReturn(
- FinalApplicationStatus.UNDEFINED);
+ when(app.getFinalApplicationStatus())
+ .thenReturn(FinalApplicationStatus.UNDEFINED);
+ Map<String, Long> resourceSecondsMap = new HashMap<>();
+ resourceSecondsMap
+ .put(ResourceInformation.MEMORY_MB.getName(), (long) Integer.MAX_VALUE);
+ resourceSecondsMap
+ .put(ResourceInformation.VCORES.getName(), Long.MAX_VALUE);
when(app.getRMAppMetrics()).thenReturn(
- new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, Integer.MAX_VALUE,
- Long.MAX_VALUE, 0, 0));
- when(app.getApplicationTags()).thenReturn(Collections.<String> emptySet());
+ new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, resourceSecondsMap,
+ new HashMap<>()));
+ when(app.getApplicationTags()).thenReturn(Collections.emptySet());
ApplicationSubmissionContext appSubmissionContext =
mock(ApplicationSubmissionContext.class);
when(appSubmissionContext.getPriority())
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
index 06a16ffeffe..453d805a843 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
@@ -194,7 +194,7 @@ protected RMAppAttempt storeAttempt(RMStateStore store,
when(mockAttempt.getRMAppAttemptMetrics())
.thenReturn(mockRmAppAttemptMetrics);
when(mockRmAppAttemptMetrics.getAggregateAppResourceUsage())
- .thenReturn(new AggregateAppResourceUsage(0, 0));
+ .thenReturn(new AggregateAppResourceUsage(new HashMap<>()));
dispatcher.attemptId = attemptId;
store.storeNewApplicationAttempt(mockAttempt);
waitNotify(dispatcher);
@@ -292,7 +292,7 @@ void testRMAppStateStore(RMStateStoreHelper stateStoreHelper,
when(mockRemovedAttempt.getRMAppAttemptMetrics())
.thenReturn(mockRmAppAttemptMetrics);
when(mockRmAppAttemptMetrics.getAggregateAppResourceUsage())
- .thenReturn(new AggregateAppResourceUsage(0,0));
+ .thenReturn(new AggregateAppResourceUsage(new HashMap<>()));
attempts.put(attemptIdRemoved, mockRemovedAttempt);
store.removeApplication(mockRemovedApp);
@@ -369,7 +369,7 @@ void testRMAppStateStore(RMStateStoreHelper stateStoreHelper,
oldAttemptState.getStartTime(), RMAppAttemptState.FINISHED,
"myTrackingUrl", "attemptDiagnostics",
FinalApplicationStatus.SUCCEEDED, 100,
- oldAttemptState.getFinishTime(), 0, 0, 0, 0);
+ oldAttemptState.getFinishTime(), new HashMap<>(), new HashMap<>());
store.updateApplicationAttemptState(newAttemptState);
// test updating the state of an app/attempt whose initial state was not
@@ -393,7 +393,7 @@ void testRMAppStateStore(RMStateStoreHelper stateStoreHelper,
oldAttemptState.getStartTime(), RMAppAttemptState.FINISHED,
"myTrackingUrl", "attemptDiagnostics",
FinalApplicationStatus.SUCCEEDED, 111,
- oldAttemptState.getFinishTime(), 0, 0, 0, 0);
+ oldAttemptState.getFinishTime(), new HashMap<>(), new HashMap<>());
store.updateApplicationAttemptState(dummyAttempt);
// let things settle down
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
index 5ae82391bdf..e5cf95d07b1 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
@@ -33,12 +33,7 @@
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.*;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
import org.apache.hadoop.yarn.conf.HAUtil;
@@ -511,7 +506,7 @@ public void testFencedState() throws Exception {
when(mockAttempt.getRMAppAttemptMetrics())
.thenReturn(mockRmAppAttemptMetrics);
when(mockRmAppAttemptMetrics.getAggregateAppResourceUsage())
- .thenReturn(new AggregateAppResourceUsage(0,0));
+ .thenReturn(new AggregateAppResourceUsage(new HashMap<>()));
store.storeNewApplicationAttempt(mockAttempt);
assertEquals("RMStateStore should have been in fenced state",
true, store.isFencedState());
@@ -523,7 +518,7 @@ public void testFencedState() throws Exception {
store.getCredentialsFromAppAttempt(mockAttempt),
startTime, RMAppAttemptState.FINISHED, "testUrl",
"test", FinalApplicationStatus.SUCCEEDED, 100,
- finishTime, 0, 0, 0, 0);
+ finishTime, new HashMap<>(), new HashMap<>());
store.updateApplicationAttemptState(newAttemptState);
assertEquals("RMStateStore should have been in fenced state",
true, store.isFencedState());
@@ -751,10 +746,20 @@ private static ApplicationStateData createAppState(
private static ApplicationAttemptStateData createFinishedAttempt(
ApplicationAttemptId attemptId, Container container, long startTime,
int amExitStatus) {
+ Map<String, Long> resourceSecondsMap = new HashMap<>();
+ Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
+ resourceSecondsMap
+ .put(ResourceInformation.MEMORY_MB.getName(), 0L);
+ resourceSecondsMap
+ .put(ResourceInformation.VCORES.getName(), 0L);
+ preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
+ 0L);
+ preemptedResourceSecondsMap
+ .put(ResourceInformation.VCORES.getName(), 0L);
return ApplicationAttemptStateData.newInstance(attemptId,
container, null, startTime, RMAppAttemptState.FINISHED,
"myTrackingUrl", "attemptDiagnostics", FinalApplicationStatus.SUCCEEDED,
- amExitStatus, 0, 0, 0, 0, 0);
+ amExitStatus, 0, resourceSecondsMap, preemptedResourceSecondsMap);
}
private ApplicationAttemptId storeAttempt(RMStateStore store,
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairSchedulerPlanFollower.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairSchedulerPlanFollower.java
index 9561234d633..f0f2b35f3d0 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairSchedulerPlanFollower.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestFairSchedulerPlanFollower.java
@@ -38,7 +38,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.exceptions.PlanningException;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.planning.ReservationAgent;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -137,7 +136,7 @@ protected void checkDefaultQueueBeforePlanFollowerRun() {
}
@Override
protected void verifyCapacity(Queue defQ) {
- assertTrue(((FSQueue) defQ).getWeights().getWeight(ResourceType.MEMORY) > 0.9);
+ assertTrue(((FSQueue) defQ).getWeight() > 0.9);
}
@Override
@@ -173,8 +172,7 @@ protected void assertReservationQueueExists(ReservationId r,
false);
assertNotNull(q);
// For now we are setting both to same weight
- Assert.assertEquals(expectedCapacity,
- q.getWeights().getWeight(ResourceType.MEMORY), 0.01);
+ Assert.assertEquals(expectedCapacity, q.getWeight(), 0.01);
}
@Override
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceWeights.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceWeights.java
deleted file mode 100644
index f420b9ecd22..00000000000
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceWeights.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.resourcemanager.resource;
-
-import org.junit.Assert;
-
-import org.junit.Test;
-
-public class TestResourceWeights {
-
- @Test(timeout=3000)
- public void testWeights() {
- ResourceWeights rw1 = new ResourceWeights();
- Assert.assertEquals("Default CPU weight should be 0.0f.", 0.0f,
- rw1.getWeight(ResourceType.CPU), 0.00001f);
- Assert.assertEquals("Default memory weight should be 0.0f", 0.0f,
- rw1.getWeight(ResourceType.MEMORY), 0.00001f);
-
- ResourceWeights rw2 = new ResourceWeights(2.0f);
- Assert.assertEquals("The CPU weight should be 2.0f.", 2.0f,
- rw2.getWeight(ResourceType.CPU), 0.00001f);
- Assert.assertEquals("The memory weight should be 2.0f", 2.0f,
- rw2.getWeight(ResourceType.MEMORY), 0.00001f);
-
- // set each individually
- ResourceWeights rw3 = new ResourceWeights(1.5f, 2.0f);
- Assert.assertEquals("The CPU weight should be 2.0f", 2.0f,
- rw3.getWeight(ResourceType.CPU), 0.00001f);
- Assert.assertEquals("The memory weight should be 1.5f", 1.5f,
- rw3.getWeight(ResourceType.MEMORY), 0.00001f);
-
- // reset weights
- rw3.setWeight(ResourceType.CPU, 2.5f);
- Assert.assertEquals("The CPU weight should be set to 2.5f.", 2.5f,
- rw3.getWeight(ResourceType.CPU), 0.00001f);
- rw3.setWeight(ResourceType.MEMORY, 4.0f);
- Assert.assertEquals("The memory weight should be set to 4.0f.", 4.0f,
- rw3.getWeight(ResourceType.MEMORY), 0.00001f);
- }
-}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
deleted file mode 100644
index 2a10747ac9d..00000000000
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.resourcemanager.resource;
-
-import static org.apache.hadoop.yarn.util.resource.Resources.*;
-import static org.junit.Assert.*;
-import org.junit.Test;
-
-public class TestResources {
- @Test(timeout=10000)
- public void testFitsIn() {
- assertTrue(fitsIn(createResource(1, 1), createResource(2, 2)));
- assertTrue(fitsIn(createResource(2, 2), createResource(2, 2)));
- assertFalse(fitsIn(createResource(2, 2), createResource(1, 1)));
- assertFalse(fitsIn(createResource(1, 2), createResource(2, 1)));
- assertFalse(fitsIn(createResource(2, 1), createResource(1, 2)));
- }
-
- @Test(timeout=10000)
- public void testComponentwiseMin() {
- assertEquals(createResource(1, 1),
- componentwiseMin(createResource(1, 1), createResource(2, 2)));
- assertEquals(createResource(1, 1),
- componentwiseMin(createResource(2, 2), createResource(1, 1)));
- assertEquals(createResource(1, 1),
- componentwiseMin(createResource(1, 2), createResource(2, 1)));
- }
-}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index eb633c75bd3..cd9b74ed86f 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -4319,143 +4319,6 @@ public void testCSReservationWithRootUnblocked() throws Exception {
rm.stop();
}
- @Test (timeout = 300000)
- public void testUserLimitThroughput() throws Exception {
- // Since this is more of a performance unit test, only run if
- // RunUserLimitThroughput is set (-DRunUserLimitThroughput=true)
- Assume.assumeTrue(Boolean.valueOf(
- System.getProperty("RunUserLimitThroughput")));
-
- CapacitySchedulerConfiguration csconf =
- new CapacitySchedulerConfiguration();
- csconf.setMaximumApplicationMasterResourcePerQueuePercent("root", 100.0f);
- csconf.setMaximumAMResourcePercentPerPartition("root", "", 100.0f);
- csconf.setMaximumApplicationMasterResourcePerQueuePercent("root.default",
- 100.0f);
- csconf.setMaximumAMResourcePercentPerPartition("root.default", "", 100.0f);
- csconf.setResourceComparator(DominantResourceCalculator.class);
-
- YarnConfiguration conf = new YarnConfiguration(csconf);
- conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
- ResourceScheduler.class);
-
- MockRM rm = new MockRM(conf);
- rm.start();
-
- CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
- LeafQueue qb = (LeafQueue)cs.getQueue("default");
-
- // For now make user limit large so we can activate all applications
- qb.setUserLimitFactor((float)100.0);
- qb.setupConfigurableCapacities();
-
- SchedulerEvent addAppEvent;
- SchedulerEvent addAttemptEvent;
- Container container = mock(Container.class);
- ApplicationSubmissionContext submissionContext =
- mock(ApplicationSubmissionContext.class);
-
- final int appCount = 100;
- ApplicationId[] appids = new ApplicationId[appCount];
- RMAppAttemptImpl[] attempts = new RMAppAttemptImpl[appCount];
- ApplicationAttemptId[] appAttemptIds = new ApplicationAttemptId[appCount];
- RMAppImpl[] apps = new RMAppImpl[appCount];
- RMAppAttemptMetrics[] attemptMetrics = new RMAppAttemptMetrics[appCount];
- for (int i=0; i loggers=LogManager.getCurrentLoggers();
- loggers.hasMoreElements(); ) {
- Logger logger = (Logger) loggers.nextElement();
- logger.setLevel(Level.WARN);
- }
- final int topn = 20;
- final int iterations = 2000000;
- final int printInterval = 20000;
- final float numerator = 1000.0f * printInterval;
- PriorityQueue<Long> queue = new PriorityQueue<>(topn,
- Collections.reverseOrder());
-
- long n = Time.monotonicNow();
- long timespent = 0;
- for (int i = 0; i < iterations; i+=2) {
- if (i > 0 && i % printInterval == 0){
- long ts = (Time.monotonicNow() - n);
- if (queue.size() < topn) {
- queue.offer(ts);
- } else {
- Long last = queue.peek();
- if (last > ts) {
- queue.poll();
- queue.offer(ts);
- }
- }
- System.out.println(i + " " + (numerator / ts));
- n= Time.monotonicNow();
- }
- cs.handle(new NodeUpdateSchedulerEvent(node));
- cs.handle(new NodeUpdateSchedulerEvent(node2));
- }
- timespent=0;
- int entries = queue.size();
- while(queue.size() > 0){
- long l = queue.poll();
- timespent += l;
- }
- System.out.println("Avg of fastest " + entries + ": "
- + numerator / (timespent / entries));
- rm.stop();
- }
-
@Test
public void testCSQueueBlocked() throws Exception {
CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPerf.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPerf.java
new file mode 100644
index 00000000000..085ec770705
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPerf.java
@@ -0,0 +1,262 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.junit.Assume;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.PriorityQueue;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestCapacitySchedulerPerf {
+ private final int GB = 1024;
+
+ private String getResourceName(int idx) {
+ return "resource-" + idx;
+ }
+
+ private void testUserLimitThroughputWithNumberOfResourceTypes(
+ int numOfResourceTypes)
+ throws Exception {
+ if (numOfResourceTypes > 2) {
+ // Initialize resource map
+ Map<String, ResourceInformation> riMap = new HashMap<>();
+
+ // Initialize mandatory resources
+ riMap.put(ResourceInformation.MEMORY_URI, ResourceInformation.MEMORY_MB);
+ riMap.put(ResourceInformation.VCORES_URI, ResourceInformation.VCORES);
+
+ for (int i = 2; i < numOfResourceTypes; i++) {
+ String resourceName = getResourceName(i);
+ riMap.put(resourceName, ResourceInformation
+ .newInstance(resourceName, "", 0, ResourceTypes.COUNTABLE));
+ }
+
+ ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+ }
+
+ // Since this is more of a performance unit test, only run if
+ // RunUserLimitThroughput is set (-DRunUserLimitThroughput=true)
+ Assume.assumeTrue(Boolean.valueOf(
+ System.getProperty("RunCapacitySchedulerPerfTests")));
+
+ CapacitySchedulerConfiguration csconf =
+ new CapacitySchedulerConfiguration();
+ csconf.setMaximumApplicationMasterResourcePerQueuePercent("root", 100.0f);
+ csconf.setMaximumAMResourcePercentPerPartition("root", "", 100.0f);
+ csconf.setMaximumApplicationMasterResourcePerQueuePercent("root.default",
+ 100.0f);
+ csconf.setMaximumAMResourcePercentPerPartition("root.default", "", 100.0f);
+ csconf.setResourceComparator(DominantResourceCalculator.class);
+
+ YarnConfiguration conf = new YarnConfiguration(csconf);
+ // Don't reset resource types since we have already configured resource types
+ conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ ResourceScheduler.class);
+
+ MockRM rm = new MockRM(conf);
+ rm.start();
+
+ CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+ LeafQueue qb = (LeafQueue)cs.getQueue("default");
+
+ // For now make user limit large so we can activate all applications
+ qb.setUserLimitFactor((float)100.0);
+ qb.setupConfigurableCapacities();
+
+ SchedulerEvent addAppEvent;
+ SchedulerEvent addAttemptEvent;
+ Container container = mock(Container.class);
+ ApplicationSubmissionContext submissionContext =
+ mock(ApplicationSubmissionContext.class);
+
+ final int appCount = 100;
+ ApplicationId[] appids = new ApplicationId[appCount];
+ RMAppAttemptImpl[] attempts = new RMAppAttemptImpl[appCount];
+ ApplicationAttemptId[] appAttemptIds = new ApplicationAttemptId[appCount];
+ RMAppImpl[] apps = new RMAppImpl[appCount];
+ RMAppAttemptMetrics[] attemptMetrics = new RMAppAttemptMetrics[appCount];
+ for (int i=0; i 2) {
+ for (int i = 2; i < numOfResourceTypes; i++) {
+ nodeResource.setResourceValue(getResourceName(i), 10);
+ }
+ }
+
+ RMNode node = MockNodes.newNodeInfo(0, nodeResource, 1, "127.0.0.1");
+ cs.handle(new NodeAddedSchedulerEvent(node));
+
+ RMNode node2 = MockNodes.newNodeInfo(0, nodeResource, 1, "127.0.0.2");
+ cs.handle(new NodeAddedSchedulerEvent(node2));
+
+ Priority u0Priority = TestUtils.createMockPriority(1);
+ RecordFactory recordFactory =
+ RecordFactoryProvider.getRecordFactory(null);
+
+ FiCaSchedulerApp[] fiCaApps = new FiCaSchedulerApp[appCount];
+ for (int i=0;i 2) {
+ for (int j = 2; j < numOfResourceTypes; j++) {
+ resourceRequest.getCapability().setResourceValue(getResourceName(j),
+ 10);
+ }
+ }
+
+ // allocate container for app2 with 1GB memory and 1 vcore
+ fiCaApps[i].updateResourceRequests(
+ Collections.singletonList(resourceRequest));
+ }
+ // Now force everything to be over user limit
+ qb.setUserLimitFactor((float)0.0);
+
+ // Quiet the loggers while measuring throughput
+ for (Enumeration<?> loggers = LogManager.getCurrentLoggers();
+ loggers.hasMoreElements(); ) {
+ Logger logger = (Logger) loggers.nextElement();
+ logger.setLevel(Level.WARN);
+ }
+ final int topn = 20;
+ final int iterations = 2000000;
+ final int printInterval = 20000;
+ final float numerator = 1000.0f * printInterval;
+ PriorityQueue<Long> queue = new PriorityQueue<>(topn,
+ Collections.reverseOrder());
+
+ long n = Time.monotonicNow();
+ long timespent = 0;
+ for (int i = 0; i < iterations; i+=2) {
+ if (i > 0 && i % printInterval == 0){
+ long ts = (Time.monotonicNow() - n);
+ if (queue.size() < topn) {
+ queue.offer(ts);
+ } else {
+ Long last = queue.peek();
+ if (last > ts) {
+ queue.poll();
+ queue.offer(ts);
+ }
+ }
+ System.out.println(i + " " + (numerator / ts));
+ n= Time.monotonicNow();
+ }
+ cs.handle(new NodeUpdateSchedulerEvent(node));
+ cs.handle(new NodeUpdateSchedulerEvent(node2));
+ }
+ timespent=0;
+ int entries = queue.size();
+ while(queue.size() > 0){
+ long l = queue.poll();
+ timespent += l;
+ }
+ System.out.println(
+ "#ResourceTypes = " + numOfResourceTypes + ". Avg of fastest " + entries
+ + ": " + numerator / (timespent / entries));
+ rm.stop();
+ }
+
+ @Test(timeout = 300000)
+ public void testUserLimitThroughputForTwoResources() throws Exception {
+ testUserLimitThroughputWithNumberOfResourceTypes(2);
+ }
+
+ @Test(timeout = 300000)
+ public void testUserLimitThroughputForThreeResources() throws Exception {
+ testUserLimitThroughputWithNumberOfResourceTypes(3);
+ }
+
+ @Test(timeout = 300000)
+ public void testUserLimitThroughputForFourResources() throws Exception {
+ testUserLimitThroughputWithNumberOfResourceTypes(4);
+ }
+
+ @Test(timeout = 300000)
+ public void testUserLimitThroughputForFiveResources() throws Exception {
+ testUserLimitThroughputWithNumberOfResourceTypes(5);
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index 5e6548bc80e..32f022f7156 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -896,8 +896,7 @@ public void testGetAppToUnreserve() throws Exception {
String host_1 = "host_1";
FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0,
8 * GB);
-
- Resource clusterResource = Resources.createResource(2 * 8 * GB);
+
// Setup resource-requests
Priority p = TestUtils.createMockPriority(5);
@@ -933,28 +932,27 @@ public void testGetAppToUnreserve() throws Exception {
node_0.getNodeID(), "user", rmContext);
// no reserved containers
- NodeId unreserveId =
- app_0.getNodeIdToUnreserve(priorityMap, capability,
- cs.getResourceCalculator(), clusterResource);
+ NodeId unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
+ cs.getResourceCalculator());
assertEquals(null, unreserveId);
// no reserved containers - reserve then unreserve
app_0.reserve(node_0, priorityMap, rmContainer_1, container_1);
app_0.unreserve(priorityMap, node_0, rmContainer_1);
unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
- cs.getResourceCalculator(), clusterResource);
+ cs.getResourceCalculator());
assertEquals(null, unreserveId);
// no container large enough is reserved
app_0.reserve(node_0, priorityMap, rmContainer_1, container_1);
unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
- cs.getResourceCalculator(), clusterResource);
+ cs.getResourceCalculator());
assertEquals(null, unreserveId);
// reserve one that is now large enough
app_0.reserve(node_1, priorityMap, rmContainer, container);
unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability,
- cs.getResourceCalculator(), clusterResource);
+ cs.getResourceCalculator());
assertEquals(node_1.getNodeID(), unreserveId);
}
@@ -1001,16 +999,14 @@ public void testFindNodeToUnreserve() throws Exception {
node_1.getNodeID(), "user", rmContext);
// nothing reserved
- RMContainer toUnreserveContainer =
- app_0.findNodeToUnreserve(csContext.getClusterResource(), node_1,
+ RMContainer toUnreserveContainer = app_0.findNodeToUnreserve(node_1,
priorityMap, capability);
assertTrue(toUnreserveContainer == null);
// reserved but scheduler doesn't know about that node.
app_0.reserve(node_1, priorityMap, rmContainer, container);
node_1.reserveResource(app_0, priorityMap, rmContainer);
- toUnreserveContainer =
- app_0.findNodeToUnreserve(csContext.getClusterResource(), node_1,
+ toUnreserveContainer = app_0.findNodeToUnreserve(node_1,
priorityMap, capability);
assertTrue(toUnreserveContainer == null);
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
index 36ff85e5a46..03332b25e2c 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java
@@ -20,8 +20,6 @@
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
-import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.resource.Resources;
@@ -33,7 +31,7 @@
private Resource minShare;
private Resource maxShare;
private Resource fairShare;
- private ResourceWeights weights;
+ private float weights;
private Priority priority;
private long startTime;
@@ -49,28 +47,31 @@ public FakeSchedulable(int minShare, int maxShare) {
this(minShare, maxShare, 1, 0, 0, 0);
}
- public FakeSchedulable(int minShare, double memoryWeight) {
+ public FakeSchedulable(int minShare, float memoryWeight) {
this(minShare, Integer.MAX_VALUE, memoryWeight, 0, 0, 0);
}
- public FakeSchedulable(int minShare, int maxShare, double memoryWeight) {
+ public FakeSchedulable(int minShare, int maxShare, float memoryWeight) {
this(minShare, maxShare, memoryWeight, 0, 0, 0);
}
- public FakeSchedulable(int minShare, int maxShare, double weight, int fairShare, int usage,
- long startTime) {
- this(Resources.createResource(minShare, 0), Resources.createResource(maxShare, 0),
- new ResourceWeights((float)weight), Resources.createResource(fairShare, 0),
+ public FakeSchedulable(int minShare, int maxShare, float weight,
+ int fairShare, int usage, long startTime) {
+ this(Resources.createResource(minShare, 0),
+ Resources.createResource(maxShare, 0),
+ weight, Resources.createResource(fairShare, 0),
Resources.createResource(usage, 0), startTime);
}
- public FakeSchedulable(Resource minShare, ResourceWeights weights) {
- this(minShare, Resources.createResource(Integer.MAX_VALUE, Integer.MAX_VALUE),
- weights, Resources.createResource(0, 0), Resources.createResource(0, 0), 0);
+ public FakeSchedulable(Resource minShare, float weights) {
+ this(minShare,
+ Resources.createResource(Integer.MAX_VALUE, Integer.MAX_VALUE),
+ weights, Resources.createResource(0, 0),
+ Resources.createResource(0, 0), 0);
}
public FakeSchedulable(Resource minShare, Resource maxShare,
- ResourceWeights weight, Resource fairShare, Resource usage, long startTime) {
+ float weight, Resource fairShare, Resource usage, long startTime) {
this.minShare = minShare;
this.maxShare = maxShare;
this.weights = weight;
@@ -121,7 +122,7 @@ public long getStartTime() {
}
@Override
- public ResourceWeights getWeights() {
+ public float getWeight() {
return weights;
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java
index 4f3ccb2acd4..c3bcb3b2179 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java
@@ -20,12 +20,11 @@
import java.util.ArrayList;
import java.util.List;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.junit.Assert;
import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.ComputeFairShares;
import org.junit.Before;
import org.junit.Test;
@@ -52,7 +51,7 @@ public void testEqualSharing() {
scheds.add(new FakeSchedulable());
scheds.add(new FakeSchedulable());
ComputeFairShares.computeShares(scheds,
- Resources.createResource(40), ResourceType.MEMORY);
+ Resources.createResource(40), ResourceInformation.MEMORY_MB.getName());
verifyMemoryShares(10, 10, 10, 10);
}
@@ -70,7 +69,7 @@ public void testLowMaxShares() {
scheds.add(new FakeSchedulable(0, 11));
scheds.add(new FakeSchedulable(0, 3));
ComputeFairShares.computeShares(scheds,
- Resources.createResource(40), ResourceType.MEMORY);
+ Resources.createResource(40), ResourceInformation.MEMORY_MB.getName());
verifyMemoryShares(13, 13, 11, 3);
}
@@ -90,7 +89,7 @@ public void testMinShares() {
scheds.add(new FakeSchedulable(0));
scheds.add(new FakeSchedulable(2));
ComputeFairShares.computeShares(scheds,
- Resources.createResource(40), ResourceType.MEMORY);
+ Resources.createResource(40), ResourceInformation.MEMORY_MB.getName());
verifyMemoryShares(20, 18, 0, 2);
}
@@ -100,12 +99,12 @@ public void testMinShares() {
*/
@Test
public void testWeightedSharing() {
- scheds.add(new FakeSchedulable(0, 2.0));
- scheds.add(new FakeSchedulable(0, 1.0));
- scheds.add(new FakeSchedulable(0, 1.0));
- scheds.add(new FakeSchedulable(0, 0.5));
+ scheds.add(new FakeSchedulable(0, 2.0f));
+ scheds.add(new FakeSchedulable(0, 1.0f));
+ scheds.add(new FakeSchedulable(0, 1.0f));
+ scheds.add(new FakeSchedulable(0, 0.5f));
ComputeFairShares.computeShares(scheds,
- Resources.createResource(45), ResourceType.MEMORY);
+ Resources.createResource(45), ResourceInformation.MEMORY_MB.getName());
verifyMemoryShares(20, 10, 10, 5);
}
@@ -118,12 +117,12 @@ public void testWeightedSharing() {
*/
@Test
public void testWeightedSharingWithMaxShares() {
- scheds.add(new FakeSchedulable(0, 10, 2.0));
- scheds.add(new FakeSchedulable(0, 11, 1.0));
- scheds.add(new FakeSchedulable(0, 30, 1.0));
- scheds.add(new FakeSchedulable(0, 20, 0.5));
+ scheds.add(new FakeSchedulable(0, 10, 2.0f));
+ scheds.add(new FakeSchedulable(0, 11, 1.0f));
+ scheds.add(new FakeSchedulable(0, 30, 1.0f));
+ scheds.add(new FakeSchedulable(0, 20, 0.5f));
ComputeFairShares.computeShares(scheds,
- Resources.createResource(45), ResourceType.MEMORY);
+ Resources.createResource(45), ResourceInformation.MEMORY_MB.getName());
verifyMemoryShares(10, 11, 16, 8);
}
@@ -137,12 +136,12 @@ public void testWeightedSharingWithMaxShares() {
*/
@Test
public void testWeightedSharingWithMinShares() {
- scheds.add(new FakeSchedulable(20, 2.0));
- scheds.add(new FakeSchedulable(0, 1.0));
- scheds.add(new FakeSchedulable(5, 1.0));
- scheds.add(new FakeSchedulable(15, 0.5));
+ scheds.add(new FakeSchedulable(20, 2.0f));
+ scheds.add(new FakeSchedulable(0, 1.0f));
+ scheds.add(new FakeSchedulable(5, 1.0f));
+ scheds.add(new FakeSchedulable(15, 0.5f));
ComputeFairShares.computeShares(scheds,
- Resources.createResource(45), ResourceType.MEMORY);
+ Resources.createResource(45), ResourceInformation.MEMORY_MB.getName());
verifyMemoryShares(20, 5, 5, 15);
}
@@ -158,7 +157,8 @@ public void testLargeShares() {
scheds.add(new FakeSchedulable());
scheds.add(new FakeSchedulable());
ComputeFairShares.computeShares(scheds,
- Resources.createResource(40 * million), ResourceType.MEMORY);
+ Resources.createResource(40 * million),
+ ResourceInformation.MEMORY_MB.getName());
verifyMemoryShares(10 * million, 10 * million, 10 * million, 10 * million);
}
@@ -168,7 +168,7 @@ public void testLargeShares() {
@Test
public void testEmptyList() {
ComputeFairShares.computeShares(scheds,
- Resources.createResource(40), ResourceType.MEMORY);
+ Resources.createResource(40), ResourceInformation.MEMORY_MB.getName());
verifyMemoryShares();
}
@@ -177,16 +177,12 @@ public void testEmptyList() {
*/
@Test
public void testCPU() {
- scheds.add(new FakeSchedulable(Resources.createResource(0, 20),
- new ResourceWeights(2.0f)));
- scheds.add(new FakeSchedulable(Resources.createResource(0, 0),
- new ResourceWeights(1.0f)));
- scheds.add(new FakeSchedulable(Resources.createResource(0, 5),
- new ResourceWeights(1.0f)));
- scheds.add(new FakeSchedulable(Resources.createResource(0, 15),
- new ResourceWeights(0.5f)));
+ scheds.add(new FakeSchedulable(Resources.createResource(0, 20), 2.0f));
+ scheds.add(new FakeSchedulable(Resources.createResource(0, 0), 1.0f));
+ scheds.add(new FakeSchedulable(Resources.createResource(0, 5), 1.0f));
+ scheds.add(new FakeSchedulable(Resources.createResource(0, 15), 0.5f));
ComputeFairShares.computeShares(scheds,
- Resources.createResource(0, 45), ResourceType.CPU);
+ Resources.createResource(0, 45), ResourceInformation.VCORES.getName());
verifyCPUShares(20, 5, 5, 15);
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 118e5c4e70f..746bcbf7e4a 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -79,7 +79,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
@@ -1980,7 +1979,7 @@ public void testFairShareAndWeightsInNestedUserQueueRule() throws Exception {
// assert that the steady fair share is 1/4th node1's capacity
assertEquals(capacity / 4, leaf.getSteadyFairShare().getMemorySize());
// assert weights are equal for both the user queues
- assertEquals(1.0, leaf.getWeights().getWeight(ResourceType.MEMORY), 0);
+ assertEquals(1.0, leaf.getWeight(), 0);
}
}
}
@@ -5274,7 +5273,7 @@ public void testDumpState() throws IOException {
child1.updateDemand();
String childQueueString = "{Name: root.parent.child1,"
- + " Weight: ,"
+ + " Weight: 1.0,"
+ " Policy: fair,"
+ " FairShare: ,"
+ " SteadyFairShare: ,"
@@ -5291,14 +5290,15 @@ public void testDumpState() throws IOException {
+ " LastTimeAtMinShare: " + clock.getTime()
+ "}";
- assertTrue(child1.dumpState().equals(childQueueString));
+ assertEquals("Unexpected state dump string",
+ childQueueString, child1.dumpState());
FSParentQueue parent =
scheduler.getQueueManager().getParentQueue("parent", false);
parent.setMaxShare(new ConfigurableResource(resource));
parent.updateDemand();
String parentQueueString = "{Name: root.parent,"
- + " Weight: ,"
+ + " Weight: 1.0,"
+ " Policy: fair,"
+ " FairShare: ,"
+ " SteadyFairShare: ,"
@@ -5309,7 +5309,7 @@ public void testDumpState() throws IOException {
+ " MaxAMShare: 0.5,"
+ " Runnable: 0}";
- assertTrue(parent.dumpState().equals(
- parentQueueString + ", " + childQueueString));
+ assertEquals("Unexpected state dump string",
+ parentQueueString + ", " + childQueueString, parent.dumpState());
}
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
index 3a16454c10a..b016c1b4fb8 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
@@ -30,7 +30,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
@@ -134,11 +133,7 @@ public void testFairShareComparatorTransitivity() {
Resource.newInstance(0, 1), Resource.newInstance(2, 1),
Resource.newInstance(4, 1) };
- private ResourceWeights[] weightsCollection = {
- new ResourceWeights(0.0f), new ResourceWeights(1.0f),
- new ResourceWeights(2.0f) };
-
-
+ private float[] weightsCollection = {0.0f, 1.0f, 2.0f};
public FairShareComparatorTester(
Comparator fairShareComparator) {
@@ -225,10 +220,10 @@ private void swap(Schedulable[] array, int x, int y) {
private String name;
private long startTime;
private Resource usage;
- private ResourceWeights weights;
+ private float weights;
public MockSchedulable(Resource minShare, Resource demand, String name,
- long startTime, Resource usage, ResourceWeights weights) {
+ long startTime, Resource usage, float weights) {
this.minShare = minShare;
this.demand = demand;
this.name = name;
@@ -258,7 +253,7 @@ public Resource getMinShare() {
}
@Override
- public ResourceWeights getWeights() {
+ public float getWeight() {
return weights;
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
index 3719e2aee08..097558feb18 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
@@ -23,15 +23,22 @@
import static org.mockito.Mockito.when;
import java.util.Comparator;
+import java.util.Map;
+import com.google.common.base.Joiner;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
-import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FakeSchedulable;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy.DominantResourceFairnessComparator;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
+import org.junit.Assert;
+import org.junit.BeforeClass;
import org.junit.Test;
/**
@@ -39,10 +46,15 @@
* container before sched2
*/
public class TestDominantResourceFairnessPolicy {
+ @BeforeClass
+ public static void setup() {
+ addResources("test");
+ }
private Comparator createComparator(int clusterMem,
int clusterCpu) {
- DominantResourceFairnessPolicy policy = new DominantResourceFairnessPolicy();
+ DominantResourceFairnessPolicy policy =
+ new DominantResourceFairnessPolicy();
FSContext fsContext = mock(FSContext.class);
when(fsContext.getClusterResource()).
thenReturn(Resources.createResource(clusterMem, clusterCpu));
@@ -51,23 +63,23 @@
}
private Schedulable createSchedulable(int memUsage, int cpuUsage) {
- return createSchedulable(memUsage, cpuUsage, ResourceWeights.NEUTRAL, 0, 0);
+ return createSchedulable(memUsage, cpuUsage, 1.0f, 0, 0);
}
private Schedulable createSchedulable(int memUsage, int cpuUsage,
int minMemShare, int minCpuShare) {
- return createSchedulable(memUsage, cpuUsage, ResourceWeights.NEUTRAL,
+ return createSchedulable(memUsage, cpuUsage, 1.0f,
minMemShare, minCpuShare);
}
private Schedulable createSchedulable(int memUsage, int cpuUsage,
- ResourceWeights weights) {
+ float weights) {
return createSchedulable(memUsage, cpuUsage, weights, 0, 0);
}
private Schedulable createSchedulable(int memUsage, int cpuUsage,
- ResourceWeights weights, int minMemShare, int minCpuShare) {
+ float weights, int minMemShare, int minCpuShare) {
Resource usage = BuilderUtils.newResource(memUsage, cpuUsage);
Resource minShare = BuilderUtils.newResource(minMemShare, minCpuShare);
return new FakeSchedulable(minShare,
@@ -77,94 +89,260 @@ private Schedulable createSchedulable(int memUsage, int cpuUsage,
@Test
public void testSameDominantResource() {
- assertTrue(createComparator(8000, 4).compare(
- createSchedulable(1000, 1),
- createSchedulable(2000, 1)) < 0);
+ Comparator c = createComparator(8000, 4);
+ Schedulable s1 = createSchedulable(1000, 1);
+ Schedulable s2 = createSchedulable(2000, 1);
+
+ assertTrue("Comparison didn't return a value less than 0",
+ c.compare(s1, s2) < 0);
}
@Test
public void testDifferentDominantResource() {
- assertTrue(createComparator(8000, 8).compare(
- createSchedulable(4000, 3),
- createSchedulable(2000, 5)) < 0);
+ Comparator c = createComparator(8000, 8);
+ Schedulable s1 = createSchedulable(4000, 3);
+ Schedulable s2 = createSchedulable(2000, 5);
+
+ assertTrue("Comparison didn't return a value less than 0",
+ c.compare(s1, s2) < 0);
}
@Test
public void testOneIsNeedy() {
- assertTrue(createComparator(8000, 8).compare(
- createSchedulable(2000, 5, 0, 6),
- createSchedulable(4000, 3, 0, 0)) < 0);
+ Comparator c = createComparator(8000, 8);
+ Schedulable s1 = createSchedulable(2000, 5, 0, 6);
+ Schedulable s2 = createSchedulable(4000, 3, 0, 0);
+
+ assertTrue("Comparison didn't return a value less than 0",
+ c.compare(s1, s2) < 0);
}
@Test
public void testBothAreNeedy() {
- assertTrue(createComparator(8000, 100).compare(
- // dominant share is 2000/8000
- createSchedulable(2000, 5),
- // dominant share is 4000/8000
- createSchedulable(4000, 3)) < 0);
- assertTrue(createComparator(8000, 100).compare(
- // dominant min share is 2/3
- createSchedulable(2000, 5, 3000, 6),
- // dominant min share is 4/5
- createSchedulable(4000, 3, 5000, 4)) < 0);
+ Comparator c = createComparator(8000, 100);
+ // dominant share is 2000/8000
+ Schedulable s1 = createSchedulable(2000, 5);
+ // dominant share is 4000/8000
+ Schedulable s2 = createSchedulable(4000, 3);
+
+ assertTrue("Comparison didn't return a value less than 0",
+ c.compare(s1, s2) < 0);
+
+ // dominant min share is 2/3
+ s1 = createSchedulable(2000, 5, 3000, 6);
+ // dominant min share is 4/5
+ s2 = createSchedulable(4000, 3, 5000, 4);
+
+ assertTrue("Comparison didn't return a value less than 0",
+ c.compare(s1, s2) < 0);
}
@Test
public void testEvenWeightsSameDominantResource() {
assertTrue(createComparator(8000, 8).compare(
- createSchedulable(3000, 1, new ResourceWeights(2.0f)),
+ createSchedulable(3000, 1, 2.0f),
createSchedulable(2000, 1)) < 0);
assertTrue(createComparator(8000, 8).compare(
- createSchedulable(1000, 3, new ResourceWeights(2.0f)),
+ createSchedulable(1000, 3, 2.0f),
createSchedulable(1000, 2)) < 0);
}
@Test
public void testEvenWeightsDifferentDominantResource() {
assertTrue(createComparator(8000, 8).compare(
- createSchedulable(1000, 3, new ResourceWeights(2.0f)),
+ createSchedulable(1000, 3, 2.0f),
createSchedulable(2000, 1)) < 0);
assertTrue(createComparator(8000, 8).compare(
- createSchedulable(3000, 1, new ResourceWeights(2.0f)),
+ createSchedulable(3000, 1, 2.0f),
createSchedulable(1000, 2)) < 0);
}
@Test
- public void testUnevenWeightsSameDominantResource() {
- assertTrue(createComparator(8000, 8).compare(
- createSchedulable(3000, 1, new ResourceWeights(2.0f, 1.0f)),
- createSchedulable(2000, 1)) < 0);
- assertTrue(createComparator(8000, 8).compare(
- createSchedulable(1000, 3, new ResourceWeights(1.0f, 2.0f)),
- createSchedulable(1000, 2)) < 0);
+ public void testSortShares() {
+ float[][] ratios1 = {{0.3f, 2.0f}, {0.2f, 1.0f}, {0.4f, 0.1f}};
+ float[][] ratios2 = {{0.2f, 9.0f}, {0.3f, 2.0f}, {0.25f, 0.1f}};
+ float[][] expected1 = {{0.4f, 0.1f}, {0.3f, 2.0f}, {0.2f, 1.0f}};
+ float[][] expected2 = {{0.3f, 2.0f}, {0.25f, 0.1f}, {0.2f, 9.0f}};
+ DominantResourceFairnessComparator comparator =
+ new DominantResourceFairnessComparator();
+
+ comparator.sortRatios(ratios1, ratios2);
+
+ for (int i = 0; i < ratios1.length; i++) {
+ Assert.assertArrayEquals("The shares array was not sorted into the "
+ + "expected order: incorrect inner array encountered",
+ expected1[i], ratios1[i], 0.00001f);
+ Assert.assertArrayEquals("The shares array was not sorted into the "
+ + "expected order: incorrect inner array encountered",
+ expected2[i], ratios2[i], 0.00001f);
+ }
}
-
+
@Test
- public void testUnevenWeightsDifferentDominantResource() {
- assertTrue(createComparator(8000, 8).compare(
- createSchedulable(1000, 3, new ResourceWeights(1.0f, 2.0f)),
- createSchedulable(2000, 1)) < 0);
- assertTrue(createComparator(8000, 8).compare(
- createSchedulable(3000, 1, new ResourceWeights(2.0f, 1.0f)),
- createSchedulable(1000, 2)) < 0);
+ public void testCalculateClusterAndFairRatios() {
+ Map index = ResourceUtils.getResourceTypeIndex();
+ Resource used = Resources.createResource(10, 5);
+ Resource capacity = Resources.createResource(100, 10);
+ float[][] shares = new float[3][2];
+ DominantResourceFairnessComparator comparator =
+ new DominantResourceFairnessComparator();
+
+ used.setResourceValue("test", 2L);
+ capacity.setResourceValue("test", 5L);
+
+ int dominant = comparator.calculateClusterAndFairRatios(used, capacity,
+ shares, 1.0f);
+
+ assertEquals("Calculated usage ratio for memory (10MB out of 100MB) is "
+ + "incorrect", 0.1,
+ shares[index.get(ResourceInformation.MEMORY_MB.getName())][0], .00001);
+ assertEquals("Calculated usage ratio for vcores (5 out of 10) is "
+ + "incorrect", 0.5,
+ shares[index.get(ResourceInformation.VCORES.getName())][0], .00001);
+ assertEquals("Calculated usage ratio for test resource (2 out of 5) is "
+ + "incorrect", 0.4, shares[index.get("test")][0], .00001);
+ assertEquals("The wrong dominant resource index was returned",
+ index.get(ResourceInformation.VCORES.getName()).intValue(),
+ dominant);
}
-
+
@Test
- public void testCalculateShares() {
+ public void testCalculateMinShareRatios() {
+ Map index = ResourceUtils.getResourceTypeIndex();
Resource used = Resources.createResource(10, 5);
- Resource capacity = Resources.createResource(100, 10);
- ResourceType[] resourceOrder = new ResourceType[2];
- ResourceWeights shares = new ResourceWeights();
- DominantResourceFairnessPolicy.DominantResourceFairnessComparator comparator =
- new DominantResourceFairnessPolicy.DominantResourceFairnessComparator();
- comparator.calculateShares(used, capacity, shares, resourceOrder,
- ResourceWeights.NEUTRAL);
-
- assertEquals(.1, shares.getWeight(ResourceType.MEMORY), .00001);
- assertEquals(.5, shares.getWeight(ResourceType.CPU), .00001);
- assertEquals(ResourceType.CPU, resourceOrder[0]);
- assertEquals(ResourceType.MEMORY, resourceOrder[1]);
+ Resource minShares = Resources.createResource(5, 10);
+ float[][] ratios = new float[3][3];
+ DominantResourceFairnessComparator comparator =
+ new DominantResourceFairnessComparator();
+
+ used.setResourceValue("test", 2L);
+ minShares.setResourceValue("test", 0L);
+
+ comparator.calculateMinShareRatios(used, minShares, ratios);
+
+ assertEquals("Calculated min share ratio for memory (10MB out of 5MB) is "
+ + "incorrect", 2.0,
+ ratios[index.get(ResourceInformation.MEMORY_MB.getName())][2], .00001f);
+ assertEquals("Calculated min share ratio for vcores (5 out of 10) is "
+ + "incorrect", 0.5,
+ ratios[index.get(ResourceInformation.VCORES.getName())][2], .00001f);
+ assertEquals("Calculated min share ratio for test resource (0 out of 5) is "
+ + "incorrect", Float.POSITIVE_INFINITY, ratios[index.get("test")][2],
+ 0.00001f);
+ }
+
+ @Test
+ public void testCompareShares() {
+ float[][] ratios1 = {
+ {0.4f, 0.1f, 2.0f},
+ {0.3f, 2.0f, 0.1f},
+ {0.2f, 1.0f, 9.0f}
+ };
+ float[][] ratios2 = {
+ {0.3f, 2.0f, 1.0f},
+ {0.2f, 0.1f, 0.5f},
+ {0.2f, 1.0f, 2.0f}
+ };
+ float[][] ratios3 = {
+ {0.3f, 2.0f, 1.0f},
+ {0.2f, 0.1f, 2.0f},
+ {0.1f, 2.0f, 1.0f}
+ };
+ DominantResourceFairnessComparator comparator =
+ new DominantResourceFairnessComparator();
+
+ int ret = comparator.compareRatios(ratios1, ratios2, 0);
+
+ assertEquals("Expected the first array to be larger because the first "
+ + "usage ratio element is larger", 1, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios1, 0);
+
+ assertEquals("Expected the first array to be smaller because the first "
+ + "usage ratio element is smaller", -1, ret);
+
+ ret = comparator.compareRatios(ratios1, ratios1, 0);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios2, 0);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios3, ratios3, 0);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios3, 0);
+
+ assertEquals("Expected the first array to be larger because the last "
+ + "usage ratio element is larger, and all other elements are equal",
+ 1, ret);
+
+ ret = comparator.compareRatios(ratios1, ratios2, 1);
+
+ assertEquals("Expected the first array to be smaller because the first "
+ + "fair share ratio element is smaller", -1, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios1, 1);
+
+ assertEquals("Expected the first array to be larger because the first "
+ + "fair share ratio element is larger", 1, ret);
+
+ ret = comparator.compareRatios(ratios1, ratios1, 1);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios2, 1);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios3, ratios3, 1);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios3, 1);
+
+ assertEquals("Expected the first array to be smaller because the last "
+ + "usage ratio element is smaller, and all other elements are equal",
+ -1, ret);
+
+ ret = comparator.compareRatios(ratios1, ratios2, 2);
+
+ assertEquals("Expected the first array to be larger because the first "
+ + "min share ratio element is larger", 1, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios1, 2);
+
+ assertEquals("Expected the first array to be smaller because the first "
+ + "min share ratio element is smaller", -1, ret);
+
+ ret = comparator.compareRatios(ratios1, ratios1, 2);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios2, 2);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios3, ratios3, 2);
+
+ assertEquals("Expected the arrays to be equal, since they're the same "
+ + "array", 0, ret);
+
+ ret = comparator.compareRatios(ratios2, ratios3, 2);
+
+ assertEquals("Expected the first array to be smaller because the second "
+ + "min share ratio element is smaller, and all the first elements are "
+ + "equal", -1, ret);
}
@Test
@@ -183,4 +361,12 @@ public void testCompareSchedulablesWithClusterResourceChanges(){
assertTrue(createComparator(8000, 6)
.compare(schedulable1, schedulable2) < 0);
}
+
+ private static void addResources(String... resources) {
+ Configuration conf = new Configuration();
+
+ // Add a third resource to the allowed set
+ conf.set(YarnConfiguration.RESOURCE_TYPES, Joiner.on(',').join(resources));
+ ResourceUtils.resetResourceTypes(conf);
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
index d9ed073e95b..f88ac8b4d20 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestAppPage.java
@@ -22,6 +22,7 @@
import static org.mockito.Mockito.when;
import java.io.IOException;
+import java.util.HashMap;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -62,9 +63,10 @@ public void testAppBlockRenderWithNullCurrentAppAttempt() throws Exception {
when(app.getStartTime()).thenReturn(0L);
when(app.getFinishTime()).thenReturn(0L);
when(app.createApplicationState()).thenReturn(YarnApplicationState.FAILED);
-
- RMAppMetrics appMetrics = new RMAppMetrics(
- Resource.newInstance(0, 0), 0, 0, 0, 0, 0, 0);
+
+ RMAppMetrics appMetrics =
+ new RMAppMetrics(Resource.newInstance(0, 0), 0, 0, new HashMap<>(),
+ new HashMap<>());
when(app.getRMAppMetrics()).thenReturn(appMetrics);
// initialize RM Context, and create RMApp, without creating RMAppAttempt
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
index 8c00b39c4ba..cea0088d334 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java
@@ -51,6 +51,7 @@
import java.io.IOException;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ConcurrentMap;
@@ -136,8 +137,8 @@ private static RMContext mockRMContext(List states) {
MockRMApp app = new MockRMApp(i, i, state) {
@Override
public RMAppMetrics getRMAppMetrics() {
- return new RMAppMetrics(Resource.newInstance(0, 0),
- 0, 0, 0, 0, 0, 0);
+ return new RMAppMetrics(Resource.newInstance(0, 0), 0, 0,
+ new HashMap<>(), new HashMap<>());
}
@Override
public YarnApplicationState createApplicationState() {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 1cbdec3a225..07f74a358e7 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -1528,7 +1528,7 @@ public void verifyAppsXML(NodeList nodes, RMApp app, boolean hasResourceReq)
public void verifyAppInfo(JSONObject info, RMApp app, boolean hasResourceReqs)
throws JSONException, Exception {
- int expectedNumberOfElements = 36 + (hasResourceReqs ? 2 : 0);
+ int expectedNumberOfElements = 38 + (hasResourceReqs ? 2 : 0);
String appNodeLabelExpression = null;
String amNodeLabelExpression = null;
if (app.getApplicationSubmissionContext()
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
index 71de6b470e9..5fa6a3e491c 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
@@ -27,6 +27,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -303,6 +305,12 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
return clientRMProxy.updateApplicationTimeouts(request);
}
+ @Override
+ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
+ GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
+ return clientRMProxy.getResourceTypeInfo(request);
+ }
+
@VisibleForTesting
public void setRMClient(ApplicationClientProtocol clientRM) {
this.clientRMProxy = clientRM;
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
index 9f4e3260e06..645929e3998 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
@@ -32,6 +32,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -709,4 +711,9 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
throw new NotImplementedException();
}
+ @Override
+ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
+ GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
+ throw new NotImplementedException();
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
index fd2c610c7fe..92b43d5eeb0 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
@@ -38,6 +38,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -403,6 +405,13 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
return pipeline.getRootInterceptor().updateApplicationTimeouts(request);
}
+ @Override
+ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
+ GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
+ RequestInterceptorChainWrapper pipeline = getInterceptorChain();
+ return pipeline.getRootInterceptor().getResourceTypeInfo(request);
+ }
+
private RequestInterceptorChainWrapper getInterceptorChain()
throws IOException {
String user = UserGroupInformation.getCurrentUser().getUserName();
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java
index c403bd5006c..76faf947187 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/PassThroughClientRequestInterceptor.java
@@ -24,6 +24,8 @@
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -264,4 +266,10 @@ public UpdateApplicationTimeoutsResponse updateApplicationTimeouts(
throws YarnException, IOException {
return getNextInterceptor().updateApplicationTimeouts(request);
}
+
+ @Override
+ public GetAllResourceTypeInfoResponse getResourceTypeInfo(
+ GetAllResourceTypeInfoRequest request) throws YarnException, IOException {
+ return getNextInterceptor().getResourceTypeInfo(request);
+ }
}