diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 034ec4f..1b56725 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -723,6 +723,16 @@
/** Number of Virtual CPU Cores which can be allocated for containers.*/
public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores";
public static final int DEFAULT_NM_VCORES = 8;
+
+ /** Percentage of overall CPU which can be allocated for containers. */
+ public static final String NM_CONTAINERS_CPU_PERC = NM_PREFIX
+ + "containers-cpu-percentage";
+ public static final int DEFAULT_NM_CONTAINERS_CPU_PERC = 100;
+
+ /** Number of CPUs which can be allocated for containers. */
+ public static final String NM_CONTAINERS_CPU_ABSOLUTE = NM_PREFIX
+ + "containers-cpu-cores";
+ public static final int DEFAULT_NM_CONTAINERS_CPU_ABSOLUTE = -1;
/** NM Webapp address.**/
public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address";
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 9b4a90f..01254ae 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -871,13 +871,37 @@
- <description>Number of CPU cores that can be allocated
- for containers.</description>
+ <description>Number of vcores that can be allocated
+ for containers. This is used by the RM scheduler when allocating
+ resources for containers. This is not used to limit the number of
+ physical cores used by YARN containers.</description>
  <name>yarn.nodemanager.resource.cpu-vcores</name>
  <value>8</value>
  </property>

+ <property>
+ <description>Number of CPU cores that can be allocated
+ for containers. If set to a negative number, the number derived from
+ yarn.nodemanager.containers-cpu-percentage will be used. This setting
+ allows users to limit the number of physical cores that YARN
+ containers use. Currently functional only on Linux using cgroups.
+ </description>
+ <name>yarn.nodemanager.containers-cpu-cores</name>
+ <value>-1</value>
+ </property>
+
+ <property>
+ <description>Percentage of CPU cores that can be allocated
+ for containers. If yarn.nodemanager.containers-cpu-cores is also specified,
+ that value will be preferred over this one. This setting
+ allows users to limit the number of physical cores that YARN
+ containers use. Currently functional only on Linux using cgroups.
+ </description>
+ <name>yarn.nodemanager.containers-cpu-percentage</name>
+ <value>100</value>
+ </property>
+
  <property>
  <description>NM Webapp address.</description>
  <name>yarn.nodemanager.webapp.address</name>
  <value>${yarn.nodemanager.hostname}:8042</value>
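
As an illustration of the precedence described by these two properties (the 16-core node here is hypothetical): with yarn.nodemanager.containers-cpu-percentage set to 50 and yarn.nodemanager.containers-cpu-cores left at -1, containers are capped at 8 physical cores; setting yarn.nodemanager.containers-cpu-cores to 6 caps them at 6 regardless of the percentage. The precedence itself is implemented in NodeManagerHardwareUtils.getContainerCores() below.
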
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index d5bd225..223ba7c 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -42,6 +42,8 @@
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.SystemClock;
public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
@@ -59,7 +61,11 @@
private final String MTAB_FILE = "/proc/mounts";
private final String CGROUPS_FSTYPE = "cgroup";
private final String CONTROLLER_CPU = "cpu";
+ private final String CPU_PERIOD_US = "cfs_period_us";
+ private final String CPU_QUOTA_US = "cfs_quota_us";
private final int CPU_DEFAULT_WEIGHT = 1024; // set by kernel
+ private final int MAX_QUOTA_US = 1000 * 1000;
+ private final int MIN_PERIOD_US = 1000;
private final Map<String, String> controllerPaths; // Controller -> path
private long deleteCgroupTimeout;
@@ -106,19 +112,62 @@ void initConfig() throws IOException {
}
public void init(LinuxContainerExecutor lce) throws IOException {
+ this.init(lce,
+ ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf));
+ }
+
+ @VisibleForTesting
+ void init(LinuxContainerExecutor lce, ResourceCalculatorPlugin plugin)
+ throws IOException {
initConfig();
-
+
// mount cgroups if requested
if (cgroupMount && cgroupMountPath != null) {
ArrayList<String> cgroupKVs = new ArrayList<String>();
- cgroupKVs.add(CONTROLLER_CPU + "=" + cgroupMountPath + "/" +
- CONTROLLER_CPU);
+ cgroupKVs.add(CONTROLLER_CPU + "=" + cgroupMountPath + "/"
+ + CONTROLLER_CPU);
lce.mountCgroups(cgroupKVs, cgroupPrefix);
}
initializeControllerPaths();
+
+ // cap overall usage to the number of cores allocated to YARN
+ float yarnProcessors =
+ NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ LOG.info("YARN containers restricted to " + yarnProcessors + " cores");
+ int[] limits = getOverallLimits(yarnProcessors);
+ updateCgroup(CONTROLLER_CPU, "", CPU_PERIOD_US, String.valueOf(limits[0]));
+ updateCgroup(CONTROLLER_CPU, "", CPU_QUOTA_US, String.valueOf(limits[1]));
}
+ @VisibleForTesting
+ int[] getOverallLimits(float yarnProcessors) {
+
+ if (yarnProcessors < 0.01f) {
+ throw new IllegalArgumentException("Number of processors can't be <= 0.");
+ }
+
+ int[] ret = new int[2];
+
+ int quotaUS = MAX_QUOTA_US;
+ int periodUS = (int) (MAX_QUOTA_US / yarnProcessors);
+ if (yarnProcessors < 1.0f) {
+ periodUS = MAX_QUOTA_US;
+ quotaUS = (int) (periodUS * yarnProcessors);
+ }
+
+ // cfs_period_us can't be less than 1000 microseconds
+ // if the value of periodUS is less than 1000, we can't really use cgroups
+ // to limit cpu
+ if (periodUS < MIN_PERIOD_US) {
+ periodUS = MAX_QUOTA_US;
+ quotaUS = -1;
+ }
+
+ ret[0] = periodUS;
+ ret[1] = quotaUS;
+ return ret;
+ }
boolean isCpuWeightEnabled() {
return this.cpuWeightEnabled;
@@ -274,7 +323,7 @@ public String getResourcesOption(ContainerId containerId) {
BufferedReader in = null;
try {
- in = new BufferedReader(new FileReader(new File(MTAB_FILE)));
+ in = new BufferedReader(new FileReader(new File(getMtabFileName())));
for (String str = in.readLine(); str != null;
str = in.readLine()) {
@@ -292,13 +341,13 @@ public String getResourcesOption(ContainerId containerId) {
}
}
} catch (IOException e) {
- throw new IOException("Error while reading " + MTAB_FILE, e);
+ throw new IOException("Error while reading " + getMtabFileName(), e);
} finally {
// Close the streams
try {
in.close();
} catch (IOException e2) {
- LOG.warn("Error closing the stream: " + MTAB_FILE, e2);
+ LOG.warn("Error closing the stream: " + getMtabFileName(), e2);
}
}
@@ -334,7 +383,12 @@ private void initializeControllerPaths() throws IOException {
}
} else {
throw new IOException("Not able to enforce cpu weights; cannot find "
- + "cgroup for cpu controller in " + MTAB_FILE);
+ + "cgroup for cpu controller in " + getMtabFileName());
}
}
+
+ @VisibleForTesting
+ String getMtabFileName() {
+ return MTAB_FILE;
+ }
}
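
The init() change above caps all YARN containers by writing a cfs period and quota into the cpu cgroup. Below is a minimal standalone sketch of that arithmetic (not part of the patch; the class name and main() driver are illustrative only), reproducing getOverallLimits() and printing the values that would be written for a few core counts.

// Standalone sketch (illustrative only): the same arithmetic as
// getOverallLimits() above, with a tiny driver printing example values.
public class CfsLimitsSketch {
  static final int MAX_QUOTA_US = 1000 * 1000; // 1 second, in microseconds
  static final int MIN_PERIOD_US = 1000;       // kernel minimum for cpu.cfs_period_us

  // returns {periodUs, quotaUs}; quotaUs == -1 means "unlimited" to the kernel
  static int[] overallLimits(float yarnProcessors) {
    if (yarnProcessors < 0.01f) {
      throw new IllegalArgumentException("Number of processors can't be <= 0.");
    }
    int quotaUs = MAX_QUOTA_US;
    int periodUs = (int) (MAX_QUOTA_US / yarnProcessors);
    if (yarnProcessors < 1.0f) {
      // under one core: keep the period at the max and shrink the quota instead
      periodUs = MAX_QUOTA_US;
      quotaUs = (int) (periodUs * yarnProcessors);
    }
    if (periodUs < MIN_PERIOD_US) {
      // so many cores that the period would fall below 1000us; don't cap at all
      periodUs = MAX_QUOTA_US;
      quotaUs = -1;
    }
    return new int[] { periodUs, quotaUs };
  }

  public static void main(String[] args) {
    // 2 cores    -> period=500000,  quota=1000000 (quota/period = 2 cores)
    // 0.5 cores  -> period=1000000, quota=500000  (quota/period = 0.5 cores)
    // 2000 cores -> period would be 500us < 1000us, so quota=-1 (no cap)
    for (float cores : new float[] { 2f, 0.5f, 2000f }) {
      int[] limits = overallLimits(cores);
      System.out.println(cores + " -> period=" + limits[0] + " quota=" + limits[1]);
    }
  }
}
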
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
new file mode 100644
index 0000000..df34988
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+
+import java.io.IOException;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class NodeManagerHardwareUtils {
+
+ /**
+ *
+ * Returns the number of cores that should be used for YARN containers. The
+ * number is derived from configuration parameters such as
+ * YarnConfiguration.NM_CONTAINERS_CPU_PERC and
+ * YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE.
+ *
+ * @param conf
+ * - Configuration object
+ * @return the number of cores to be used for YARN containers
+ */
+ public static float getContainerCores(Configuration conf) {
+ ResourceCalculatorPlugin plugin = ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
+ return NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ }
+
+ /**
+ *
+ * Returns the number of cores that should be used for YARN containers. The
+ * number is derived from configuration parameters such as
+ * YarnConfiguration.NM_CONTAINERS_CPU_PERC and
+ * YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE.
+ *
+ * @param plugin
+ * - ResourceCalculatorPlugin object to determine hardware specs
+ * @param conf
+ * - Configuration object
+ * @return the number of cores to be used for YARN containers
+ */
+ public static float getContainerCores(ResourceCalculatorPlugin plugin,
+ Configuration conf) {
+ int numProcessors = plugin.getNumProcessors();
+ int nodeCpuPercentage =
+ Math.min(conf.getInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC,
+ YarnConfiguration.DEFAULT_NM_CONTAINERS_CPU_PERC), 100);
+ nodeCpuPercentage = Math.max(0, nodeCpuPercentage);
+ int nodeCpuAbsolute =
+ conf.getInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE,
+ YarnConfiguration.DEFAULT_NM_CONTAINERS_CPU_ABSOLUTE);
+
+ if (nodeCpuAbsolute == 0) {
+ String message =
+ "Illegal value for " + YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE
+ + ". Value cannot be 0.";
+ throw new IllegalArgumentException(message);
+ }
+ if (nodeCpuPercentage == 0) {
+ String message =
+ "Illegal value for " + YarnConfiguration.NM_CONTAINERS_CPU_PERC
+ + ". Value cannot be less than or equal to 0.";
+ throw new IllegalArgumentException(message);
+ }
+
+ float yarnProcessors;
+ if (nodeCpuAbsolute <= -1) {
+ // absolute number of cores less than 0, fall back on cpu percentage
+ yarnProcessors = (nodeCpuPercentage * numProcessors) / 100.0f;
+ } else {
+ // absolute number of cores specified, use that number over cpu percentage
+ yarnProcessors = Math.min(nodeCpuAbsolute, numProcessors);
+ }
+ return yarnProcessors;
+ }
+}
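
A quick standalone sketch of the precedence getContainerCores() implements (illustrative only; the class name, helper signature, and the 8-processor node are assumptions for the example). The percentage path yields a fractional core count, while a non-negative absolute value wins over the percentage but is capped at the number of physical processors.

// Standalone sketch (illustrative only) of the getContainerCores() precedence.
public class ContainerCoresSketch {
  static float containerCores(int numProcessors, int cpuPercentage, int cpuAbsolute) {
    int pct = Math.max(0, Math.min(cpuPercentage, 100));
    if (cpuAbsolute == 0 || pct == 0) {
      throw new IllegalArgumentException("CPU limit must be greater than 0");
    }
    if (cpuAbsolute <= -1) {
      // absolute count unset: fall back to a percentage of all processors
      return (pct * numProcessors) / 100.0f;
    }
    // absolute count set: it wins over the percentage, capped at the hardware
    return Math.min(cpuAbsolute, numProcessors);
  }

  public static void main(String[] args) {
    // assuming a hypothetical 8-processor node:
    System.out.println(containerCores(8, 75, -1)); // 6.0 -> 75% of 8 processors
    System.out.println(containerCores(8, 75, 3));  // 3.0 -> absolute value wins
    System.out.println(containerCores(8, 75, 12)); // 8.0 -> capped at 8 processors
  }
}
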
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
index 611045e..d736175 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestCgroupsLCEResourcesHandler.java
@@ -17,13 +17,17 @@
*/
package org.apache.hadoop.yarn.server.nodemanager.util;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.junit.Assert;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Clock;
import org.junit.Test;
-import java.io.File;
-import java.io.FileOutputStream;
+import java.io.*;
+import java.util.List;
+import java.util.Scanner;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
@@ -70,4 +74,168 @@ public void run() {
Assert.assertFalse(handler.deleteCgroup(file.getPath()));
}
+ static class MockLinuxContainerExecutor extends LinuxContainerExecutor {
+ @Override
+ public void mountCgroups(List<String> x, String y) {
+ }
+ }
+
+ static class CustomCgroupsLCEResourceHandler extends
+ CgroupsLCEResourcesHandler {
+
+ String mtabFile;
+ int[] limits = new int[2];
+
+ @Override
+ int[] getOverallLimits(float x) {
+ return limits;
+ }
+
+ void setMtabFile(String file) {
+ mtabFile = file;
+ }
+
+ @Override
+ String getMtabFileName() {
+ return mtabFile;
+ }
+ }
+
+ // only override getNumProcessors; it's the one we care about
+ static class TestResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+ @Override
+ public long getVirtualMemorySize() {
+ return 0;
+ }
+
+ @Override
+ public long getPhysicalMemorySize() {
+ return 0;
+ }
+
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ return 0;
+ }
+
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ return 0;
+ }
+
+ @Override
+ public int getNumProcessors() {
+ return 4;
+ }
+
+ @Override
+ public long getCpuFrequency() {
+ return 0;
+ }
+
+ @Override
+ public long getCumulativeCpuTime() {
+ return 0;
+ }
+
+ @Override
+ public float getCpuUsage() {
+ return 0;
+ }
+ }
+
+ @Test
+ public void testInit() throws IOException {
+ LinuxContainerExecutor mockLCE = new MockLinuxContainerExecutor();
+ CustomCgroupsLCEResourceHandler handler =
+ new CustomCgroupsLCEResourceHandler();
+ YarnConfiguration conf = new YarnConfiguration();
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, 4);
+ handler.setConf(conf);
+ handler.initConfig();
+
+ // create mock cgroup
+ File cgroupDir = new File("target", UUID.randomUUID().toString());
+ if (!cgroupDir.mkdir()) {
+ String message = "Could not create dir " + cgroupDir.getAbsolutePath();
+ throw new IOException(message);
+ }
+ File cgroupMountDir = new File(cgroupDir.getAbsolutePath(), "/hadoop-yarn");
+ if (!cgroupMountDir.mkdir()) {
+ String message =
+ "Could not create dir " + cgroupMountDir.getAbsolutePath();
+ throw new IOException(message);
+ }
+
+ // create mock mtab
+ String mtabContent =
+ "none " + cgroupDir.getAbsolutePath() + " cgroup rw,relatime,cpu 0 0";
+ File mockMtab = new File("target", UUID.randomUUID().toString());
+ if (!mockMtab.exists()) {
+ if (!mockMtab.createNewFile()) {
+ String message = "Could not create file " + mockMtab.getAbsolutePath();
+ throw new IOException(message);
+ }
+ }
+ FileWriter mtabWriter = new FileWriter(mockMtab.getAbsoluteFile());
+ mtabWriter.write(mtabContent);
+ mtabWriter.close();
+ mockMtab.deleteOnExit();
+
+ // setup our handler and call init()
+ handler.setMtabFile(mockMtab.getAbsolutePath());
+
+ // check values
+ handler.limits[0] = 100 * 1000;
+ handler.limits[1] = 1000 * 1000;
+ handler.init(mockLCE, new TestResourceCalculatorPlugin());
+ int period = readIntFromFile(cgroupMountDir + "/" + "cpu.cfs_period_us");
+ int quota = readIntFromFile(cgroupMountDir + "/" + "cpu.cfs_quota_us");
+ Assert.assertEquals(handler.limits[0], period);
+ Assert.assertEquals(handler.limits[1], quota);
+
+ handler.limits[0] = 1000 * 1000;
+ handler.limits[1] = -1;
+ handler.init(mockLCE, new TestResourceCalculatorPlugin());
+ period = readIntFromFile(cgroupMountDir + "/" + "cpu.cfs_period_us");
+ quota = readIntFromFile(cgroupMountDir + "/" + "cpu.cfs_quota_us");
+ Assert.assertEquals(1000 * 1000, period);
+ Assert.assertEquals(-1, quota);
+
+ FileUtils.deleteQuietly(cgroupDir);
+ }
+
+ private int readIntFromFile(String filename) throws IOException {
+ Scanner scanner = new Scanner(new File(filename));
+ try {
+ return scanner.hasNextInt() ? scanner.nextInt() : -1;
+ } finally {
+ scanner.close();
+ }
+ }
+
+ @Test
+ public void testGetOverallLimits() {
+
+ int expectedQuota = 1000 * 1000;
+ CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler();
+
+ int[] ret = handler.getOverallLimits(2);
+ Assert.assertEquals(expectedQuota / 2, ret[0]);
+ Assert.assertEquals(expectedQuota, ret[1]);
+
+ ret = handler.getOverallLimits(2000);
+ Assert.assertEquals(expectedQuota, ret[0]);
+ Assert.assertEquals(-1, ret[1]);
+
+ int[] params = { 0, -1 };
+ for (int cores : params) {
+ try {
+ handler.getOverallLimits(cores);
+ Assert.fail("Function call should throw error.");
+ } catch (IllegalArgumentException ie) {
+ // expected
+ }
+ }
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
new file mode 100644
index 0000000..2072d4f
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.util;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestNodeManagerHardwareUtils {
+
+ static class TestResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+ @Override
+ public long getVirtualMemorySize() {
+ return 0;
+ }
+
+ @Override
+ public long getPhysicalMemorySize() {
+ return 0;
+ }
+
+ @Override
+ public long getAvailableVirtualMemorySize() {
+ return 0;
+ }
+
+ @Override
+ public long getAvailablePhysicalMemorySize() {
+ return 0;
+ }
+
+ @Override
+ public int getNumProcessors() {
+ return 4;
+ }
+
+ @Override
+ public long getCpuFrequency() {
+ return 0;
+ }
+
+ @Override
+ public long getCumulativeCpuTime() {
+ return 0;
+ }
+
+ @Override
+ public float getCpuUsage() {
+ return 0;
+ }
+ }
+
+ @Test
+ public void testGetContainerCores() {
+
+ ResourceCalculatorPlugin plugin = new TestResourceCalculatorPlugin();
+ YarnConfiguration conf = new YarnConfiguration();
+ float ret;
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, 0);
+ try {
+ NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.fail("getContainerCores should have thrown exception");
+ } catch (IllegalArgumentException ie) {
+ // expected
+ }
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, -1);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 0);
+ try {
+ NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.fail("getContainerCores should have thrown exception");
+ } catch (IllegalArgumentException ie) {
+ // expected
+ }
+
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, 2);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 100);
+ ret = NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.assertEquals(2, (int) ret);
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, -1);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 100);
+ ret = NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.assertEquals(4, (int) ret);
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, -1);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 50);
+ ret = NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.assertEquals(2, (int) ret);
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, -1);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 75);
+ ret = NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.assertEquals(3, (int) ret);
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, -1);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 85);
+ ret = NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.assertEquals(3.4, ret, 0.1);
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, 1);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 85);
+ ret = NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.assertEquals(1, (int) ret);
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, 5);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 85);
+ ret = NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.assertEquals(4, (int) ret);
+
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_ABSOLUTE, -1);
+ conf.setInt(YarnConfiguration.NM_CONTAINERS_CPU_PERC, 110);
+ ret = NodeManagerHardwareUtils.getContainerCores(plugin, conf);
+ Assert.assertEquals(4, (int) ret);
+ }
+}