diff --git hadoop-common-project/hadoop-common/src/main/winutils/task.c hadoop-common-project/hadoop-common/src/main/winutils/task.c index 21b1893..dccbee0 100644 --- hadoop-common-project/hadoop-common/src/main/winutils/task.c +++ hadoop-common-project/hadoop-common/src/main/winutils/task.c @@ -49,6 +49,31 @@ typedef enum TaskCommandOptionType TaskProcessList } TaskCommandOption; + //---------------------------------------------------------------------------- +// Function: GetLimit +// +// Description: +// Get the resource limit value in long type given the command line argument. +// +// Returns: +// TRUE: If successfully get the value +// FALSE: otherwise +static BOOL GetLimit(__in const wchar_t *str, __out long *value) +{ + wchar_t *end = NULL; + if (str == NULL || value == NULL) return FALSE; + *value = wcstol(str, &end, 10); + if (end == NULL || *end != '\0') + { + *value = -1; + return FALSE; + } + else + { + return TRUE; + } +} + //---------------------------------------------------------------------------- // Function: ParseCommandLine // @@ -61,7 +86,9 @@ typedef enum TaskCommandOptionType // FALSE: otherwise static BOOL ParseCommandLine(__in int argc, __in_ecount(argc) wchar_t *argv[], - __out TaskCommandOption *command) + __out TaskCommandOption *command, + __out_opt long *memory, + __out_opt long *vcore) { *command = TaskInvalid; @@ -88,9 +115,44 @@ static BOOL ParseCommandLine(__in int argc, } } - if (argc == 4) { + if (argc >= 4 && argc <= 8) { if (wcscmp(argv[1], L"create") == 0) { + int i; + for (i = 2; i < argc - 3; i++) + { + if (wcscmp(argv[i], L"-c") == 0) + { + if (vcore != NULL && !GetLimit(argv[i + 1], vcore)) + { + return FALSE; + } + else + { + i++; + continue; + } + } + else if (wcscmp(argv[i], L"-m") == 0) + { + if (memory != NULL && !GetLimit(argv[i + 1], memory)) + { + return FALSE; + } + else + { + i++; + continue; + } + } + else + { + break; + } + } + if (argc - i != 2) + return FALSE; + *command = TaskCreate; return TRUE; } @@ -573,7 
+635,7 @@ done: // ERROR_SUCCESS: On success // GetLastError: otherwise DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PCWSTR cmdLine, - __in LPCWSTR userName) + __in LPCWSTR userName, __in long memory, __in long cpuRate) { DWORD dwErrorCode = ERROR_SUCCESS; DWORD exitCode = EXIT_FAILURE; @@ -616,6 +678,12 @@ DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PC return dwErrorCode; } jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; + if (memory > 0) + { + jeli.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_JOB_MEMORY; + jeli.ProcessMemoryLimit = memory * 1024 * 1024; + jeli.JobMemoryLimit = memory * 1024 * 1024; + } if(SetInformationJobObject(jobObject, JobObjectExtendedLimitInformation, &jeli, @@ -626,6 +694,24 @@ DWORD CreateTaskImpl(__in_opt HANDLE logonHandle, __in PCWSTR jobObjName,__in PC CloseHandle(jobObject); return dwErrorCode; } +#ifdef NTDDI_WIN8 + if (cpuRate > 0) + { + JOBOBJECT_CPU_RATE_CONTROL_INFORMATION jcrci = { 0 }; + SYSTEM_INFO sysinfo; + GetSystemInfo(&sysinfo); + jcrci.ControlFlags = JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | + JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP; + jcrci.CpuRate = min(10000, cpuRate); + if(SetInformationJobObject(jobObject, JobObjectCpuRateControlInformation, + &jcrci, sizeof(jcrci)) == 0) + { + dwErrorCode = GetLastError(); + CloseHandle(jobObject); + return dwErrorCode; + } + } +#endif if (logonHandle != NULL) { dwErrorCode = AddNodeManagerAndUserACEsToObject(jobObject, userName, JOB_OBJECT_ALL_ACCESS); @@ -809,10 +895,10 @@ create_process_done: // Returns: // ERROR_SUCCESS: On success // GetLastError: otherwise -DWORD CreateTask(__in PCWSTR jobObjName,__in PWSTR cmdLine) +DWORD CreateTask(__in PCWSTR jobObjName,__in PWSTR cmdLine, __in long memory, __in long cpuRate) { // call with null logon in order to create tasks utilizing the current logon - return CreateTaskImpl( NULL, jobObjName, cmdLine, NULL); + return CreateTaskImpl( NULL, 
jobObjName, cmdLine, NULL, memory, cpuRate); } //---------------------------------------------------------------------------- @@ -893,7 +979,7 @@ DWORD CreateTaskAsUser(__in PCWSTR jobObjName, goto done; } - err = CreateTaskImpl(logonHandle, jobObjName, cmdLine, user); + err = CreateTaskImpl(logonHandle, jobObjName, cmdLine, user, -1, -1); done: if( profileIsLoaded ) { @@ -1095,6 +1181,8 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[]) { DWORD dwErrorCode = ERROR_SUCCESS; TaskCommandOption command = TaskInvalid; + long memory = -1; + long cpuRate = -1; wchar_t* cmdLine = NULL; wchar_t buffer[16*1024] = L""; // 32K max command line size_t charCountBufferLeft = sizeof(buffer)/sizeof(wchar_t); @@ -1111,7 +1199,7 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[]) ARGC_COMMAND_ARGS }; - if (!ParseCommandLine(argc, argv, &command)) { + if (!ParseCommandLine(argc, argv, &command, &memory, &cpuRate)) { dwErrorCode = ERROR_INVALID_COMMAND_LINE; fwprintf(stderr, L"Incorrect command line arguments.\n\n"); @@ -1123,7 +1211,7 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[]) { // Create the task jobobject // - dwErrorCode = CreateTask(argv[2], argv[3]); + dwErrorCode = CreateTask(argv[argc-2], argv[argc-1], memory, cpuRate); if (dwErrorCode != ERROR_SUCCESS) { ReportErrorCode(L"CreateTask", dwErrorCode); @@ -1238,18 +1326,30 @@ void TaskUsage() // jobobject's are being used. // ProcessTree.isSetsidSupported() fwprintf(stdout, L"\ - Usage: task create [TASKNAME] [COMMAND_LINE] |\n\ - task createAsUser [TASKNAME] [USERNAME] [PIDFILE] [COMMAND_LINE] |\n\ - task isAlive [TASKNAME] |\n\ - task kill [TASKNAME]\n\ - task processList [TASKNAME]\n\ - Creates a new task jobobject with taskname\n\ - Creates a new task jobobject with taskname as the user provided\n\ - Checks if task jobobject is alive\n\ - Kills task jobobject\n\ - Prints to stdout a list of processes in the task\n\ - along with their resource usage. 
One process per line\n\ -    and comma separated info per process\n\ -    ProcessId,VirtualMemoryCommitted(bytes),\n\ -    WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n"); +Usage: task create [OPTIONS] [TASKNAME] [COMMAND_LINE]\n\ +  Creates a new task job object with taskname and options to set CPU\n\ +  and memory limits on the job object\n\ +\n\ +  OPTIONS: -c [cpu rate] set the cpu rate limit on the job object.\n\ +           -m [memory] set the memory limit on the job object.\n\ +  The cpu limit is an integral value of percentage * 100. The memory\n\ +  limit is an integral amount of memory in MB. \n\ +  The limit will not be set if 0 or negative value is passed in as\n\ +  parameter(s).\n\ +\n\ + task createAsUser [TASKNAME] [USERNAME] [PIDFILE] [COMMAND_LINE]\n\ +  Creates a new task jobobject with taskname as the user provided\n\ +\n\ + task isAlive [TASKNAME]\n\ +  Checks if task job object is alive\n\ +\n\ + task kill [TASKNAME]\n\ +  Kills task job object\n\ +\n\ + task processList [TASKNAME]\n\ +  Prints to stdout a list of processes in the task\n\ +  along with their resource usage. 
One process per line\n\ + and comma separated info per process\n\ + ProcessId,VirtualMemoryCommitted(bytes),\n\ + WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n"); } diff --git hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props new file mode 100644 index 0000000..503b37a --- /dev/null +++ hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props @@ -0,0 +1,28 @@ + + + + + + + $(VCInstallDir)bin\x86_amd64;$(VCInstallDir)bin;$(WindowsSdkDir)bin\NETFX 4.0 Tools;$(MSBuildProgramFiles32)\Windows Kits\8.1\bin\x86;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;$(MSBuildProgramFiles32)\HTML Help Workshop;$(FrameworkSDKDir)\bin;$(MSBuildToolsPath32);$(VSInstallDir);$(SystemRoot)\SysWow64;$(FxCopDir);$(PATH) + $(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(FrameworkSDKDir)\include; + $(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(MSBuildProgramFiles32)\Windows Kits\8.1\lib\win8\um\x64;$(MSBuildProgramFiles32)\Windows Kits\8.1\Lib\winv6.3\um\x64;$(FrameworkSDKDir)\lib\x64 + $(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(FrameworkSDKDir)\include;$(MSBuildToolsPath32);$(VCInstallDir)atlmfc\lib;$(VCInstallDir)lib; + + + diff --git hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj index 9ecba0a..76a7414 100644 --- hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj +++ hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj @@ -67,6 +67,9 @@ + + + diff --git hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java 
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java index 2d4e442..d09c4de 100644 --- hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java +++ hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java @@ -545,4 +545,66 @@ public void testTaskCreate() throws IOException { assertThat(outNumber, containsString(testNumber)); } + + @Test (timeout = 30000) + public void testTaskCreateWithLimits() throws IOException { + // Generate a unique job id + String jobId = String.format("%f", Math.random()); + + // Run a task without any options + String out = Shell.execCommand(Shell.WINUTILS, "task", "create", + "job" + jobId, "cmd /c echo job" + jobId); + assertTrue(out.trim().equals("job" + jobId)); + + // Run a task without any limits + jobId = String.format("%f", Math.random()); + out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m", + "-1", "job" + jobId, "cmd /c echo job" + jobId); + assertTrue(out.trim().equals("job" + jobId)); + + // Run a task with limits (128MB should be enough for a cmd) + jobId = String.format("%f", Math.random()); + out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "10000", "-m", + "128", "job" + jobId, "cmd /c echo job" + jobId); + assertTrue(out.trim().equals("job" + jobId)); + + // Run a task without enough memory + try { + jobId = String.format("%f", Math.random()); + out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-m", "128", "job" + + jobId, "java -Xmx256m -version"); + fail("Failed to get Shell.ExitCodeException with insufficient memory"); + } catch (Shell.ExitCodeException ece) { + assertThat(ece.getExitCode(), is(1)); + } + + // Run tasks with wrong parameters + // + try { + jobId = String.format("%f", Math.random()); + Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m", + "-1", "foo", "job" + jobId, "cmd /c echo job" + jobId); + fail("Failed to get 
Shell.ExitCodeException with bad parameters"); + } catch (Shell.ExitCodeException ece) { + assertThat(ece.getExitCode(), is(1639)); + } + + try { + jobId = String.format("%f", Math.random()); + Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-m", "-1", + "job" + jobId, "cmd /c echo job" + jobId); + fail("Failed to get Shell.ExitCodeException with bad parameters"); + } catch (Shell.ExitCodeException ece) { + assertThat(ece.getExitCode(), is(1639)); + } + + try { + jobId = String.format("%f", Math.random()); + Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "foo", + "job" + jobId, "cmd /c echo job" + jobId); + fail("Failed to get Shell.ExitCodeException with bad parameters"); + } catch (Shell.ExitCodeException ece) { + assertThat(ece.getExitCode(), is(1639)); + } + } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index ff06eea..3335499 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1021,6 +1021,18 @@ private static void addDeprecatedKeys() { public static final long DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY = 20; + /** + * Indicates if memory and CPU limit will be set for the Windows Job + * Object for the containers launched by Windows container executor. 
+ */ + public static final String NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED = + NM_PREFIX + "windows-container-executor.memory-limit.enabled"; + public static final boolean DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED = true; + + public static final String NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED = + NM_PREFIX + "windows-container-executor.cpu-limit.enabled"; + public static final boolean DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED = false; + /** /* The Windows group that the windows-secure-container-executor should run as. */ diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 77193df..2e96d0b 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; @@ -298,6 +299,11 @@ protected Path getPidFilePath(ContainerId containerId) { readLock.unlock(); } } + + protected String[] getRunCommand(String command, String groupId, + String userName, Path pidFile, Configuration conf) { + return getRunCommand(command, groupId, userName, pidFile, conf, null); + } 
/** * Return a command to execute the given command in OS shell. @@ -306,7 +312,7 @@ protected Path getPidFilePath(ContainerId containerId) { * non-Windows, groupId is ignored. */ protected String[] getRunCommand(String command, String groupId, - String userName, Path pidFile, Configuration conf) { + String userName, Path pidFile, Configuration conf, Resource resource) { boolean containerSchedPriorityIsSet = false; int containerSchedPriorityAdjustment = YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY; diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java new file mode 100644 index 0000000..55d300b --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java @@ -0,0 +1,62 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ +package org.apache.hadoop.yarn.server.nodemanager; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils; +import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; + +public class WindowsContainerExecutor extends DefaultContainerExecutor { + + @Override + protected String[] getRunCommand(String command, String groupId, + String userName, Path pidFile, Configuration conf, Resource resource) { + if (!Shell.WINDOWS) + return null; + + int cpuRate = -1; + int memory = -1; + if (resource != null) { + if (conf.getBoolean(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, + YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) { + memory = resource.getMemory(); + } + + if (conf.getBoolean(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, + YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED)) { + int vcores = resource.getVirtualCores(); + // cap overall usage to the number of cores allocated to YARN + float yarnProcessors = NodeManagerHardwareUtils.getContainersCores( + ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf), + conf); + // CPU should be set to a percentage * 100, e.g. 20% cpu rate limit + // should be set as 20 * 100. 
The following setting is equal to: + // 100 * (100 * (vcores / Total # of cores allocated to YARN)) + cpuRate = Math.min(10000, (int) ((vcores * 10000) / yarnProcessors)); + } + } + + return new String[] { Shell.WINUTILS, "task", "create", "-m", + String.valueOf(memory), "-c", String.valueOf(cpuRate), groupId, + "cmd /c " + command }; + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java new file mode 100644 index 0000000..cd1197b --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager; + +import static org.junit.Assume.assumeTrue; + +import java.util.Arrays; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils; +import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class TestWindowsContainerExecutor { + @Before + public void setUp() { + // Not supported on non-Windows platforms + assumeTrue(Shell.WINDOWS); + } + + @Test + public void testRunCommandWithMemoryResources() { + Configuration conf = new Configuration(); + ContainerExecutor wce = new WindowsContainerExecutor(); + String[] command = wce.getRunCommand("echo", "group1", null, null, + conf, Resource.newInstance(1024, 1)); + // Assert the cpu and memory limits are set correctly in the command + String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c", + "-1", "group1", "cmd /c " + "echo" }; + Assert.assertTrue(Arrays.equals(expected, command)); + } + + @Test + public void testRunCommandWithNoResources() { + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "false"); + ContainerExecutor wce = new WindowsContainerExecutor(); + String[] command = wce.getRunCommand("echo", "group1", null, null, + conf, Resource.newInstance(1024, 1)); + // Assert the cpu and memory limits are set correctly in the command + String[] expected = { Shell.WINUTILS, "task", "create", "-m", "-1", "-c", + "-1", "group1", "cmd /c " + "echo" }; + Assert.assertTrue(Arrays.equals(expected, command)); + } + + @Test + public void testRunCommandWithCpuAndMemoryResources() { + Configuration conf = new Configuration(); + 
conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED, "true"); + ContainerExecutor wce = new WindowsContainerExecutor(); + String[] command = wce.getRunCommand("echo", "group1", null, null, + conf, Resource.newInstance(1024, 1)); + float yarnProcessors = NodeManagerHardwareUtils.getContainersCores( + ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf), + conf); + int cpuRate = Math.min(10000, (int) ((1 * 10000) / yarnProcessors)); + // Assert the cpu and memory limits are set correctly in the command + String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c", + String.valueOf(cpuRate), "group1", "cmd /c " + "echo" }; + Assert.assertTrue(Arrays.equals(expected, command)); + } +}