diff --git BUILDING.txt BUILDING.txt
index 3940a98..e95ceb7 100644
--- BUILDING.txt
+++ BUILDING.txt
@@ -210,19 +210,24 @@ Requirements:
 * Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
+* Windows SDK 7.1 or Visual Studio 2010 Professional
 * CMake 2.6 or newer
-* Windows SDK or Visual Studio 2010 Professional
 * Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
 * zlib headers (if building native code bindings for zlib)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
+* Windows SDK 8.1 (if building CPU rate control for the Windows container executor)

 If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
 Do not use Visual Studio Express. It does not support compiling for 64-bit,
-which is problematic if running a 64-bit system. The Windows SDK is free to
+which is problematic if running a 64-bit system. The Windows SDK 7.1 is free to
 download here:

 http://www.microsoft.com/en-us/download/details.aspx?id=8279

+The Windows SDK 8.1 is available to download at:
+
+http://msdn.microsoft.com/en-us/windows/bg162891.aspx
+
 ----------------------------------------------------------------------------------
 Building:

diff --git hadoop-common-project/hadoop-common/src/main/winutils/task.c hadoop-common-project/hadoop-common/src/main/winutils/task.c
index 19bda96..a2a76f1 100644
--- hadoop-common-project/hadoop-common/src/main/winutils/task.c
+++ hadoop-common-project/hadoop-common/src/main/winutils/task.c
@@ -40,18 +40,47 @@ typedef enum TaskCommandOptionType
 } TaskCommandOption;

 //----------------------------------------------------------------------------
+// Function: GetLimit
+//
+// Description:
+//  Parse a resource limit value of type long from a command line argument.
+//
+// Returns:
+//  TRUE: if the value was parsed successfully
+//  FALSE: otherwise
+static BOOL GetLimit(__in const wchar_t *str, __out long *value)
+{
+  wchar_t *end = NULL;
+  if (str == NULL || value == NULL) return FALSE;
+  *value = wcstol(str, &end, 10);
+  if (end == NULL || *end != '\0')
+  {
+    *value = -1;
+    return FALSE;
+  }
+  else
+  {
+    return TRUE;
+  }
+}
+
+//----------------------------------------------------------------------------
 // Function: ParseCommandLine
 //
 // Description:
 //  Parses the given command line. On success, out param 'command' contains
-//  the user specified command.
+//  the user specified command with optional memory and cpu limits. The
+//  limits are set to negative values if the corresponding options are not
+//  given on the command line.
 //
 // Returns:
 //  TRUE: If the command line is valid
 //  FALSE: otherwise
 static BOOL ParseCommandLine(__in int argc,
                              __in_ecount(argc) wchar_t *argv[],
-                             __out TaskCommandOption *command)
+                             __out TaskCommandOption *command,
+                             __out_opt long *memory,
+                             __out_opt long *vcore)
 {
   *command = TaskInvalid;

@@ -78,9 +107,44 @@ static BOOL ParseCommandLine(__in int argc,
     }
   }

-  if (argc == 4) {
+  if (argc >= 4 && argc <= 8) {
     if (wcscmp(argv[1], L"create") == 0)
     {
+      int i;
+      for (i = 2; i < argc - 3; i++)
+      {
+        if (wcscmp(argv[i], L"-c") == 0)
+        {
+          if (vcore != NULL && !GetLimit(argv[i + 1], vcore))
+          {
+            return FALSE;
+          }
+          else
+          {
+            i++;
+            continue;
+          }
+        }
+        else if (wcscmp(argv[i], L"-m") == 0)
+        {
+          if (memory != NULL && !GetLimit(argv[i + 1], memory))
+          {
+            return FALSE;
+          }
+          else
+          {
+            i++;
+            continue;
+          }
+        }
+        else
+        {
+          break;
+        }
+      }
+      if (argc - i != 2)
+        return FALSE;
+
       *command = TaskCreate;
       return TRUE;
     }
@@ -90,16 +154,17 @@ static BOOL ParseCommandLine(__in int argc,
 }

 //----------------------------------------------------------------------------
-// Function: createTask
+// Function: createTaskWithLimit
 //
 // Description:
-//  Creates a task via a jobobject. Outputs the
-//  appropriate information to stdout on success, or stderr on failure.
+//  Creates a task via a jobobject, optionally setting CPU and memory limits.
+//  Outputs the appropriate information to stdout on success, or stderr on failure.
 //
 // Returns:
 //  ERROR_SUCCESS: On success
 //  GetLastError: otherwise
-DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine)
+DWORD createTaskWithLimit(__in PCWSTR jobObjName, __in PWSTR cmdLine,
+  __in long memory, __in long cpuRate)
 {
   DWORD err = ERROR_SUCCESS;
   DWORD exitCode = EXIT_FAILURE;
@@ -119,6 +184,12 @@ DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine)
     return err;
   }
   jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+  if (memory > 0)
+  {
+    jeli.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_JOB_MEMORY;
+    jeli.ProcessMemoryLimit = memory * 1024 * 1024;
+    jeli.JobMemoryLimit = memory * 1024 * 1024;
+  }
   if(SetInformationJobObject(jobObject,
                              JobObjectExtendedLimitInformation,
                              &jeli,
@@ -127,7 +198,25 @@ DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine)
     err = GetLastError();
     CloseHandle(jobObject);
     return err;
-  }
+  }
+#ifdef NTDDI_WIN8
+  if (cpuRate > 0)
+  {
+    JOBOBJECT_CPU_RATE_CONTROL_INFORMATION jcrci = { 0 };
+    SYSTEM_INFO sysinfo;
+    GetSystemInfo(&sysinfo);
+    jcrci.ControlFlags = JOB_OBJECT_CPU_RATE_CONTROL_ENABLE |
+      JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP;
+    jcrci.CpuRate = min(10000, cpuRate);
+    if(SetInformationJobObject(jobObject, JobObjectCpuRateControlInformation,
+      &jcrci, sizeof(jcrci)) == 0)
+    {
+      err = GetLastError();
+      CloseHandle(jobObject);
+      return err;
+    }
+  }
+#endif

   if(AssignProcessToJobObject(jobObject, GetCurrentProcess()) == 0)
   {
@@ -372,10 +461,11 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
 {
   DWORD dwErrorCode = ERROR_SUCCESS;
   TaskCommandOption command = TaskInvalid;
+  long memory = -1;
+  long cpuRate = -1;

-  if (!ParseCommandLine(argc, argv, &command)) {
+  if (!ParseCommandLine(argc, argv, &command, &memory, &cpuRate)) {
     dwErrorCode = ERROR_INVALID_COMMAND_LINE;
-
     fwprintf(stderr, L"Incorrect command line arguments.\n\n");
     TaskUsage();
     goto TaskExit;
@@ -385,13 +475,14 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
   {
     // Create the task jobobject
     //
-    dwErrorCode = createTask(argv[2], argv[3]);
+    dwErrorCode = createTaskWithLimit(argv[argc-2], argv[argc-1], memory,
+      cpuRate);
     if (dwErrorCode != ERROR_SUCCESS)
     {
       ReportErrorCode(L"createTask", dwErrorCode);
       goto TaskExit;
     }
-  } else if (command == TaskIsAlive)
+  }
+  else if (command == TaskIsAlive)
   {
     // Check if task jobobject
     //
@@ -415,7 +506,8 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
       ReportErrorCode(L"isTaskAlive returned false", dwErrorCode);
       goto TaskExit;
     }
-  } else if (command == TaskKill)
+  }
+  else if (command == TaskKill)
   {
     // Check if task jobobject
     //
@@ -425,7 +517,8 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
       ReportErrorCode(L"killTask", dwErrorCode);
       goto TaskExit;
     }
-  } else if (command == TaskProcessList)
+  }
+  else if (command == TaskProcessList)
   {
     // Check if task jobobject
     //
@@ -435,7 +528,8 @@ int Task(__in int argc, __in_ecount(argc) wchar_t *argv[])
      ReportErrorCode(L"printTaskProcessList", dwErrorCode);
      goto TaskExit;
     }
-  } else
+  }
+  else
   {
     // Should not happen
     //
@@ -452,16 +546,27 @@ void TaskUsage()
   // jobobject's are being used.
   // ProcessTree.isSetsidSupported()
  fwprintf(stdout, L"\
-    Usage: task create [TASKNAME] [COMMAND_LINE] |\n\
-          task isAlive [TASKNAME] |\n\
-          task kill [TASKNAME]\n\
-          task processList [TASKNAME]\n\
-    Creates a new task jobobject with taskname\n\
-    Checks if task jobobject is alive\n\
-    Kills task jobobject\n\
-    Prints to stdout a list of processes in the task\n\
-    along with their resource usage. One process per line\n\
-    and comma separated info per process\n\
-    ProcessId,VirtualMemoryCommitted(bytes),\n\
-    WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
+Usage: task create [OPTIONS] [TASKNAME] [COMMAND_LINE]\n\
+         Creates a new task job object with taskname and options to set CPU\n\
+         and memory limits on the job object\n\
+\n\
+         OPTIONS: -c [cpu rate] set the cpu rate limit on the job object.\n\
+                  -m [memory] set the memory limit on the job object.\n\
+         The cpu limit is an integral value of percentage * 100. The memory\n\
+         limit is an integral value of memory in MB.\n\
+         The limit will not be set if 0 or a negative value is passed in as\n\
+         the parameter.\n\
+\n\
+       task isAlive [TASKNAME] |\n\
+         Checks if task job object is alive\n\
+\n\
+       task kill [TASKNAME]\n\
+         Kills task job object\n\
+\n\
+       task processList [TASKNAME]\n\
+         Prints to stdout a list of processes in the task\n\
+         along with their resource usage. One process per line\n\
+         and comma separated info per process\n\
+         ProcessId,VirtualMemoryCommitted(bytes),\n\
+         WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
 }

diff --git hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
new file mode 100644
index 0000000..503b37a
--- /dev/null
+++ hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
@@ -0,0 +1,28 @@
+
+
+
+
+
+  $(VCInstallDir)bin\x86_amd64;$(VCInstallDir)bin;$(WindowsSdkDir)bin\NETFX 4.0 Tools;$(MSBuildProgramFiles32)\Windows Kits\8.1\bin\x86;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;$(MSBuildProgramFiles32)\HTML Help Workshop;$(FrameworkSDKDir)\bin;$(MSBuildToolsPath32);$(VSInstallDir);$(SystemRoot)\SysWow64;$(FxCopDir);$(PATH)
+  $(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(FrameworkSDKDir)\include;
+  $(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(MSBuildProgramFiles32)\Windows Kits\8.1\lib\win8\um\x64;$(MSBuildProgramFiles32)\Windows Kits\8.1\Lib\winv6.3\um\x64;$(FrameworkSDKDir)\lib\x64
+  $(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(FrameworkSDKDir)\include;$(MSBuildToolsPath32);$(VCInstallDir)atlmfc\lib;$(VCInstallDir)lib;
+
+

diff --git hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
index 5b9a195..ed9f062 100644
--- hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
+++ hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
@@ -67,6 +67,9 @@
+
+
+
@@ -179,4 +182,4 @@
-
\ No newline at end of file
+

diff --git hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
index 588b217..823eadb 100644
--- hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
+++ hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
@@ -25,6 +25,7 @@
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.Random;

 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
@@ -33,6 +34,7 @@
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+
 import static org.junit.Assume.*;
 import static org.hamcrest.CoreMatchers.*;

@@ -521,4 +523,67 @@ public void testReadLink() throws IOException {
       assertThat(ece.getExitCode(), is(1));
     }
   }
+
+  @Test (timeout = 30000)
+  public void testTaskCreate() throws IOException {
+    // Generate a unique job id
+    Random rand = new Random();
+    long id = rand.nextLong();
+
+    // Run a task without any options
+    String out = Shell.execCommand(Shell.WINUTILS, "task", "create",
+        "job" + id, "cmd /c echo job" + id);
+    assertTrue(out.trim().equals("job" + id));
+
+    // Run a task with limits explicitly disabled (-1)
+    id = rand.nextLong();
+    out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
+        "-1", "job" + id, "cmd /c echo job" + id);
+    assertTrue(out.trim().equals("job" + id));
+
+    // Run a task with limits (128MB should be enough for a cmd)
+    id = rand.nextLong();
+    out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "10000", "-m",
+        "128", "job" + id, "cmd /c echo job" + id);
+    assertTrue(out.trim().equals("job" + id));
+
+    // Run a task without enough memory
+    try {
+      id = rand.nextLong();
+      out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-m", "128", "job"
+          + id, "java -Xmx256m -version");
+      fail("Failed to get Shell.ExitCodeException with insufficient memory");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1));
+    }
+
+    // Run tasks with wrong parameters
+    //
+    try {
+      id = rand.nextLong();
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
+          "-1", "foo", "job" + id, "cmd /c echo job" + id);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+
+    try {
+      id = rand.nextLong();
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-m", "-1",
+          "job" + id, "cmd /c echo job" + id);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+
+    try {
+      id = rand.nextLong();
+      Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "foo",
+          "job" + id, "cmd /c echo job" + id);
+      fail("Failed to get Shell.ExitCodeException with bad parameters");
+    } catch (Shell.ExitCodeException ece) {
+      assertThat(ece.getExitCode(), is(1639));
+    }
+  }
 }

diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e642d05..eeed417 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -877,8 +877,8 @@
     Percentage of CPU that can be allocated
     for containers. This setting allows users to limit the amount of
-    CPU that YARN containers use. Currently functional only
-    on Linux using cgroups. The default is to use 100% of CPU.
+    CPU that YARN containers use. Currently functional on Linux using cgroups
+    or Windows using job objects. The default is to use 100% of CPU.
     yarn.nodemanager.resource.percentage-physical-cpu-limit
     100

diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 7391872..7e795a6 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
@@ -257,6 +258,11 @@ protected Path getPidFilePath(ContainerId containerId) {
       readLock.unlock();
     }
   }
+
+  protected String[] getRunCommand(String command, String groupId,
+      Configuration conf) {
+    return getRunCommand(command, groupId, conf, null);
+  }

   /**
    * Return a command to execute the given command in OS shell.
@@ -264,8 +270,8 @@ protected Path getPidFilePath(ContainerId containerId) {
    * and associate the given groupId in a process group. On
    * non-Windows, groupId is ignored.
    */
-  protected static String[] getRunCommand(String command, String groupId,
-      Configuration conf) {
+  protected String[] getRunCommand(String command, String groupId,
+      Configuration conf, Resource resource) {
     boolean containerSchedPriorityIsSet = false;
     int containerSchedPriorityAdjustment =
       YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY;

diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index a7af1c5..969261e 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -185,7 +185,7 @@ public int launchContainer(Container container,

       // Setup command to run
       String[] command = getRunCommand(sb.getWrapperScriptPath().toString(),
-        containerIdStr, this.getConf());
+        containerIdStr, this.getConf(), container.getResource());

       LOG.info("launchContainer: " + Arrays.toString(command));
       shExec = new ShellCommandExecutor(

diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java
new file mode 100644
index 0000000..dddbe26
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java
@@ -0,0 +1,54 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.nodemanager.util.NodeManagerHardwareUtils;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+
+public class WindowsContainerExecutor extends DefaultContainerExecutor {
+
+  @Override
+  protected String[] getRunCommand(String command, String groupId,
+      Configuration conf, Resource resource) {
+    if (!Shell.WINDOWS)
+      return null;
+
+    int cpuRate = -1;
+    int memory = -1;
+    if (resource != null) {
+      memory = resource.getMemory();
+
+      int vcores = resource.getVirtualCores();
+      // cap overall usage to the number of cores allocated to YARN
+      float yarnProcessors = NodeManagerHardwareUtils.getContainersCores(
+          ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf),
+          conf);
+      // CPU should be set to a percentage * 100, e.g. 20% cpu rate limit
+      // should be set as 20 * 100. The following setting is equal to:
+      //   100 * (100 * (vcores / Total # of cores allocated to YARN))
+      cpuRate = Math.min(10000, (int) ((vcores * 10000) / yarnProcessors));
+    }
+
+    return new String[] { Shell.WINUTILS, "task", "create", "-m",
+        String.valueOf(memory), "-c", String.valueOf(cpuRate), groupId,
+        "cmd /c " + command };
+  }
+}

diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
index c04ec29..146eb91 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
@@ -31,7 +31,8 @@
   @Test (timeout = 5000)
   public void testRunCommandNoPriority() throws Exception {
     Configuration conf = new Configuration();
-    String[] command = ContainerExecutor.getRunCommand("echo", "group1", conf);
+    ContainerExecutor exe = new DefaultContainerExecutor();
+    String[] command = exe.getRunCommand("echo", "group1", conf);
     assertTrue("first command should be the run command for the platform",
         command[0].equals(Shell.WINUTILS) || command[0].equals("bash"));
   }
@@ -40,7 +41,8 @@ public void testRunCommandNoPriority() throws Exception {
   public void testRunCommandwithPriority() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, 2);
-    String[] command = ContainerExecutor.getRunCommand("echo", "group1", conf);
+    ContainerExecutor exe = new DefaultContainerExecutor();
+    String[] command = exe.getRunCommand("echo", "group1", conf);
     if (Shell.WINDOWS) {
       // windows doesn't currently support
       assertEquals("first command should be the run command for the platform",
@@ -54,7 +56,7 @@ public void testRunCommandwithPriority() throws Exception {

     // test with negative number
     conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, -5);
-    command = ContainerExecutor.getRunCommand("echo", "group1", conf);
+    command = exe.getRunCommand("echo", "group1", conf);
     if (Shell.WINDOWS) {
       // windows doesn't currently support
       assertEquals("first command should be the run command for the platform",

diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java
new file mode 100644
index 0000000..d3a6c95
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import static org.junit.Assume.assumeTrue;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestWindowsContainerExecutor {
+  @Before
+  public void setUp() {
+    // Not supported on non-Windows platforms
+    assumeTrue(Shell.WINDOWS);
+  }
+
+  @Test
+  public void testRunCommandWithResources() {
+    Configuration conf = new Configuration();
+    ContainerExecutor wce = new WindowsContainerExecutor();
+    String[] command = wce.getRunCommand("echo", "group1", conf,
+        Resource.newInstance(1024, 1));
+    // Assert the cpu and memory limits are set correctly in the command
+    String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
+        "1", "group1", "cmd /c " + "echo" };
+    Assert.assertTrue(Arrays.equals(expected, command));
+  }
+}
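Reviewer note (not part of the patch): the option scanning in ParseCommandLine for "task create" is a little subtle (the loop consumes optional "-c <n>" / "-m <n>" pairs and then requires exactly a task name and a command line to remain). The following standalone Java mirror of that logic is only a sketch for checking which argument shapes are accepted; the class name, method name, and the argv arrays are invented for illustration.

import java.util.Arrays;

public class CreateArgsCheck {
  // Mirrors the C loop in ParseCommandLine: scan optional "-c <n>" / "-m <n>"
  // pairs starting at argv[2]; exactly two arguments (taskname, command line)
  // must remain, and argc must be between 4 and 8.
  static boolean accepts(String[] argv) {
    int argc = argv.length;
    if (argc < 4 || argc > 8 || !argv[1].equals("create")) {
      return false;
    }
    int i;
    for (i = 2; i < argc - 3; i++) {
      if (argv[i].equals("-c") || argv[i].equals("-m")) {
        try {
          Long.parseLong(argv[i + 1]);  // GetLimit: the value must be an integer
        } catch (NumberFormatException e) {
          return false;                 // e.g. "-c foo" or "-c -m" is rejected
        }
        i++;                            // skip the consumed value
      } else {
        break;                          // first non-option is the task name
      }
    }
    return argc - i == 2;
  }

  public static void main(String[] args) {
    System.out.println(accepts(new String[] {
        "task", "create", "job1", "cmd /c echo hi" }));                   // true
    System.out.println(accepts(new String[] {
        "task", "create", "-c", "2500", "-m", "2048", "job1", "cmd" }));  // true
    System.out.println(accepts(new String[] {
        "task", "create", "-c", "foo", "job1", "cmd" }));                 // false
    System.out.println(accepts(new String[] {
        "task", "create", "-c", "-m", "-1", "job1", "cmd" }));            // false
  }
}

The rejected shapes correspond to the cases in testTaskCreate that expect exit code 1639 (ERROR_INVALID_COMMAND_LINE).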
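Reviewer note (not part of the patch): a small standalone sketch of the unit conversions the patch performs, useful when eyeballing the winutils arguments a container will get. The node size (8 cores fully allocated to YARN) and the container request (2 vcores, 2048 MB) are made-up example values, not anything taken from the patch.

public class WinutilsLimitMath {
  public static void main(String[] args) {
    float yarnProcessors = 8f;   // assumed: 8 cores, 100% allocated to YARN
    int vcores = 2;              // assumed container request
    int memoryMb = 2048;         // assumed container request, in MB

    // Same formula as WindowsContainerExecutor.getRunCommand: CPU rate is a
    // percentage of the whole machine times 100, hard-capped at 10000 (100%).
    int cpuRate = Math.min(10000, (int) ((vcores * 10000) / yarnProcessors));
    System.out.println("-c " + cpuRate);   // prints "-c 2500", i.e. a 25% hard cap

    // task.c multiplies the -m value by 1024 * 1024 before storing it in
    // ProcessMemoryLimit / JobMemoryLimit, so -m is interpreted as MB.
    long jobMemoryLimitBytes = (long) memoryMb * 1024 * 1024;
    System.out.println("-m " + memoryMb + " (" + jobMemoryLimitBytes + " bytes)");
  }
}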
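Reviewer note (not part of the patch): the patch adds WindowsContainerExecutor but does not change any configuration, so I am assuming a NodeManager on Windows would opt in through the existing yarn.nodemanager.container-executor.class setting; treat the snippet below as a sketch of that assumption, not something the patch itself establishes. The CPU rate control path in winutils additionally requires a build against the Windows 8.1 SDK (the NTDDI_WIN8 guard), per the BUILDING.txt change above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.WindowsContainerExecutor;

public class WindowsExecutorConf {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Equivalent to setting the property in yarn-site.xml on a Windows node.
    conf.set("yarn.nodemanager.container-executor.class",
        WindowsContainerExecutor.class.getName());
    System.out.println(conf.get("yarn.nodemanager.container-executor.class"));
  }
}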