Index: BUILDING.txt
===================================================================
--- BUILDING.txt (revision 1618292)
+++ BUILDING.txt (working copy)
@@ -189,19 +189,24 @@
* Maven 3.0 or later
* Findbugs 1.3.9 (if running findbugs)
* ProtocolBuffer 2.5.0
+* Windows SDK 7.1 or Visual Studio 2010 Professional
* CMake 2.6 or newer
-* Windows SDK or Visual Studio 2010 Professional
* Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
* zlib headers (if building native code bindings for zlib)
* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
+* Windows SDK 8.1 (if building CPU rate control for Windows container executor)
If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
Do not use Visual Studio Express. It does not support compiling for 64-bit,
-which is problematic if running a 64-bit system. The Windows SDK is free to
+which is problematic if running a 64-bit system. The Windows SDK 7.1 is free to
download here:
http://www.microsoft.com/en-us/download/details.aspx?id=8279
+The Windows SDK 8.1 is available to download at:
+
+http://msdn.microsoft.com/en-us/windows/bg162891.aspx
+
----------------------------------------------------------------------------------
Building:
Index: hadoop-common-project/hadoop-common/src/main/winutils/task.c
===================================================================
--- hadoop-common-project/hadoop-common/src/main/winutils/task.c (revision 1618292)
+++ hadoop-common-project/hadoop-common/src/main/winutils/task.c (working copy)
@@ -40,11 +40,38 @@
} TaskCommandOption;
//----------------------------------------------------------------------------
+// Function: GetLimit
+//
+// Description:
+// Get the resource limit value in long type given the command line argument.
+//
+// Returns:
+// TRUE: If the value was successfully parsed
+// FALSE: otherwise
+static BOOL GetLimit(__in const wchar_t *str, __out long *value)
+{
+ wchar_t *end = NULL;
+ if (str == NULL || value == NULL) return FALSE;
+ *value = wcstol(str, &end, 10);
+ if (end == NULL || *end != '\0')
+ {
+ *value = -1;
+ return FALSE;
+ }
+ else
+ {
+ return TRUE;
+ }
+}
+
+//----------------------------------------------------------------------------
// Function: ParseCommandLine
//
// Description:
// Parses the given command line. On success, out param 'command' contains
-// the user specified command.
+// the user specified command with optional memory and cpu limits. The
+// limits will be set to negative values if no options are given on the
+// command line.
//
// Returns:
// TRUE: If the command line is valid
@@ -51,7 +78,9 @@
// FALSE: otherwise
static BOOL ParseCommandLine(__in int argc,
__in_ecount(argc) wchar_t *argv[],
- __out TaskCommandOption *command)
+ __out TaskCommandOption *command,
+ __out_opt long *memory,
+ __out_opt long *vcore)
{
*command = TaskInvalid;
@@ -78,9 +107,44 @@
}
}
- if (argc == 4) {
+ if (argc >= 4 && argc <= 8) {
if (wcscmp(argv[1], L"create") == 0)
{
+ int i;
+ for (i = 2; i < argc - 3; i++)
+ {
+ if (wcscmp(argv[i], L"-c") == 0)
+ {
+ if (vcore != NULL && !GetLimit(argv[i + 1], vcore))
+ {
+ return FALSE;
+ }
+ else
+ {
+ i++;
+ continue;
+ }
+ }
+ else if (wcscmp(argv[i], L"-m") == 0)
+ {
+ if (memory != NULL && !GetLimit(argv[i + 1], memory))
+ {
+ return FALSE;
+ }
+ else
+ {
+ i++;
+ continue;
+ }
+ }
+ else
+ {
+ break;
+ }
+ }
+ if (argc - i != 2)
+ return FALSE;
+
*command = TaskCreate;
return TRUE;
}
@@ -90,16 +154,17 @@
}
//----------------------------------------------------------------------------
-// Function: createTask
+// Function: createTaskWithLimit
//
// Description:
-// Creates a task via a jobobject. Outputs the
-// appropriate information to stdout on success, or stderr on failure.
+// Creates a task via a jobobject, optionally set CPU and memory limits.
+// Outputs the appropriate information to stdout on success, or stderr on failure.
//
// Returns:
// ERROR_SUCCESS: On success
// GetLastError: otherwise
-DWORD createTask(__in PCWSTR jobObjName,__in PWSTR cmdLine)
+DWORD createTaskWithLimit(__in PCWSTR jobObjName, __in PWSTR cmdLine,
+ __in long memory, __in long vcores)
{
DWORD err = ERROR_SUCCESS;
DWORD exitCode = EXIT_FAILURE;
@@ -119,6 +184,12 @@
return err;
}
jeli.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+ if (memory > 0)
+ {
+ jeli.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_JOB_MEMORY;
+ jeli.ProcessMemoryLimit = memory * 1024 * 1024;
+ jeli.JobMemoryLimit = memory * 1024 * 1024;
+ }
if(SetInformationJobObject(jobObject,
JobObjectExtendedLimitInformation,
&jeli,
@@ -127,7 +198,28 @@
err = GetLastError();
CloseHandle(jobObject);
return err;
- }
+ }
+#ifdef NTDDI_WIN8
+ if (vcores > 0)
+ {
+ JOBOBJECT_CPU_RATE_CONTROL_INFORMATION jcrci = { 0 };
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo(&sysinfo);
+ jcrci.ControlFlags = JOB_OBJECT_CPU_RATE_CONTROL_ENABLE |
+ JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP;
+ // CPU should be set to a percentage * 100, e.g. 20% cpu rate limit
+ // should be set as 20 * 100.
+ // The following setting = 100 * (100 * (vcores / total # of cores))
+ jcrci.CpuRate = min(10000, (vcores * 10000) / sysinfo.dwNumberOfProcessors);
+ if(SetInformationJobObject(jobObject, JobObjectCpuRateControlInformation,
+ &jcrci, sizeof(jcrci)) == 0)
+ {
+ err = GetLastError();
+ CloseHandle(jobObject);
+ return err;
+ }
+ }
+#endif
if(AssignProcessToJobObject(jobObject, GetCurrentProcess()) == 0)
{
@@ -372,10 +464,11 @@
{
DWORD dwErrorCode = ERROR_SUCCESS;
TaskCommandOption command = TaskInvalid;
+ long memory = -1;
+ long vcores = -1;
- if (!ParseCommandLine(argc, argv, &command)) {
+ if (!ParseCommandLine(argc, argv, &command, &memory, &vcores)) {
dwErrorCode = ERROR_INVALID_COMMAND_LINE;
-
fwprintf(stderr, L"Incorrect command line arguments.\n\n");
TaskUsage();
goto TaskExit;
@@ -385,13 +478,14 @@
{
// Create the task jobobject
//
- dwErrorCode = createTask(argv[2], argv[3]);
+ dwErrorCode = createTaskWithLimit(argv[argc-2], argv[argc-1], memory, vcores);
if (dwErrorCode != ERROR_SUCCESS)
{
ReportErrorCode(L"createTask", dwErrorCode);
goto TaskExit;
}
- } else if (command == TaskIsAlive)
+ }
+ else if (command == TaskIsAlive)
{
// Check if task jobobject
//
@@ -415,7 +509,8 @@
ReportErrorCode(L"isTaskAlive returned false", dwErrorCode);
goto TaskExit;
}
- } else if (command == TaskKill)
+ }
+ else if (command == TaskKill)
{
// Check if task jobobject
//
@@ -425,7 +520,8 @@
ReportErrorCode(L"killTask", dwErrorCode);
goto TaskExit;
}
- } else if (command == TaskProcessList)
+ }
+ else if (command == TaskProcessList)
{
// Check if task jobobject
//
@@ -435,7 +531,8 @@
ReportErrorCode(L"printTaskProcessList", dwErrorCode);
goto TaskExit;
}
- } else
+ }
+ else
{
// Should not happen
//
@@ -452,16 +549,28 @@
// jobobject's are being used.
// ProcessTree.isSetsidSupported()
fwprintf(stdout, L"\
- Usage: task create [TASKNAME] [COMMAND_LINE] |\n\
- task isAlive [TASKNAME] |\n\
- task kill [TASKNAME]\n\
- task processList [TASKNAME]\n\
- Creates a new task jobobject with taskname\n\
- Checks if task jobobject is alive\n\
- Kills task jobobject\n\
- Prints to stdout a list of processes in the task\n\
- along with their resource usage. One process per line\n\
- and comma separated info per process\n\
- ProcessId,VirtualMemoryCommitted(bytes),\n\
- WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
+Usage: task create [OPTIONS] [TASKNAME] [COMMAND_LINE]\n\
+ Creates a new task job object with taskname and options to set CPU\n\
+ and memory limits on the job object\n\
+\n\
+ OPTIONS: -c [cores] set virtual core limits on the job object.\n\
+ -m [memory] set the memory limit on the job object.\n\
+ The core limit is an integral value of number of cores. The memory\n\
+ limit is an integral number of memory in MB. The definition\n\
+ follows the org.apache.hadoop.yarn.api.records.Resource model.\n\
+ The limit will not be set if 0 or negative value is passed in as\n\
+ parameter(s).\n\
+\n\
+ task isAlive [TASKNAME] |\n\
+ Checks if task job object is alive\n\
+\n\
+ task kill [TASKNAME]\n\
+ Kills task job object\n\
+\n\
+ task processList [TASKNAME]\n\
+ Prints to stdout a list of processes in the task\n\
+ along with their resource usage. One process per line\n\
+ and comma separated info per process\n\
+ ProcessId,VirtualMemoryCommitted(bytes),\n\
+ WorkingSetSize(bytes),CpuTime(Millisec,Kernel+User)\n");
}
Index: hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
===================================================================
--- hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props (revision 0)
+++ hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props (working copy)
@@ -0,0 +1,28 @@
+
+
+
+
+
+
+ $(VCInstallDir)bin\x86_amd64;$(VCInstallDir)bin;$(WindowsSdkDir)bin\NETFX 4.0 Tools;$(MSBuildProgramFiles32)\Windows Kits\8.1\bin\x86;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;$(MSBuildProgramFiles32)\HTML Help Workshop;$(FrameworkSDKDir)\bin;$(MSBuildToolsPath32);$(VSInstallDir);$(SystemRoot)\SysWow64;$(FxCopDir);$(PATH)
+ $(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(FrameworkSDKDir)\include;
+ $(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(MSBuildProgramFiles32)\Windows Kits\8.1\lib\win8\um\x64;$(MSBuildProgramFiles32)\Windows Kits\8.1\Lib\winv6.3\um\x64;$(FrameworkSDKDir)\lib\x64
+ $(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(FrameworkSDKDir)\include;$(MSBuildToolsPath32);$(VCInstallDir)atlmfc\lib;$(VCInstallDir)lib;
+
+
+
Index: hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj
===================================================================
--- hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj (revision 1618292)
+++ hadoop-common-project/hadoop-common/src/main/winutils/winutils.vcxproj (working copy)
@@ -67,6 +67,9 @@
+
+
+
@@ -179,4 +182,4 @@
-
\ No newline at end of file
+
Index: hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java
===================================================================
--- hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java (revision 1618292)
+++ hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestWinUtils.java (working copy)
@@ -25,6 +25,7 @@
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
+import java.util.Random;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
@@ -33,6 +34,7 @@
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+
import static org.junit.Assume.*;
import static org.hamcrest.CoreMatchers.*;
@@ -521,4 +523,67 @@
assertThat(ece.getExitCode(), is(1));
}
}
+
+ @Test (timeout = 30000)
+ public void testTaskCreate() throws IOException {
+ // Generate a unique job id
+ Random rand = new Random();
+ long id = rand.nextLong();
+
+ // Run a task without any options
+ String out = Shell.execCommand(Shell.WINUTILS, "task", "create",
+ "job" + id, "cmd /c echo job" + id);
+ assertTrue(out.trim().equals("job" + id));
+
+ // Run a task without any limits
+ id = rand.nextLong();
+ out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
+ "-1", "job" + id, "cmd /c echo job" + id);
+ assertTrue(out.trim().equals("job" + id));
+
+ // Run a task with limits (128MB should be enough for a cmd)
+ id = rand.nextLong();
+ out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "1", "-m",
+ "128", "job" + id, "cmd /c echo job" + id);
+ assertTrue(out.trim().equals("job" + id));
+
+ // Run a task without enough memory
+ try {
+ id = rand.nextLong();
+ out = Shell.execCommand(Shell.WINUTILS, "task", "create", "-m", "128", "job"
+ + id, "java -Xmx256m -version");
+ fail("Failed to get Shell.ExitCodeException with insufficient memory");
+ } catch (Shell.ExitCodeException ece) {
+ assertThat(ece.getExitCode(), is(1));
+ }
+
+ // Run tasks with wrong parameters
+ //
+ try {
+ id = rand.nextLong();
+ Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-1", "-m",
+ "-1", "foo", "job" + id, "cmd /c echo job" + id);
+ fail("Failed to get Shell.ExitCodeException with bad parameters");
+ } catch (Shell.ExitCodeException ece) {
+ assertThat(ece.getExitCode(), is(1639));
+ }
+
+ try {
+ id = rand.nextLong();
+ Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "-m", "-1",
+ "job" + id, "cmd /c echo job" + id);
+ fail("Failed to get Shell.ExitCodeException with bad parameters");
+ } catch (Shell.ExitCodeException ece) {
+ assertThat(ece.getExitCode(), is(1639));
+ }
+
+ try {
+ id = rand.nextLong();
+ Shell.execCommand(Shell.WINUTILS, "task", "create", "-c", "foo",
+ "job" + id, "cmd /c echo job" + id);
+ fail("Failed to get Shell.ExitCodeException with bad parameters");
+ } catch (Shell.ExitCodeException ece) {
+ assertThat(ece.getExitCode(), is(1639));
+ }
+ }
}
Index: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
===================================================================
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java (revision 1618292)
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
@@ -257,6 +258,11 @@
readLock.unlock();
}
}
+
+ protected String[] getRunCommand(String command, String groupId,
+ Configuration conf) {
+ return getRunCommand(command, groupId, conf, null);
+ }
/**
* Return a command to execute the given command in OS shell.
@@ -264,8 +270,8 @@
* and associate the given groupId in a process group. On
* non-Windows, groupId is ignored.
*/
- protected static String[] getRunCommand(String command, String groupId,
- Configuration conf) {
+ protected String[] getRunCommand(String command, String groupId,
+ Configuration conf, Resource resource) {
boolean containerSchedPriorityIsSet = false;
int containerSchedPriorityAdjustment =
YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY;
Index: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
===================================================================
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java (revision 1618292)
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java (working copy)
@@ -185,7 +185,7 @@
// Setup command to run
String[] command = getRunCommand(sb.getWrapperScriptPath().toString(),
- containerIdStr, this.getConf());
+ containerIdStr, this.getConf(), container.getResource());
LOG.info("launchContainer: " + Arrays.toString(command));
shExec = new ShellCommandExecutor(
Index: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java
===================================================================
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java (revision 0)
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsContainerExecutor.java (working copy)
@@ -0,0 +1,43 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.Resource;
+
+public class WindowsContainerExecutor extends DefaultContainerExecutor {
+
+ @Override
+ protected String[] getRunCommand(String command, String groupId,
+ Configuration conf, Resource resource) {
+ if (!Shell.WINDOWS)
+ return null;
+
+ int vcores = -1;
+ int memory = -1;
+ if (resource != null) {
+ vcores = resource.getVirtualCores();
+ memory = resource.getMemory();
+ }
+
+ return new String[] { Shell.WINUTILS, "task", "create", "-m",
+ String.valueOf(memory), "-c", String.valueOf(vcores), groupId,
+ "cmd /c " + command };
+ }
+}
Index: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java
===================================================================
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java (revision 1618292)
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerExecutor.java (working copy)
@@ -31,7 +31,8 @@
@Test (timeout = 5000)
public void testRunCommandNoPriority() throws Exception {
Configuration conf = new Configuration();
- String[] command = ContainerExecutor.getRunCommand("echo", "group1", conf);
+ ContainerExecutor exe = new DefaultContainerExecutor();
+ String[] command = exe.getRunCommand("echo", "group1", conf);
assertTrue("first command should be the run command for the platform",
command[0].equals(Shell.WINUTILS) || command[0].equals("bash"));
}
@@ -40,7 +41,8 @@
public void testRunCommandwithPriority() throws Exception {
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, 2);
- String[] command = ContainerExecutor.getRunCommand("echo", "group1", conf);
+ ContainerExecutor exe = new DefaultContainerExecutor();
+ String[] command = exe.getRunCommand("echo", "group1", conf);
if (Shell.WINDOWS) {
// windows doesn't currently support
assertEquals("first command should be the run command for the platform",
@@ -54,7 +56,7 @@
// test with negative number
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, -5);
- command = ContainerExecutor.getRunCommand("echo", "group1", conf);
+ command = exe.getRunCommand("echo", "group1", conf);
if (Shell.WINDOWS) {
// windows doesn't currently support
assertEquals("first command should be the run command for the platform",
Index: hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java
===================================================================
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java (revision 0)
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestWindowsContainerExecutor.java (working copy)
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager;
+
+import static org.junit.Assume.assumeTrue;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestWindowsContainerExecutor {
+ @Before
+ public void setUp() {
+ // Not supported on non-Windows platforms
+ assumeTrue(Shell.WINDOWS);
+ }
+
+ @Test
+ public void testRunCommandWithResources() {
+ Configuration conf = new Configuration();
+ ContainerExecutor wce = new WindowsContainerExecutor();
+ String[] command = wce.getRunCommand("echo", "group1", conf,
+ Resource.newInstance(1024, 1));
+ // Assert the cpu and memory limits are set correctly in the command
+ String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
+ "1", "group1", "cmd /c " + "echo" };
+ Assert.assertTrue(Arrays.equals(expected, command));
+ }
+}