diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java index 4305824..2678a78 100755 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java @@ -890,6 +890,12 @@ private static ContainerLaunchContext createCommonContainerLaunchContext( MRJobConfig.DEFAULT_SHELL) ); + // Add application queue + environment.put( + Environment.YARN_RESOURCEMANAGER_APPLICATION_QUEUE.name(), + conf.get(MRJobConfig.QUEUE_NAME) + ); + // Add pwd to LD_LIBRARY_PATH, add this before adding anything else MRApps.addToEnvironment( environment, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 46e3323..212146c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1461,9 +1461,28 @@ public static boolean isAclEnabled(Configuration conf) { public static final String DEFAULT_NM_DOCKER_DEFAULT_CONTAINER_NETWORK = "host"; + /** True if Java containers should run with the Java security manager.*/ + public static final String YARN_CONTAINER_SANDBOX = + NM_PREFIX + "linux-container-executor.sandbox-mode"; + public static final boolean DEFAULT_YARN_CONTAINER_SANDBOX = false; + + /** 
Permissions for application local directories.*/ + public static final String YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS = + YARN_CONTAINER_SANDBOX + ".local-dirs.permissions"; + public static final String DEFAULT_YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS = + "read"; + + /** Location for non-default java policy file.*/ + public static final String YARN_CONTAINER_SANDBOX_POLICY = + YARN_CONTAINER_SANDBOX + ".policy"; + + /** The application queue which will run without the java security manager.*/ + public static final String YARN_CONTAINER_SANDBOX_WHITELIST_QUEUE = + YARN_CONTAINER_SANDBOX + ".queue"; + /** The path to the Linux container executor.*/ public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH = - NM_PREFIX + "linux-container-executor.path"; + NM_PREFIX + "linux-container-executor.path"; /** * The UNIX group that the linux-container-executor should run as. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 6890b25..1ee20ac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.server.nodemanager.util.LCEResourcesHandler; import java.io.File; import java.io.IOException; +import java.io.OutputStream; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; @@ -362,6 +363,18 @@ public void buildMainArgs(List command, String user, String appId, } @Override + public void 
writeLaunchEnv(OutputStream out, Map environment, + Map> resources, List command, Path logDir, + String user) throws IOException { + try { + linuxContainerRuntime.prepareContainer(environment, resources, command); + } catch (ContainerExecutionException e) { + throw new IOException("Unable to prepare container: ", e); + } + super.writeLaunchEnv(out, environment, resources, command, logDir, user); + } + + @Override public int launchContainer(ContainerStartContext ctx) throws IOException { Container container = ctx.getContainer(); Path nmPrivateContainerScriptPath = ctx.getNmPrivateContainerScriptPath(); @@ -380,6 +393,7 @@ public int launchContainer(ContainerStartContext ctx) throws IOException { verifyUsernamePattern(user); String runAsUser = getRunAsUser(user); + ContainerRuntimeContext runtimeContext = null; ContainerId containerId = container.getContainerId(); String containerIdStr = containerId.toString(); @@ -467,8 +481,8 @@ public int launchContainer(ContainerStartContext ctx) throws IOException { if (tcCommandFile != null) { builder.setExecutionAttribute(TC_COMMAND_FILE, tcCommandFile); } - - linuxContainerRuntime.launchContainer(builder.build()); + runtimeContext = builder.build(); + linuxContainerRuntime.launchContainer(runtimeContext); } else { LOG.info( "Container was marked as inactive. Returning terminated error"); @@ -510,16 +524,19 @@ public int launchContainer(ContainerStartContext ctx) throws IOException { resourcesHandler.postExecute(containerId); try { + linuxContainerRuntime.reapContainer(runtimeContext); if (resourceHandlerChain != null) { resourceHandlerChain.postComplete(containerId); } } catch (ResourceHandlerException e) { LOG.warn("ResourceHandlerChain.postComplete failed for " + "containerId: " + containerId + ". Exception: " + e); + } catch (ContainerExecutionException e) { + LOG.warn("LinuxContainerRuntime.reapContainer failed for " + + "containerId: " + containerId + ". 
Exception: " + e); } + return 0; } - - return 0; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java index ee94aaa..f9c1ce9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; @@ -36,6 +37,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import java.util.List; +import java.util.Map; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; @@ -72,7 +74,8 @@ public void initialize(Configuration conf) } @Override - public void prepareContainer(ContainerRuntimeContext ctx) + public void prepareContainer(Map environment, + Map> resources, List command) throws ContainerExecutionException { //nothing to do here at the moment. 
} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java index c12858e..1196bf8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java @@ -25,20 +25,22 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; +import java.util.List; import java.util.Map; /** * This class is a {@link ContainerRuntime} implementation that delegates all - * operations to either a {@link DefaultLinuxContainerRuntime} instance or a - * {@link DockerLinuxContainerRuntime} instance, depending on whether the - * {@link DockerLinuxContainerRuntime} instance believes the operation to be - * requesting a Docker container. 
+ * operations to a {@link DefaultLinuxContainerRuntime} instance, a + * {@link DockerLinuxContainerRuntime} instance, or a + * {@link JavaSandboxLinuxContainerRuntime} instance depending on whether + * each instance believes the operation to be within its scope. * * @see DockerLinuxContainerRuntime#isDockerContainerRequested */ @@ -49,6 +51,7 @@ .getLog(DelegatingLinuxContainerRuntime.class); private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime; private DockerLinuxContainerRuntime dockerLinuxContainerRuntime; + private JavaSandboxLinuxContainerRuntime javaSandboxLinuxContainerRuntime; @Override public void initialize(Configuration conf) @@ -61,15 +64,20 @@ public void initialize(Configuration conf) dockerLinuxContainerRuntime = new DockerLinuxContainerRuntime( privilegedOperationExecutor); dockerLinuxContainerRuntime.initialize(conf); + javaSandboxLinuxContainerRuntime = new JavaSandboxLinuxContainerRuntime( + privilegedOperationExecutor); + javaSandboxLinuxContainerRuntime.initialize(conf); } - private LinuxContainerRuntime pickContainerRuntime(Container container) { - Map env = container.getLaunchContext().getEnvironment(); + private LinuxContainerRuntime pickContainerRuntime( + Map environment){ LinuxContainerRuntime runtime; - if (DockerLinuxContainerRuntime.isDockerContainerRequested(env)){ + if (DockerLinuxContainerRuntime.isDockerContainerRequested(environment)){ runtime = dockerLinuxContainerRuntime; - } else { + } else if (javaSandboxLinuxContainerRuntime.isSandboxContainerRequested()) { + runtime = javaSandboxLinuxContainerRuntime; + } else { runtime = defaultLinuxContainerRuntime; } @@ -81,13 +89,16 @@ private LinuxContainerRuntime pickContainerRuntime(Container container) { return runtime; } + private LinuxContainerRuntime pickContainerRuntime(Container container) { + return pickContainerRuntime(container.getLaunchContext().getEnvironment()); + } + @Override - public void prepareContainer(ContainerRuntimeContext ctx) + public void 
prepareContainer(Map environment, Map> resources, List command) throws ContainerExecutionException { - Container container = ctx.getContainer(); - LinuxContainerRuntime runtime = pickContainerRuntime(container); - - runtime.prepareContainer(ctx); + LinuxContainerRuntime runtime = pickContainerRuntime(environment); + runtime.prepareContainer(environment, resources, command); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java index 33b8add..7af9bb6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java @@ -241,7 +241,8 @@ public void initialize(Configuration conf) } @Override - public void prepareContainer(ContainerRuntimeContext ctx) + public void prepareContainer(Map environment, Map> resources, List command) throws ContainerExecutionException { } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java new file mode 100644 index 0000000..2511aee --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java @@ -0,0 +1,352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager. 
+ containermanager.linux.runtime; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; +import org.apache.log4j.Logger; +import sun.security.provider.PolicyParser; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FilePermission; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.Reader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.attribute.PosixFilePermissions; +import java.security.AllPermission; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.util.Shell.SYSPROP_HADOOP_HOME_DIR; +import static org.apache.hadoop.yarn.api.ApplicationConstants.Environment.CONTAINER_ID; +import static org.apache.hadoop.yarn.api.ApplicationConstants.Environment.YARN_RESOURCEMANAGER_APPLICATION_QUEUE; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_YARN_CONTAINER_SANDBOX; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS; +import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.YARN_CONTAINER_SANDBOX; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.YARN_CONTAINER_SANDBOX_POLICY; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.YARN_CONTAINER_SANDBOX_WHITELIST_QUEUE; + +/** + *

+ * <p>This class extends the {@link DefaultLinuxContainerRuntime} specifically
+ * for containers which run Java commands. It generates a new java security
+ * policy file per container and modifies the java command to enable the
+ * Java Security Manager with the generated policy.</p>
+ *
+ * The behavior of the {@link JavaSandboxLinuxContainerRuntime} can be modified
+ * using the following settings:
+ *
+ * <ul>
+ * <li>
+ * {@value YarnConfiguration#YARN_CONTAINER_SANDBOX} :
+ * This yarn-site.xml setting determines whether or not the
+ * {@link JavaSandboxLinuxContainerRuntime} will be used.
+ * </li>
+ * <li>
+ * {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS} :
+ * Determines the file permissions for the application directories. The
+ * permissions come in the form of comma separated values
+ * (e.g. read,write,execute,delete). Defaults to {@code read} for read-only.
+ * </li>
+ * <li>
+ * {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_POLICY} :
+ * Accepts canonical path to a java policy file on the local filesystem.
+ * This file will be loaded as the base policy, any additional container
+ * grants will be appended to this base file. If not specified, the default
+ * java.policy file provided with hadoop resources will be used.
+ * </li>
+ * <li>
+ * {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_WHITELIST_QUEUE} :
+ * Optional setting to specify a YARN queue which will be exempt from the
+ * sand-boxing process.
+ * </li>
+ * </ul>
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class JavaSandboxLinuxContainerRuntime + extends DefaultLinuxContainerRuntime { + private static final Log LOG = + LogFactory.getLog(DefaultLinuxContainerRuntime.class); + private Configuration configuration; + private Map> containerPolicies = new HashMap<>(); + private NMContainerPolicyUtils nmContainerPolicyUtils; + private boolean sandboxEnabled; + + private static final String POLICY_DIR = "/nm-java-policies"; + private static final String POLICY_DIR_PERMISSIONS = "rwxr-xr-x"; + private static final String POLICY_FILE_PERMISSIONS = "rw-r--r--"; + + /** + * Create an instance using the given {@link PrivilegedOperationExecutor} + * instance for performing operations. + * + * @param privilegedOperationExecutor the {@link PrivilegedOperationExecutor} + * instance + */ + public JavaSandboxLinuxContainerRuntime(PrivilegedOperationExecutor + privilegedOperationExecutor) { + super(privilegedOperationExecutor); + nmContainerPolicyUtils = new NMContainerPolicyUtils(); + } + + @Override + public void initialize(Configuration conf) + throws ContainerExecutionException { + this.configuration = conf; + this.sandboxEnabled = configuration.getBoolean(YARN_CONTAINER_SANDBOX, + DEFAULT_YARN_CONTAINER_SANDBOX); + super.initialize(conf); + } + + /** + * Prior to environment from being written locally need to generate + * policy file which limits container access to a small set of directories. + * + * @param environment - The container environment variables + * @param resources - The list of localized files for a container + * @param command - The run command for the container + * @throws ContainerExecutionException - Exception thrown if temporary policy + * file directory can't be created, or if any exceptions occur during policy + * file parsing and generation. 
+ */ + @Override + public void prepareContainer(Map environment, + Map> resources, List command) + throws ContainerExecutionException { + + // Circumvent enabling the security manager if the container's queue is + // whitelisted + String whitelistQueue = configuration.get( + YARN_CONTAINER_SANDBOX_WHITELIST_QUEUE); + String containerQueue = environment.get( + YARN_RESOURCEMANAGER_APPLICATION_QUEUE.name()); + if(whitelistQueue != null && containerQueue != null + && containerQueue.equals(whitelistQueue)) { + return; + } + + String tmpDirBase = configuration.get("hadoop.tmp.dir"); + if (tmpDirBase == null) { + throw new ContainerExecutionException("hadoop.tmp.dir not set!"); + } + String tmpDirPath = tmpDirBase + POLICY_DIR; + + File tmpDir = new File(tmpDirPath); + if (!(tmpDir.exists() || tmpDir.mkdirs())) { + LOG.warn("Unable to create directory: " + tmpDirPath); + throw new ContainerExecutionException("Unable to create directory: " + + tmpDirPath); + } + + OutputStream policyOutputStream = null; + + try { + File javaPolicyFile = File.createTempFile("java-", + ".policy", new File(tmpDirPath)); + String containerID = environment.get(CONTAINER_ID.name()); + List policyFiles = containerPolicies.get(containerID); + if(policyFiles == null) { + policyFiles = new ArrayList<>(); + containerPolicies.put(containerID, policyFiles); + } + policyFiles.add(javaPolicyFile); + + policyOutputStream = new FileOutputStream(javaPolicyFile); + Files.setPosixFilePermissions(tmpDir.toPath(), + PosixFilePermissions.fromString(POLICY_DIR_PERMISSIONS)); + Files.setPosixFilePermissions(javaPolicyFile.toPath(), + PosixFilePermissions.fromString(POLICY_FILE_PERMISSIONS)); + + nmContainerPolicyUtils.generatePolicyFile( + policyOutputStream, environment, resources, configuration); + nmContainerPolicyUtils.sanitizeCommand( + command, javaPolicyFile.getAbsolutePath()); + + } catch (Exception e) { + throw new ContainerExecutionException(e); + } finally { + IOUtils.cleanup(LOG, policyOutputStream); 
+ } + } + + /** + * Clean up java.policy files generated for this container. + * @param ctx - Runtime context + */ + @Override + public void reapContainer(ContainerRuntimeContext ctx) { + String containerId = ctx.getContainer().getContainerId().toString(); + for(File javaPolicyFile : containerPolicies.remove(containerId)) { + if (!javaPolicyFile.delete()) { + LOG.warn("Unable to delete generated java.policy file: " + + javaPolicyFile.getAbsolutePath()); + } + } + + } + + /** + * Determine if JVMSandboxLinuxContainerRuntime should be used. This is + * decided based on the value of + * {@value YarnConfiguration#YARN_CONTAINER_SANDBOX} + * @return true if Sandbox is requested, false otherwise + */ + public boolean isSandboxContainerRequested() { + return sandboxEnabled; + } + + /** + * Static utility class defining String constants and static methods for the + * use of the {@link JavaSandboxLinuxContainerRuntime}. + */ + public static final class NMContainerPolicyUtils{ + + public static final String POLICY_FILE = "java.policy"; + public static final String SECURITY_DEBUG = " -Djava.security.debug=all"; + public static final String SECURITY_FLAG = "-Djava.security.manager"; + public static final String POLICY_APPEND_FLAG = "-Djava.security.policy="; + public static final String POLICY_FLAG = POLICY_APPEND_FLAG + "="; + public static final String JAVA_CMD = "java"; + public static final String JVM_SECURITY_CMD = + JAVA_CMD + " " + SECURITY_FLAG + " " + POLICY_FLAG; + + private static final PolicyParser.GrantEntry HADOOP_GRANT = + new PolicyParser.GrantEntry(null, "file:" + + new File(System.getProperty(SYSPROP_HADOOP_HOME_DIR)) + + Path.SEPARATOR + "-"); + private static final Logger LOG = + Logger.getLogger(NMContainerPolicyUtils.class); + + static{ + HADOOP_GRANT.add(new PolicyParser.PermissionEntry( + AllPermission.class.getCanonicalName(), null, null)); + } + + private NMContainerPolicyUtils(){} + + /** + * Write new policy file to policyOutStream which will 
include read access + * to localize resources. Optionally a default policyFilePath can be + * specified to append a custom policy implementation to the new policy file + * @param policyOutStream - outputstream pointing to java.policy file + * @param environment - Container environment variables + * @param resources - List of local container resources + * @param conf - YARN configuration + * @throws PolicyParser.ParsingException - If provided + * {@value YarnConfiguration#YARN_CONTAINER_SANDBOX_POLICY} is set and is + * invalid. + * @throws IOException - If policy file generation is unable to read the + * base policy file or if it is unable to create a new policy file. + */ + public static void generatePolicyFile(OutputStream policyOutStream, + Map environment, Map> resources, + Configuration conf) throws PolicyParser.ParsingException, IOException { + + PolicyParser parser = new PolicyParser(); + PolicyParser.GrantEntry clientGrant = new PolicyParser.GrantEntry(); + + String policyFilePath = conf.get(YARN_CONTAINER_SANDBOX_POLICY); + String filePermissions = + conf.get(YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS, + DEFAULT_YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS); + String localDirs = environment.get( + ApplicationConstants.Environment.LOCAL_DIRS.toString()); + + Set cacheDirs = new HashSet<>(); + + if(policyFilePath == null) { + parser.read(new InputStreamReader( + NMContainerPolicyUtils.class.getResourceAsStream("/" + POLICY_FILE), + StandardCharsets.UTF_8)); + } else { + Reader fileReader = new InputStreamReader( + new FileInputStream(policyFilePath), StandardCharsets.UTF_8); + try { + parser.read(fileReader); + } finally { + fileReader.close(); + } + } + + parser.add(clientGrant); + parser.add(HADOOP_GRANT); + + for(Path path : resources.keySet()) { + cacheDirs.add(path.getParent().toString()); + } + for(String localDir : localDirs.split(",")) { + clientGrant.add(new PolicyParser.PermissionEntry( + FilePermission.class.getCanonicalName(), + localDir + Path.SEPARATOR 
+ "-", filePermissions)); + } + for(String cacheDir : cacheDirs) { + clientGrant.add(new PolicyParser.PermissionEntry( + FilePermission.class.getCanonicalName(), + cacheDir + Path.SEPARATOR + "-", "read")); + } + + parser.write( + new OutputStreamWriter(policyOutStream, StandardCharsets.UTF_8)); + } + + /** + * Modify command to enable the Java Security Manager and specify + * java.policy file. + * @param commands - List of container commands + * @param policyPath - Path to the container specific policy file + */ + public static void sanitizeCommand(List commands, + String policyPath) { + for(int i = 0; i < commands.size(); i++){ + String command = commands.get(i); + command = command.replaceAll(SECURITY_FLAG, ""); + command = command.replaceAll(POLICY_APPEND_FLAG + "[^ ]+", ""); + if(command.contains(JAVA_CMD)) { + String securityString = JVM_SECURITY_CMD + policyPath; + if(LOG.isDebugEnabled()) { + securityString += SECURITY_DEBUG; + } + commands.set(i, command.replaceFirst(JAVA_CMD, securityString)); + } + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java index b15690f..7c58481 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java @@ -22,8 +22,12 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import 
org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import java.util.List; +import java.util.Map; + /** * An abstraction for various container runtime implementations. Examples * include Process Tree, Docker, Appc runtimes etc. These implementations @@ -37,11 +41,14 @@ /** * Prepare a container to be ready for launch. * - * @param ctx the {@link ContainerRuntimeContext} + * @param command - Container commands + * @param environment - Container runtime environment + * @param resources - Local container resources * @throws ContainerExecutionException if an error occurs while preparing * the container */ - void prepareContainer(ContainerRuntimeContext ctx) + void prepareContainer(Map environment, + Map> resources, List command) throws ContainerExecutionException; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/java.policy b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/java.policy new file mode 100644 index 0000000..d9320c2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/java.policy @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Standard extensions get all permissions by default +grant codeBase "file:${{java.ext.dirs}}/*" { + permission java.security.AllPermission; +}; + +// default permissions granted to all domains +grant { + permission java.lang.RuntimePermission "accessDeclaredMembers"; + + permission java.util.PropertyPermission "java.version", "read"; + permission java.util.PropertyPermission "java.vendor", "read"; + permission java.util.PropertyPermission "java.vendor.url", "read"; + permission java.util.PropertyPermission "java.class.version", "read"; + permission java.util.PropertyPermission "os.name", "read"; + permission java.util.PropertyPermission "os.version", "read"; + permission java.util.PropertyPermission "os.arch", "read"; + permission java.util.PropertyPermission "file.separator", "read"; + permission java.util.PropertyPermission "path.separator", "read"; + permission java.util.PropertyPermission "line.separator", "read"; + + permission java.util.PropertyPermission "java.specification.version", "read"; + permission java.util.PropertyPermission "java.specification.vendor", "read"; + permission java.util.PropertyPermission "java.specification.name", "read"; + + permission java.util.PropertyPermission "java.vm.specification.version", "read"; + permission java.util.PropertyPermission "java.vm.specification.vendor", "read"; + permission java.util.PropertyPermission "java.vm.specification.name", "read"; + permission java.util.PropertyPermission "java.vm.version", "read"; + permission java.util.PropertyPermission "java.vm.vendor", "read"; + permission java.util.PropertyPermission "java.vm.name", "read"; + + //additional hadoop permissions + permission java.util.PropertyPermission "awt.Toolkit", "read"; + permission java.util.PropertyPermission "file.encoding", "read"; + permission java.util.PropertyPermission "file.encoding.pkg", "read"; + permission 
java.util.PropertyPermission "hadoop.metrics.log.level", "read"; + permission java.util.PropertyPermission "hadoop.root.logger", "read"; + permission java.util.PropertyPermission "java.awt.graphicsenv" ,"read"; + permission java.util.PropertyPermission "java.awt.printerjob", "read"; + permission java.util.PropertyPermission "java.class.path", "read"; + permission java.util.PropertyPermission "yarn.app.container.log.dir", "read"; + permission java.util.PropertyPermission "yarn.app.container.log.filesize", "read"; + permission java.lang.RuntimePermission "loadLibrary.gplcompression"; + permission javax.security.auth.AuthPermission "getSubject"; +}; \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java new file mode 100644 index 0000000..623eb80 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestJavaSandboxLinuxContainerRuntime.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.nodemanager. + containermanager.linux.runtime; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.FilePermission; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Test policy file generation and policy enforcement for the + * {@link JavaSandboxLinuxContainerRuntime}. 
+ */ +public class TestJavaSandboxLinuxContainerRuntime { + + private static final String HADOOP_HOME = "hadoop.home.dir"; + private static String hadoopHomeDir = System.getProperty(HADOOP_HOME); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + private static File grantFile, denyFile, policyFile, + grantDir, denyDir; + private static SecurityManager securityManager; + + @BeforeClass + public static void setup() throws Exception { + + Map> resources = new HashMap<>(); + File baseTestDirectory = new File(System.getProperty("test.build.data", + System.getProperty("java.io.tmpdir", "target")), + TestJavaSandboxLinuxContainerRuntime.class.getName()); + grantDir = new File(baseTestDirectory + "grantDir"); + denyDir = new File(baseTestDirectory + "denyDir"); + grantDir.mkdirs(); + denyDir.mkdirs(); + + grantFile = File.createTempFile("grantFile", "tmp", grantDir); + denyFile = File.createTempFile("denyFile", "tmp", denyDir); + + List symLinks = new ArrayList<>(); + symLinks.add(grantFile.getName()); + resources.put(new Path(grantFile.getCanonicalPath()), symLinks); + + policyFile = File.createTempFile("java", "policy"); + + Map environment = new HashMap<>(); + environment.put(ApplicationConstants.Environment.LOCAL_DIRS.toString(), + "/tmp/fakeDir"); + Configuration conf = new Configuration(); + + if(hadoopHomeDir == null) { + System.setProperty(HADOOP_HOME, policyFile.getParent()); + } + + OutputStream outStream = new FileOutputStream(policyFile); + JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils + .generatePolicyFile(outStream, environment, resources, conf); + outStream.close(); + + System.setProperty("java.security.policy", policyFile.getCanonicalPath()); + securityManager = new SecurityManager(); + + } + + @Test + public void testGrant() throws Exception { + FilePermission grantPermission = + new FilePermission(grantFile.getAbsolutePath(), "read"); + securityManager.checkPermission(grantPermission); + } + + @Test + public void 
testDeny() throws Exception { + FilePermission denyPermission = + new FilePermission(denyFile.getAbsolutePath(), "read"); + exception.expect(java.security.AccessControlException.class); + securityManager.checkPermission(denyPermission); + } + + @Test + public void testCommandSanitizer() throws ContainerExecutionException { + String securityString = "-Djava.security.manager -Djava.security.policy==" + + policyFile.getAbsolutePath(); + String[] badCommands = { + "java -Djava.security.manager " + + "-Djava.security.policy==/home/user/java.policy", + "java -cp MyJar.jar MrAppMaster" + }; + String[] goodCommands = { + "java " + securityString, + "java " + securityString + " -cp MyJar.jar MrAppMaster" + }; + + List commands = Arrays.asList(badCommands); + JavaSandboxLinuxContainerRuntime.NMContainerPolicyUtils + .sanitizeCommand(commands, policyFile.getAbsolutePath()); + + for(int i = 0; i < commands.size(); i++) { + assert commands.get(i).trim().equals(goodCommands[i].trim()); + } + } + + @After + public void cleanup(){ + System.setSecurityManager(null); + } +} \ No newline at end of file