diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 548d868a68f..978ba0d2995 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1945,6 +1945,7 @@ public static boolean isAclEnabled(Configuration conf) { *
 *   <li>default</li>
 *   <li>docker</li>
 *   <li>javasandbox</li>
+ *   <li>runc</li>
  • * */ public static final String LINUX_CONTAINER_RUNTIME_ALLOWED_RUNTIMES = @@ -1961,6 +1962,106 @@ public static boolean isAclEnabled(Configuration conf) { public static final String LINUX_CONTAINER_RUNTIME_TYPE = LINUX_CONTAINER_RUNTIME_PREFIX + "type"; + public static final String RUNC_CONTAINER_RUNTIME_PREFIX = + LINUX_CONTAINER_RUNTIME_PREFIX + "runc."; + + /** + * The runc image tag to manifest plugin class that should be used + */ + public static final String NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN = + RUNC_CONTAINER_RUNTIME_PREFIX + "image-tag-to-manifest-plugin"; + + /** Default runc image tag to manifest plugin class */ + public static final String DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN = + "org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.ImageTagToManifestPlugin"; + + /** + * The runc manifest to resources plugin class that should be used + */ + public static final String NM_RUNC_MANIFEST_TO_RESOURCES_PLUGIN = + RUNC_CONTAINER_RUNTIME_PREFIX + "manifest-to-resources-plugin"; + + /** Default runc manifest to resources plugin plugin class */ + public static final String DEFAULT_NM_RUNC_MANIFEST_TO_RESOURCES_PLUGIN = + "org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.HdfsManifestToResourcesPlugin"; + + /** + * Target count of layer mounts that we should keep on disk at one time. + */ + public static final String NM_RUNC_LAYER_MOUNTS_TO_KEEP = + RUNC_CONTAINER_RUNTIME_PREFIX + "layer-mounts-to-keep"; + + public static final int DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP = 100; + + /** + * The interval in seconds between executions of reaping layer mounts + */ + public static final String NM_REAP_RUNC_LAYER_MOUNTS_INTERVAL = + RUNC_CONTAINER_RUNTIME_PREFIX + "layer-mounts-interval-secs"; + + public static final int DEFAULT_NM_REAP_RUNC_LAYER_MOUNTS_INTERVAL = 600; + + /** Default runc image to be used. 
*/ + public static final String NM_RUNC_IMAGE_NAME = + RUNC_CONTAINER_RUNTIME_PREFIX + "image-name"; + + /** Allow privileged containers. Use with extreme care. */ + public static final String NM_RUNC_ALLOW_PRIVILEGED_CONTAINERS = + RUNC_CONTAINER_RUNTIME_PREFIX + "privileged-containers.allowed"; + + /** Privileged containers are disabled by default. */ + public static final boolean DEFAULT_NM_RUNC_ALLOW_PRIVILEGED_CONTAINERS = + false; + + /** The set of networks allowed when launching containers using the + * RuncContainerRuntime. */ + public static final String NM_RUNC_ALLOWED_CONTAINER_NETWORKS = + RUNC_CONTAINER_RUNTIME_PREFIX + "allowed-container-networks"; + + /** The set of networks allowed when launching containers using the + * RuncContainerRuntime. */ + public static final String[] DEFAULT_NM_RUNC_ALLOWED_CONTAINER_NETWORKS = + {"host", "none", "bridge"}; + + /** The set of runtimes allowed when launching containers using the + * RuncContainerRuntime. */ + public static final String NM_RUNC_ALLOWED_CONTAINER_RUNTIMES = + RUNC_CONTAINER_RUNTIME_PREFIX + "allowed-container-runtimes"; + + /** The set of runtimes allowed when launching containers using the + * RuncContainerRuntime. */ + public static final String[] DEFAULT_NM_RUNC_ALLOWED_CONTAINER_RUNTIMES = + {"runc"}; + + /** ACL list for users allowed to run privileged containers. */ + public static final String NM_RUNC_PRIVILEGED_CONTAINERS_ACL = + RUNC_CONTAINER_RUNTIME_PREFIX + "privileged-containers.acl"; + + /** Default list for users allowed to run privileged containers is empty. */ + public static final String DEFAULT_NM_RUNC_PRIVILEGED_CONTAINERS_ACL = ""; + + /** Allow host pid namespace for containers. Use with care. */ + public static final String NM_RUNC_ALLOW_HOST_PID_NAMESPACE = + RUNC_CONTAINER_RUNTIME_PREFIX + "host-pid-namespace.allowed"; + + /** Host pid namespace for containers is disabled by default. 
*/ + public static final boolean DEFAULT_NM_RUNC_ALLOW_HOST_PID_NAMESPACE = + false; + + /** The default list of read-only mounts to be bind-mounted into all + * Docker containers that use DockerContainerRuntime. */ + public static final String NM_RUNC_DEFAULT_RO_MOUNTS = + RUNC_CONTAINER_RUNTIME_PREFIX + "default-ro-mounts"; + + /** The default list of read-write mounts to be bind-mounted into all + * Docker containers that use DockerContainerRuntime. */ + public static final String NM_RUNC_DEFAULT_RW_MOUNTS = + RUNC_CONTAINER_RUNTIME_PREFIX + "default-rw-mounts"; + + /** Path to the seccomp profile to use with Runc containers */ + public static final String NM_RUNC_SECCOMP_PROFILE = + RUNC_CONTAINER_RUNTIME_PREFIX + "seccomp-profile"; + public static final String DOCKER_CONTAINER_RUNTIME_PREFIX = LINUX_CONTAINER_RUNTIME_PREFIX + "docker."; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 9506509acf4..8d1270a9604 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.nodemanager; import java.io.File; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; @@ -48,6 +49,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.yarn.api.records.ContainerId; +import 
org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ConfigurationException; @@ -127,6 +130,10 @@ public Configuration getConf() { */ public abstract void init(Context nmContext) throws IOException; + public void start() {} + + public void stop() {} + /** * This function localizes the JAR file on-demand. * On Windows the ContainerLaunch creates a temporary special JAR manifest of @@ -259,6 +266,11 @@ public abstract void symLink(String target, String symlink) public abstract boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException; + + public Map getLocalResources(Container container) throws IOException { + return container.getLaunchContext().getLocalResources(); + } + /** * Update cluster information inside container. * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 06a32be9d5b..bf326fa6ef0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -31,6 +31,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import 
org.apache.hadoop.yarn.exceptions.ConfigurationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; @@ -70,6 +71,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.regex.Pattern; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; @@ -343,6 +345,18 @@ public void init(Context context) throws IOException { resourcesHandler.init(this); } + @Override + public void start() { + super.start(); + linuxContainerRuntime.start(); + } + + @Override + public void stop() { + super.stop(); + linuxContainerRuntime.stop(); + } + @Override public void startLocalizer(LocalizerStartContext ctx) throws IOException, InterruptedException { @@ -1042,4 +1056,9 @@ public String getExposedPorts(Container container) throws ContainerExecutionException { return linuxContainerRuntime.getExposedPorts(container); } + + @Override + public Map getLocalResources(Container container) throws IOException { + return linuxContainerRuntime.getLocalResources(container); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index db3aaca11fb..43cbbaeabb7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -423,7 +423,10 @@ protected void serviceInit(Configuration conf) throws Exception { exec.init(context); } catch (IOException e) { throw new YarnRuntimeException("Failed to 
initialize container executor", e); - } + } + + ((NMContext) context).setContainerExecutor(exec); + DeletionService del = createDeletionService(exec); addService(del); @@ -513,6 +516,7 @@ protected void serviceInit(Configuration conf) throws Exception { registerMXBean(); + context.getContainerExecutor().start(); super.serviceInit(conf); // TODO add local dirs to del } @@ -525,6 +529,7 @@ protected void serviceStop() throws Exception { try { super.serviceStop(); DefaultMetricsSystem.shutdown(); + context.getContainerExecutor().stop(); // Cleanup ResourcePluginManager if (null != context) { @@ -685,6 +690,8 @@ protected void reregisterCollectors() { private AuxServices auxServices; + private ContainerExecutor exec; + public NMContext(NMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInNM nmTokenSecretManager, LocalDirsHandlerService dirsHandler, ApplicationACLsManager aclsManager, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java index 356c2e094e7..c2bc3db7958 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java @@ -32,6 +32,8 @@ /** * The ContainerManager is an entity that manages the life cycle of Containers. 
*/ +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; + public interface ContainerManager extends ServiceStateChangeListener, ContainerManagementProtocol, EventHandler { @@ -45,4 +47,7 @@ ContainerScheduler getContainerScheduler(); void handleCredentialUpdate(); + + ResourceLocalizationService getResourceLocalizationService(); + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index c43b82539d4..71548005feb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -2008,4 +2008,7 @@ public GetLocalizationStatusesResponse getLocalizationStatuses( return container.getLocalizationStatuses(); } + public ResourceLocalizationService getResourceLocalizationService() { + return rsrcLocalizationSrvc; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java index 5a457c9015e..db8112720ff 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java @@ -112,6 +112,12 @@ boolean isRecovering(); + void setContainerRuntimeData(Object object); + + Object getContainerRuntimeData(); + + + /** * Get assigned resource mappings to the container. * diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index b79c305a0e6..3d94efed5cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -180,6 +180,7 @@ private ReInitializationContext createContextForRollback() { private volatile ReInitializationContext reInitContext; private volatile boolean isReInitializing = false; private volatile boolean isMarkeForKilling = false; + private Object containerRuntimeData; /** The NM-wide configuration - not specific to this container */ private final Configuration daemonConf; @@ -1211,26 +1212,28 @@ public ContainerState transition(ContainerImpl container, container.containerLocalizationStartTime = clock.getTime(); // Send requests for public, private resources - Map cntrRsrc = ctxt.getLocalResources(); - if 
(!cntrRsrc.isEmpty()) { - try { + Map cntrRsrc; + try { + cntrRsrc = container.context + .getContainerExecutor().getLocalResources(container); + if (!cntrRsrc.isEmpty()) { Map> req = container.resourceSet.addResources(ctxt.getLocalResources()); container.dispatcher.getEventHandler().handle( new ContainerLocalizationRequestEvent(container, req)); - } catch (URISyntaxException e) { + return ContainerState.LOCALIZING; + } else { + container.sendScheduleEvent(); + container.metrics.endInitingContainer(); + return ContainerState.SCHEDULED; + } + } catch (URISyntaxException | IOException e) { // malformed resource; abort container launch LOG.warn("Failed to parse resource-request", e); container.cleanup(); container.metrics.endInitingContainer(); return ContainerState.LOCALIZATION_FAILED; } - return ContainerState.LOCALIZING; - } else { - container.sendScheduleEvent(); - container.metrics.endInitingContainer(); - return ContainerState.SCHEDULED; - } } } @@ -2283,4 +2286,13 @@ public void setExposedPorts(String ports) { this.readLock.unlock(); } } + + public void setContainerRuntimeData(Object containerRuntimeData) { + this.containerRuntimeData = containerRuntimeData; + } + + + public Object getContainerRuntimeData() { + return containerRuntimeData; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java index e92560e3f87..653eca285d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DockerContainerDeletionTask; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext; import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext; @@ -147,7 +148,7 @@ public void run() { } // rm container in docker - if (OCIContainerRuntime.isOCICompliantContainerRequested(conf, + if (DockerLinuxContainerRuntime.isDockerContainerRequested(conf, container.getLaunchContext().getEnvironment())) { rmDockerContainerDelayed(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java index a17daede2a8..979f4821b24 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java @@ -59,7 +59,9 @@ ADD_NUMA_PARAMS(""), // no CLI switch supported yet. REMOVE_DOCKER_CONTAINER("--remove-docker-container"), INSPECT_DOCKER_CONTAINER("--inspect-docker-container"), - SYNC_YARN_SYSFS(""); + SYNC_YARN_SYSFS(""), + RUN_RUNC_CONTAINER("--run-runc-container"), + REAP_RUNC_LAYER_MOUNTS("--reap-runc-layer-mounts"); private final String option; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java index acbfe9c3ba8..dbbd0a08bb5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; @@ -37,6 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; import 
java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -60,6 +62,7 @@ LoggerFactory.getLogger(DelegatingLinuxContainerRuntime.class); private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime; private DockerLinuxContainerRuntime dockerLinuxContainerRuntime; + private RuncContainerRuntime runcContainerRuntime; private JavaSandboxLinuxContainerRuntime javaSandboxLinuxContainerRuntime; private Set allowedRuntimes = new HashSet<>(); private List pluggableRuntimes = new ArrayList<>(); @@ -92,6 +95,12 @@ public void initialize(Configuration conf, Context nmContext) PrivilegedOperationExecutor.getInstance(conf)); dockerLinuxContainerRuntime.initialize(conf, nmContext); } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.RUNC.name())) { + runcContainerRuntime = new RuncContainerRuntime( + PrivilegedOperationExecutor.getInstance(conf)); + runcContainerRuntime.initialize(conf, nmContext); + } if (isRuntimeAllowed( LinuxContainerRuntimeConstants.RuntimeType.DEFAULT.name())) { defaultLinuxContainerRuntime = new DefaultLinuxContainerRuntime( @@ -116,6 +125,9 @@ LinuxContainerRuntime pickContainerRuntime( } else if (dockerLinuxContainerRuntime != null && dockerLinuxContainerRuntime.isRuntimeRequested(environment)) { runtime = dockerLinuxContainerRuntime; + } else if (runcContainerRuntime != null && + runcContainerRuntime.isRuntimeRequested(environment)) { + runtime = runcContainerRuntime; } else { LinuxContainerRuntime pluggableRuntime = pickPluggableRuntime( environment); @@ -244,4 +256,58 @@ public IOStreamPair execContainer(ContainerExecContext ctx) LinuxContainerRuntime runtime = pickContainerRuntime(container); return runtime.execContainer(ctx); } + + + @Override + public Map getLocalResources(Container container) throws IOException { + try { + LinuxContainerRuntime runtime = pickContainerRuntime(container); + return runtime.getLocalResources(container); + } catch (ContainerExecutionException e) { + throw new IOException(e); + } 
+ } + + @Override + public void start() { + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.JAVASANDBOX.name())) { + javaSandboxLinuxContainerRuntime.start(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.DOCKER.name())) { + dockerLinuxContainerRuntime.start(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.RUNC.name())) { + runcContainerRuntime.start(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.DEFAULT.name())) { + defaultLinuxContainerRuntime.start(); + } + + } + + @Override + public void stop() { + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.JAVASANDBOX.name())) { + javaSandboxLinuxContainerRuntime.stop(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.DOCKER.name())) { + dockerLinuxContainerRuntime.stop(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.RUNC.name())) { + runcContainerRuntime.stop(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.DEFAULT.name())) { + defaultLinuxContainerRuntime.stop(); + } + + } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java index 50721debe5e..dbd95934254 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java @@ -208,6 +208,9 @@ private static final String DEFAULT_PROCFS = "/proc"; + @InterfaceAudience.Private + private static final String RUNTIME_TYPE = "DOCKER"; + @InterfaceAudience.Private public static final String ENV_DOCKER_CONTAINER_IMAGE = "YARN_CONTAINER_RUNTIME_DOCKER_IMAGE"; @@ -237,12 +240,10 @@ "YARN_CONTAINER_RUNTIME_DOCKER_RUNTIME"; @InterfaceAudience.Private - private static final String RUNTIME_TYPE = "DOCKER"; - @InterfaceAudience.Private - private final static String ENV_OCI_CONTAINER_PID_NAMESPACE = + public final static String ENV_OCI_CONTAINER_PID_NAMESPACE = formatOciEnvKey(RUNTIME_TYPE, CONTAINER_PID_NAMESPACE_SUFFIX); @InterfaceAudience.Private - private final static String ENV_OCI_CONTAINER_RUN_PRIVILEGED_CONTAINER = + public final static String ENV_OCI_CONTAINER_RUN_PRIVILEGED_CONTAINER = formatOciEnvKey(RUNTIME_TYPE, RUN_PRIVILEGED_CONTAINER_SUFFIX); private Configuration conf; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/HdfsManifestToResourcesPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/HdfsManifestToResourcesPlugin.java new file mode 100644 index 00000000000..d62c73a6eb0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/HdfsManifestToResourcesPlugin.java @@ -0,0 +1,189 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.URL; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RUNC_CONTAINER_RUNTIME_PREFIX; + +public class HdfsManifestToResourcesPlugin extends AbstractService implements + RuncManifestToResourcesPlugin { + private Configuration conf; + private String layersDir; + private String configDir; + private FileSystem fs; + private LoadingCache statCache; + + private static String 
HDFS_MANIFEST_TO_RESOURCES_PLUGIN_PREFIX = + RUNC_CONTAINER_RUNTIME_PREFIX + "hdfs-manifest-to-resources-plugin."; + + /** + * The timeout value in seconds for the values in the stat cache + */ + private static String NM_RUNC_STAT_CACHE_TIMEOUT = + HDFS_MANIFEST_TO_RESOURCES_PLUGIN_PREFIX + "stat-cache-timeout-interval-secs"; + + private static int DEFAULT_NM_RUNC_STAT_CACHE_TIMEOUT = 60 * 60; + + /** + * The size of the stat cache which stores stats of the layers and config + */ + private static String NM_RUNC_STAT_CACHE_SIZE = + HDFS_MANIFEST_TO_RESOURCES_PLUGIN_PREFIX + "stat-cache-size"; + + private static int DEFAULT_RUNC_STAT_CACHE_SIZE = 500; + + /** + * The HDFS location under which the oci image manifests, layers, + * and configs directories exist + */ + private static String NM_RUNC_IMAGE_TOPLEVEL_DIR = + RUNC_CONTAINER_RUNTIME_PREFIX + "image-toplevel-dir"; + + private static String CONFIG_MEDIA_TYPE = + "application/vnd.docker.container.image.v1+json"; + + private static String LAYER_TAR_GZIP_MEDIA_TYPE = + "application/vnd.docker.image.rootfs.diff.tar.gzip"; + + private static String SHA_256 = "sha256"; + + private static String CONFIG_HASH_ALGORITHM = + SHA_256; + + private static String LAYER_HASH_ALGORITHM = + SHA_256; + + private static int SHA256_HASH_LENGTH = 64; + + private static String ALPHA_NUMERIC = "[a-zA-Z0-9]+"; + + HdfsManifestToResourcesPlugin() { + super(HdfsManifestToResourcesPlugin.class.getName()); + } + + @Override + public void serviceInit(Configuration conf) { + this.conf = conf; + this.layersDir = conf.get(NM_RUNC_IMAGE_TOPLEVEL_DIR) + "/layers/"; + this.configDir = conf.get(NM_RUNC_IMAGE_TOPLEVEL_DIR) + "/config/"; + CacheLoader cacheLoader = + new CacheLoader() { + @Override + public FileStatus load(@Nonnull Path path) throws Exception { + return statBlob(path); + } + }; + int statCacheSize = conf.getInt(NM_RUNC_STAT_CACHE_SIZE, + DEFAULT_RUNC_STAT_CACHE_SIZE); + int statCacheTimeout = conf.getInt(NM_RUNC_STAT_CACHE_TIMEOUT, + 
DEFAULT_NM_RUNC_STAT_CACHE_TIMEOUT); + this.statCache = CacheBuilder.newBuilder().maximumSize(statCacheSize) + .refreshAfterWrite(statCacheTimeout, TimeUnit.SECONDS) + .build(cacheLoader); + } + + @Override + public void serviceStart() throws IOException { + Path path = new Path(layersDir); + this.fs = path.getFileSystem(conf); + } + + @Override + public List getLayerResources(ImageManifest manifest) + throws IOException { + List localRsrcs = new ArrayList<>(); + + for(ImageManifest.Blob blob : manifest.getLayers()) { + LocalResource rsrc = getResource(blob, layersDir, + LAYER_TAR_GZIP_MEDIA_TYPE, LAYER_HASH_ALGORITHM, ".sqsh"); + localRsrcs.add(rsrc); + } + return localRsrcs; + } + + public LocalResource getConfigResource(ImageManifest manifest) + throws IOException { + ImageManifest.Blob config = manifest.getConfig(); + return getResource(config, configDir, CONFIG_MEDIA_TYPE, + CONFIG_HASH_ALGORITHM, ""); + } + + public LocalResource getResource(ImageManifest.Blob blob, + String dir, String expectedMediaType, + String expectedHashAlgorithm, String resourceSuffix) throws IOException { + String mediaType = blob.getMediaType(); + if (!mediaType.equals(expectedMediaType)) { + throw new IOException("Invalid blob mediaType: " + mediaType); + } + + String[] blobDigest = blob.getDigest().split(":", 2); + + String algorithm = blobDigest[0]; + if (!algorithm.equals(expectedHashAlgorithm)) { + throw new IOException("Invalid blob digest algorithm: " + algorithm); + } + + String hash = blobDigest[1]; + if (!hash.matches(ALPHA_NUMERIC) || hash.length() != SHA256_HASH_LENGTH) { + throw new IOException("Malformed blob digest: " + hash); + } + + long size = blob.getSize(); + Path path = new Path(dir, hash + resourceSuffix); + LocalResource rsrc; + + try { + FileStatus stat = statCache.get(path); + long timestamp = stat.getModificationTime(); + URL url = URL.fromPath(path); + + rsrc = LocalResource.newInstance(url, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size, 
timestamp); + } catch (ExecutionException e) { + throw new IOException(e); + } + + return rsrc; + } + + protected FileStatus statBlob(Path path) throws IOException { + return fs.getFileStatus(path); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageManifest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageManifest.java new file mode 100644 index 00000000000..74c89f418a2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageManifest.java @@ -0,0 +1,124 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import java.util.ArrayList; +import java.util.Map; + +public class ImageManifest { + final private int schemaVersion; + final private String mediaType; + final private Blob config; + final private ArrayList layers; + final private Map annotations; + + public ImageManifest() { + this(0, null, null, null, null); + } + + public ImageManifest(int schemaVersion, String mediaType, Blob config, + ArrayList layers, Map annotations) { + this.schemaVersion = schemaVersion; + this.mediaType = mediaType; + this.config = config; + this.layers = layers; + this.annotations = annotations; + } + + public int getSchemaVersion() { + return schemaVersion; + } + + public String getMediaType() { + return mediaType; + } + + public Blob getConfig() { + return config; + } + + public ArrayList getLayers() { + return layers; + } + + public Map getAnnotations() { + return annotations; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("schemaVersion: " + schemaVersion + "\n"); + sb.append("mediaType: " + mediaType + "\n"); + sb.append(config.toString()); + for(Blob b : layers) { + sb.append(b.toString()); + } + return sb.toString(); + } + + static class Blob { + final private String mediaType; + final private String digest; + final private long size; + final private ArrayList urls; + final private Map annotations; + + public Blob() { + this(null, null, 0, null, null); + } + + public Blob(String mediaType, String digest, long size, + ArrayList urls, Map annotations) { + this.mediaType = mediaType; + this.digest = digest; + this.size = size; + this.urls = urls; + this.annotations = annotations; + } + + public String getMediaType() { + return mediaType; + } + + public String getDigest() { + return digest; + } + + public long getSize() { + return size; + } + + public ArrayList getUrls() { + return urls; + } + + public Map getAnnotations() { + return 
annotations; + } + + @Override + public String toString() { + return "mediaType: " + mediaType + "\n" + "size: " + size + "\n" + + "digest: " + digest + "\n"; + } + } +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageTagToManifestPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageTagToManifestPlugin.java new file mode 100644 index 00000000000..d6431bd1e43 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageTagToManifestPlugin.java @@ -0,0 +1,353 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.commons.io.IOUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.codehaus.jackson.map.ObjectMapper; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RUNC_CONTAINER_RUNTIME_PREFIX; + +public class ImageTagToManifestPlugin extends AbstractService + implements RuncImageTagToManifestPlugin { + + private Map manifestCache; + private ObjectMapper objMapper; + private AtomicReference> localImageToHashCache = + new AtomicReference<>(new HashMap<>()); + private AtomicReference> hdfsImageToHashCache = + new AtomicReference<>(new HashMap<>()); + private Configuration conf; + private ScheduledExecutorService exec; + private long hdfsModTime; + private long localModTime; + private String hdfsImageToHashFile; + private String manifestDir; + private String localImageTagToHashFile; + + private static final Log LOG = LogFactory.getLog( + ImageTagToManifestPlugin.class); + + public static String IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX = + RUNC_CONTAINER_RUNTIME_PREFIX + 
"image-tag-to-manifest-plugin."; + + /** + * The HDFS location where the oci image tag to hash file exists + */ + public static String NM_HDFS_RUNC_IMAGE_TAG_TO_HASH_FILE = + IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "hdfs-hash-file"; + + /** + * The local file system location where the oci image tag to hash file exists + */ + public static String NM_LOCAL_RUNC_IMAGE_TAG_TO_HASH_FILE = + IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "local-hash-file"; + + /** + * The interval in seconds between refreshing the hdfs image Tag to + * hash cache. + */ + public static String NM_RUNC_CACHE_REFRESH_INTERVAL = + IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "cache-refresh-interval-secs"; + + public static int DEFAULT_NM_RUNC_CACHE_REFRESH_INTERVAL = 60; + + /** + * The interval in seconds between refreshing the hdfs image Tag to + * hash cache. + */ + public static String NM_RUNC_NUM_MANIFESTS_TO_CACHE = + IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "num-manifests-to-cache"; + + public static int DEFAULT_NUM_MANIFESTS_TO_CACHE = 10; + + /** + * The HDFS location under which the oci image manifests, layers, + * and configs directories exist + */ + public static String NM_RUNC_IMAGE_TOPLEVEL_DIR = + RUNC_CONTAINER_RUNTIME_PREFIX + "image-toplevel-dir"; + + private static int SHA256_HASH_LENGTH = 64; + + + private static String ALPHA_NUMERIC = "[a-zA-Z0-9]+"; + + ImageTagToManifestPlugin() { + super("ImageTagToManifestPluginService"); + } + + @Override + public ImageManifest getManifestFromImageTag(String imageTag) + throws IOException { + String hash = getHashFromImageTag(imageTag); + ImageManifest manifest = manifestCache.get(hash); + if (manifest != null) { + return manifest; + } + + Path manifestPath = new Path(manifestDir + hash); + FileSystem fs = manifestPath.getFileSystem(conf); + FSDataInputStream input; + try { + input = fs.open(manifestPath); + } catch (IllegalArgumentException iae) { + throw new IOException("Manifest file is not a valid HDFS file: " + + manifestPath.toString(), iae); + } + 
+ byte[] bytes = IOUtils.toByteArray(input); + manifest = objMapper.readValue(bytes, ImageManifest.class); + + manifestCache.put(hash, manifest); + return manifest; + } + + @Override + public String getHashFromImageTag(String imageTag) { + String hash; + Map localImageToHashCacheMap = localImageToHashCache.get(); + Map hdfsImageToHashCacheMap = hdfsImageToHashCache.get(); + + // 1) Go to local file + // 2) Go to HDFS + // 3) Use tag as is/Assume tag is the hash + if ((hash = localImageToHashCacheMap.get(imageTag)) != null) { + return hash; + } else if ((hash = hdfsImageToHashCacheMap.get(imageTag)) != null) { + return hash; + } else { + return imageTag; + } + } + + protected BufferedReader getLocalImageToHashReader() throws IOException { + if (localImageTagToHashFile == null) { + LOG.debug("Did not load local image to hash file, " + + "file is null"); + return null; + } + + File imageTagToHashFile = new File(localImageTagToHashFile); + if(!imageTagToHashFile.exists()) { + LOG.debug("Did not load local image to hash file, " + + "file doesn't exist"); + return null; + } + + long newLocalModTime = imageTagToHashFile.lastModified(); + if (newLocalModTime == localModTime) { + LOG.debug("Did not load local image to hash file, " + + "file is unmodified"); + return null; + } + localModTime = newLocalModTime; + + return new BufferedReader(new FileReader(imageTagToHashFile)); + } + + protected BufferedReader getHdfsImageToHashReader() throws IOException { + if (hdfsImageToHashFile == null) { + LOG.debug("Did not load hdfs image to hash file, " + + "file is null"); + return null; + } + + Path imageToHash = new Path(hdfsImageToHashFile); + FileSystem fs = imageToHash.getFileSystem(conf); + if (!fs.exists(imageToHash)) { + LOG.debug("Did not load hdfs image to hash file, " + + "file doesn't exist"); + return null; + } + + long newHdfsModTime = fs.getFileStatus(imageToHash).getModificationTime(); + if (newHdfsModTime == hdfsModTime) { + LOG.debug("Did not load hdfs image to hash 
file, " + + "file is unmodified"); + return null; + } + hdfsModTime = newHdfsModTime; + + return new BufferedReader(new InputStreamReader(fs.open(imageToHash))); + } + + // You may specify multiple tags per hash all on the same line. + // Comments are allowed using #. Anything after this character will not + // be read + // Example file: + // foo/bar:current,fizz/gig:latest:123456789 + // #this/line:wont,be:parsed:2378590895 + // + // This will map both foo/bar:current and fizz/gig:latest to 123456789 + protected static Map readImageToHashFile( + BufferedReader br) throws IOException { + if (br == null) { + return null; + } + + String line; + Map imageToHashCache = new HashMap<>(); + while ((line = br.readLine()) != null) { + int index; + index = line.indexOf("#"); + if (index == 0) { + continue; + } else if (index != -1) { + line = line.substring(0, index); + } + + index = line.lastIndexOf(":"); + if (index == -1) { + LOG.warn("Malformed imageTagToManifest entry: " + line); + continue; + } + String imageTags = line.substring(0, index); + String[] imageTagArray = imageTags.split(","); + String hash = line.substring(index + 1); + if (!hash.matches(ALPHA_NUMERIC) || hash.length() != SHA256_HASH_LENGTH) { + LOG.warn("Malformed image hash: " + hash); + continue; + } + + for (String imageTag : imageTagArray) { + imageToHashCache.put(imageTag, hash); + } + } + return imageToHashCache; + } + + protected boolean loadImageToHashFiles() throws IOException { + boolean ret = false; + try ( + BufferedReader localBr = getLocalImageToHashReader(); + BufferedReader hdfsBr = getHdfsImageToHashReader() + ) { + Map localImageToHash = readImageToHashFile(localBr); + Map hdfsImageToHash = readImageToHashFile(hdfsBr); + + Map tmpLocalImageToHash = localImageToHashCache.get(); + Map tmpHdfsImageToHash = hdfsImageToHashCache.get(); + + if (localImageToHash != null && !localImageToHash.equals(tmpLocalImageToHash)) { + localImageToHashCache.set(localImageToHash); + LOG.info("Reloaded local 
image tag to hash cache"); + ret = true; + } + if (hdfsImageToHash != null && !hdfsImageToHash.equals(tmpHdfsImageToHash)) { + hdfsImageToHashCache.set(hdfsImageToHash); + LOG.info("Reloaded hdfs image tag to hash cache"); + ret = true; + } + } + return ret; + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + super.serviceInit(conf); + localImageTagToHashFile = conf.get(NM_LOCAL_RUNC_IMAGE_TAG_TO_HASH_FILE); + if (localImageTagToHashFile == null) { + LOG.debug("Failed to load local oci image to hash file. " + + "Config not set"); + } + hdfsImageToHashFile = conf.get(NM_HDFS_RUNC_IMAGE_TAG_TO_HASH_FILE); + if (hdfsImageToHashFile == null) { + LOG.debug("Failed to load HDFS oci image to hash file. Config not set"); + } + if(hdfsImageToHashFile == null && localImageTagToHashFile == null) { + throw new ContainerExecutionException("No valid image-tag-to-hash files"); + } + manifestDir = conf.get(NM_RUNC_IMAGE_TOPLEVEL_DIR) + "/manifests/"; + int numManifestsToCache = conf.getInt(NM_RUNC_NUM_MANIFESTS_TO_CACHE, + DEFAULT_NUM_MANIFESTS_TO_CACHE); + this.objMapper = new ObjectMapper(); + this.manifestCache = Collections.synchronizedMap( + new LRUCache(numManifestsToCache, 0.75f)); + this.conf = conf; + + exec = HadoopExecutors.newScheduledThreadPool(1); + } + + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + if(!loadImageToHashFiles()) { + throw new RuntimeException("Couldn't load any image-tag-to-hash-files"); + } + int ociCacheRefreshInterval = conf.getInt(NM_RUNC_CACHE_REFRESH_INTERVAL, + DEFAULT_NM_RUNC_CACHE_REFRESH_INTERVAL); + exec = HadoopExecutors.newScheduledThreadPool(1); + ScheduledFuture refreshHdfsCacheThread = exec.scheduleWithFixedDelay( + new Runnable() { + @Override + public void run() { + try { + loadImageToHashFiles(); + } catch (Exception e) { + LOG.warn("OCI cache refresh thread caught an exception: ", e); + } + } + }, ociCacheRefreshInterval, ociCacheRefreshInterval, 
TimeUnit.SECONDS); + } + + @Override + protected void serviceStop() throws Exception { + super.serviceStop(); + exec.shutdownNow(); + } + + private class LRUCache extends LinkedHashMap { + private int cacheSize; + + LRUCache(int initialCapacity, float loadFactor) { + super(initialCapacity, loadFactor, true); + this.cacheSize = initialCapacity; + } + + @Override + protected boolean removeEldestEntry( + Map.Entry eldest) { + return this.size() > cacheSize; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java index 7381f7a2e2a..e7718c9ad64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java @@ -1,5 +1,4 @@ /* - * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +14,6 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; @@ -23,10 +21,13 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime; +import java.io.IOException; import java.util.Map; /** @@ -55,5 +56,13 @@ * @return whether this runtime is requested */ boolean isRuntimeRequested(Map env); + + default void start() {} + + default void stop() {} + + default Map getLocalResources(Container container) throws IOException { + return container.getLaunchContext().getLocalResources(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java index fc86b17ed9e..cbbbe96828f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java @@ -37,7 +37,8 @@ private LinuxContainerRuntimeConstants() { public enum RuntimeType { 
DEFAULT, DOCKER, - JAVASANDBOX; + JAVASANDBOX, + RUNC; } public static final Attribute LOCALIZED_RESOURCES = Attribute diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerExecutorConfig.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerExecutorConfig.java new file mode 100644 index 00000000000..9337d7981f8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerExecutorConfig.java @@ -0,0 +1,1334 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.codehaus.jackson.annotate.JsonRawValue; +import org.codehaus.jackson.map.annotate.JsonSerialize; + +import java.util.List; +import java.util.Map; + +@JsonSerialize(include=JsonSerialize.Inclusion.NON_DEFAULT) +public class OCIContainerExecutorConfig { + final private String version; + final private String username; + final private String containerId; + final private String applicationId; + final private String pidFile; + final private String containerScriptPath; + final private String containerCredentialsPath; + final private int https; + final private String keystorePath; + final private String truststorePath; + final private List localDirs; + final private List logDirs; + final private List layers; + final private int reapLayerKeepCount; + final private OCIRuntimeConfig ociRuntimeConfig; + + public String getVersion() { + return version; + } + + public String getUsername() { + return username; + } + + public String getContainerId() { + return containerId; + } + + public String getApplicationId() { + return applicationId; + } + + public String getPidFile() { + return pidFile; + } + + public String getContainerScriptPath() { + return containerScriptPath; + } + + public String getContainerCredentialsPath() { + return containerCredentialsPath; + } + + public int getHttps() { + return https; + } + public String getKeystorePath() { + return keystorePath; + } + public String getTruststorePath() { + return truststorePath; + } + public List getLocalDirs() { + return localDirs; + } + + public List getLogDirs() { + return logDirs; + } + + public List getLayers() { + return layers; + } + + public int getReapLayerKeepCount() { + return reapLayerKeepCount; + } + + public OCIRuntimeConfig getOciRuntimeConfig() { + return ociRuntimeConfig; + } + + public OCIContainerExecutorConfig() { + this(null, null, null, null, null, null, null, 0, null, null, null, null, null, 0, null); + 
} + + public OCIContainerExecutorConfig(String username, + String containerId, String applicationId, + String pidFile, String containerScriptPath, String containerCredentialsPath, + int https, String keystorePath, String truststorePath, + List localDirs, + List logDirs, List layers, int reapLayerKeepCount, + OCIRuntimeConfig ociRuntimeConfig) { + this("0.1", username, containerId, applicationId, pidFile, + containerScriptPath, containerCredentialsPath, https, keystorePath, + truststorePath, localDirs, logDirs, + layers, reapLayerKeepCount, ociRuntimeConfig); + } + + public OCIContainerExecutorConfig(String version, String username, + String containerId, String applicationId, + String pidFile, String containerScriptPath, String containerCredentialsPath, + int https, String keystorePath, String truststorePath, + List localDirs, + List logDirs, List layers, int reapLayerKeepCount, + OCIRuntimeConfig ociRuntimeConfig) { + this.version = version; + this.username = username; + this.containerId = containerId; + this.applicationId = applicationId; + this.pidFile = pidFile; + this.containerScriptPath = containerScriptPath; + this.containerCredentialsPath = containerCredentialsPath; + this.https = https; + this.keystorePath = keystorePath; + this.truststorePath = truststorePath; + this.localDirs = localDirs; + this.logDirs = logDirs; + this.layers = layers; + this.reapLayerKeepCount = reapLayerKeepCount; + this.ociRuntimeConfig = ociRuntimeConfig; + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + public static class OCILayer { + final private String mediaType; + final private String path; + + public String getMediaType() { + return mediaType; + } + + public String getPath() { + return path; + } + + public OCILayer(String mediaType, String path) { + this.mediaType = mediaType; + this.path = path; + } + + public OCILayer() { + this(null, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + public static class OCIRuntimeConfig 
{ + final private OCIRootConfig root; + final private List mounts; + final private OCIProcessConfig process; + final private OCIHooksConfig hooks; + final private OCIAnnotationsConfig annotations; + final private OCILinuxConfig linux; + + public OCIRootConfig getRoot() { + return root; + } + + public List getMounts() { + return mounts; + } + + public OCIProcessConfig getProcess() { + return process; + } + + public String getHostname() { + return hostname; + } + + public OCIHooksConfig getHooks() { + return hooks; + } + + public OCIAnnotationsConfig getAnnotations() { + return annotations; + } + + public OCILinuxConfig getLinux() { + return linux; + } + + final private String hostname; + + public OCIRuntimeConfig() { + this(null, null, null, null, null, null, null); + } + + + public OCIRuntimeConfig(OCIRootConfig root, List mounts, + OCIProcessConfig process, String hostname, OCIHooksConfig hooks, OCIAnnotationsConfig annotations, + OCILinuxConfig linux) { + this.root = root; + this.mounts = mounts; + this.process = process; + this.hostname = hostname; + this.hooks = hooks; + this.annotations = annotations; + this.linux= linux; + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class OCIRootConfig { + public String getPath() { + return path; + } + + public boolean isReadonly() { + return readonly; + } + + final private String path; + final private boolean readonly; + + public OCIRootConfig(String path, boolean readonly) { + this.path = path; + this.readonly = readonly; + } + + public OCIRootConfig() { + this(null, false); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class OCIMount { + final private String destination; + final private String type; + final private String source; + final private List options; + + public String getDestination() { + return destination; + } + + public String getType() { + return type; + } + + public String getSource() { + return source; + } + + public List getOptions() { + 
return options; + } + + public OCIMount(String destination, String type, String source, List options) { + this.destination = destination; + this.type = type; + this.source = source; + this.options = options; + } + + public OCIMount(String destination, String source, List options) { + this.destination = destination; + this.type = null; + this.source = source; + this.options = options; + } + + public OCIMount() { + this(null, null, null, null); + } + } + + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class OCIProcessConfig { + final private boolean terminal; + final private ConsoleSize consoleSize; + final private String cwd; + final private List env; + final private List args; + final private RLimits rlimits; + final private String apparmorProfile; + final private Capabilities capabilities; + final private boolean noNewPrivileges; + final private int oomScoreAdj; + final private String selinuxLabel; + final private User user; + + public boolean isTerminal() { + return terminal; + } + + public ConsoleSize getConsoleSize() { + return consoleSize; + } + + public String getCwd() { + return cwd; + } + + public List getEnv() { + return env; + } + + public List getArgs() { + return args; + } + + public RLimits getRlimits() { + return rlimits; + } + + public String getApparmorProfile() { + return apparmorProfile; + } + + public Capabilities getCapabilities() { + return capabilities; + } + + public boolean isNoNewPrivileges() { + return noNewPrivileges; + } + + public int getOomScoreAdj() { + return oomScoreAdj; + } + + public String getSelinuxLabel() { + return selinuxLabel; + } + + public User getUser() { + return user; + } + + + public OCIProcessConfig(boolean terminal, ConsoleSize consoleSize, String cwd, + List env, List args, RLimits rlimits, + String apparmorProfile, Capabilities capabilities, boolean noNewPrivileges, + int oomScoreAdj, String selinuxLabel, User user) { + this.terminal = terminal; + this.consoleSize = consoleSize; + 
this.cwd = cwd; + this.env = env; + this.args = args; + this.rlimits = rlimits; + this.apparmorProfile = apparmorProfile; + this.capabilities = capabilities; + this.noNewPrivileges = noNewPrivileges; + this.oomScoreAdj = oomScoreAdj; + this.selinuxLabel = selinuxLabel; + this.user = user; + } + + public OCIProcessConfig() { + this(false, null, null, null, null, null, null, null, false, 0, null, null); + } + + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class ConsoleSize { + public int getHeight() { + return height; + } + + public int getWidth() { + return width; + } + + final private int height; + + public ConsoleSize(int height, int width) { + this.height = height; + this.width = width; + } + + public ConsoleSize() { + this(0, 0); + } + + final private int width; + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class RLimits { + public String getType() { + return type; + } + + public long getSoft() { + return soft; + } + + public long getHard() { + return hard; + } + + final private String type; + + public RLimits(String type, long soft, long hard) { + this.type = type; + this.soft = soft; + this.hard = hard; + } + + public RLimits() { + this(null, 0, 0); + } + + final private long soft; + final private long hard; + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Capabilities { + final private List effective; + final private List bounding; + final private List inheritable; + final private List permitted; + final private List ambient; + + public List getEffective() { + return effective; + } + + public List getBounding() { + return bounding; + } + + public List getInheritable() { + return inheritable; + } + + public List getPermitted() { + return permitted; + } + + public List getAmbient() { + return ambient; + } + + + public Capabilities(List effective, List bounding, + List inheritable, List permitted, + List ambient) { + this.effective = effective; + this.bounding = 
bounding; + this.inheritable = inheritable; + this.permitted = permitted; + this.ambient = ambient; + } + + public Capabilities() { + this(null, null, null, null, null); + } + + } + + static class User { + final private int uid; + final private int gid; + final private int[] additionalGids; + + public User(int uid, int gid, int[] additionalGids) { + this.uid = uid; + this.gid = gid; + this.additionalGids = additionalGids; + } + + public User() { + this(0, 0, null); + } + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class OCIHooksConfig { + final private List prestart; + final private List poststart; + final private List poststop; + + public List getPrestart() { + return prestart; + } + + public List getPoststart() { + return poststart; + } + + public List getPoststop() { + return poststop; + } + + public OCIHooksConfig(List prestart, List poststart, List poststop) { + this.prestart = prestart; + this.poststart = poststart; + this.poststop = poststop; + } + + public OCIHooksConfig() { + this(null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class HookType { + final private String path; + final private List args; + final private List env; + final private int timeout; + + public String getPath() { + return path; + } + + public List getArgs() { + return args; + } + + public List getEnv() { + return env; + } + + public int getTimeout() { + return timeout; + } + + public HookType(String path, List args, List env, int timeout) { + this.path = path; + this.args = args; + this.env = env; + this.timeout = timeout; + } + + public HookType() { + this(null, null, null, 0); + } + + } + } + + static class OCIAnnotationsConfig { + Map annotations; + + public OCIAnnotationsConfig(Map annotations) { + this.annotations = annotations; + } + public OCIAnnotationsConfig() { + this(null); + } + + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class OCILinuxConfig { + final 
private List namespaces; + final private List uidMappings; + final private List gidMappings; + final private List devices; + final private String cgroupsPath; + final private Resources resources; + final private IntelRdt intelRdt; + final private Sysctl sysctl; + @JsonRawValue + final private String seccomp; + final private String rootfsPropagation; + final private List maskedPaths; + final private List readonlyPaths; + final private String mountLabel; + + public List getNamespaces() { + return namespaces; + } + + public List getUidMappings() { + return uidMappings; + } + + public List getGidMappings() { + return gidMappings; + } + + public List getDevices() { + return devices; + } + + public String getCgroupsPath() { + return cgroupsPath; + } + + public Resources getResources() { + return resources; + } + + public IntelRdt getIntelRdt() { + return intelRdt; + } + + public Sysctl getSysctl() { + return sysctl; + } + + public String getSeccomp() { + return seccomp; + } + + public String getRootfsPropagation() { + return rootfsPropagation; + } + + public List getMaskedPaths() { + return maskedPaths; + } + + public List getReadonlyPaths() { + return readonlyPaths; + } + + public String getMountLabel() { + return mountLabel; + } + + public OCILinuxConfig(List namespaces, List uidMappings, + List gidMappings, List devices, + String cgroupsPath, Resources resources, IntelRdt intelRdt, + Sysctl sysctl, String seccomp, String rootfsPropagation, + List maskedPaths, List readonlyPaths, + String mountLabel) { + this.namespaces = namespaces; + this.uidMappings = uidMappings; + this.gidMappings = gidMappings; + this.devices = devices; + this.cgroupsPath = cgroupsPath; + this.resources = resources; + this.intelRdt = intelRdt; + this.sysctl = sysctl; + this.seccomp = seccomp; + this.rootfsPropagation = rootfsPropagation; + this.maskedPaths = maskedPaths; + this.readonlyPaths = readonlyPaths; + this.mountLabel = mountLabel; + } + + public OCILinuxConfig() { + this(null, null, 
null, null, null, null, null, null, null, null, null, null, null); + } + + static class Namespace { + final private String type; + final private String path; + + public Namespace(String type, String path) { + this.type = type; + this.path = path; + } + + public Namespace() { + this(null, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class IDMapping { + final private int containerID; + final private int hostID; + final private int size; + + public int getContainerID() { + return containerID; + } + + public int getHostID() { + return hostID; + } + + public int getSize() { + return size; + } + + public IDMapping(int containerID, int hostID, int size) { + this.containerID = containerID; + this.hostID = hostID; + this.size = size; + } + public IDMapping() { + this(0, 0, 0); + } + + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Device { + final private String type; + final private String path; + final private long major; + final private long minor; + final private int fileMode; + final private int uid; + final private int gid; + + public String getType() { + return type; + } + + public String getPath() { + return path; + } + + public long getMajor() { + return major; + } + + public long getMinor() { + return minor; + } + + public int getFileMode() { + return fileMode; + } + + public int getUid() { + return uid; + } + + public int getGid() { + return gid; + } + + public Device(String type, String path, long major, long minor, + int fileMode, int uid, int gid) { + this.type = type; + this.path = path; + this.major = major; + this.minor = minor; + this.fileMode = fileMode; + this.uid = uid; + this.gid = gid; + } + + public Device() { + this(null, null, 0, 0, 0, 0, 0); + } + + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Resources { + final private List device; + final private Memory memory; + final private CPU cpu; + final private BlockIO blockIO; + final 
private List hugePageLimits; + final private Network network; + final private PID pid; + final private RDMA rdma; + + public List getDevice() { + return device; + } + + public Memory getMemory() { + return memory; + } + + public CPU getCPU() { + return cpu; + } + + public BlockIO getBlockIO() { + return blockIO; + } + + public List getHugePageLimits() { + return hugePageLimits; + } + + public Network getNetwork() { + return network; + } + + public PID getPID() { + return pid; + } + + public RDMA getRDMA() { + return rdma; + } + + public Resources(List device, + Memory memory, CPU cpu, + BlockIO blockIO, List hugePageLimits, + Network network, PID pid, + RDMA rdma) { + this.device = device; + this.memory = memory; + this.cpu = cpu; + this.blockIO = blockIO; + this.hugePageLimits = hugePageLimits; + this.network = network; + this.pid = pid; + this.rdma = rdma; + } + + public Resources() { + this(null, null, null, null, null, null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Device { + final private boolean allow; + final private String type; + final private long major; + final private long minor; + final private String access; + + public boolean isAllow() { + return allow; + } + + public String getType() { + return type; + } + + public long getMajor() { + return major; + } + + public long getMinor() { + return minor; + } + + public String getAccess() { + return access; + } + + public Device(boolean allow, String type, long major, long minor, String access) { + this.allow = allow; + this.type = type; + this.major = major; + this.minor = minor; + this.access = access; + } + + public Device() { + this(false, null, 0, 0, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Memory { + final private long limit; + final private long reservation; + final private long swap; + final private long kernel; + final private long kernelTCP; + final private long swappiness; + final 
private boolean disableOOMKiller; + + public long getLimit() { + return limit; + } + + public long getReservation() { + return reservation; + } + + public long getSwap() { + return swap; + } + + public long getKernel() { + return kernel; + } + + public long getKernelTCP() { + return kernelTCP; + } + + public long getSwappiness() { + return swappiness; + } + + public boolean isDisableOOMKiller() { + return disableOOMKiller; + } + + public Memory(long limit, long reservation, long swap, + long kernel, long kernelTCP, long swappiness, + boolean disableOOMKiller) { + this.limit = limit; + this.reservation = reservation; + this.swap = swap; + this.kernel = kernel; + this.kernelTCP = kernelTCP; + this.swappiness = swappiness; + this.disableOOMKiller = disableOOMKiller; + } + + public Memory() { + this(0, 0, 0, 0, 0, 0, false); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class CPU { + final private long quota; + final private long period; + final private long realtimeRuntime; + final private long realtimePeriod; + final private String cpus; + final private String mems; + + public long getShares() { + return shares; + } + + public long getQuota() { + return quota; + } + + public long getPeriod() { + return period; + } + + public long getRealtimeRuntime() { + return realtimeRuntime; + } + + public long getRealtimePeriod() { + return realtimePeriod; + } + + public String getCpus() { + return cpus; + } + + public String getMems() { + return mems; + } + + final private long shares; + + public CPU(long shares, long quota, long period, + long realtimeRuntime, long realtimePeriod, + String cpus, String mems) { + this.shares = shares; + this.quota = quota; + this.period = period; + this.realtimeRuntime = realtimeRuntime; + this.realtimePeriod = realtimePeriod; + this.cpus = cpus; + this.mems = mems; + } + + public CPU() { + this(0, 0, 0, 0, 0, null, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static 
class BlockIO { + final private int weight; + final private int leafWeight; + final private List weightDevices; + final private List throttleReadBpsDevice; + final private List throttleWriteBpsDevice; + final private List throttleReadIOPSDevice; + final private List throttleWriteIOPSDevice; + + public int getWeight() { + return weight; + } + + public int getLeafWeight() { + return leafWeight; + } + + public List getWeightDevices() { + return weightDevices; + } + + public List getThrottleReadBpsDevice() { + return throttleReadBpsDevice; + } + + public List getThrottleWriteBpsDevice() { + return throttleWriteBpsDevice; + } + + public List getThrottleReadIOPSDevice() { + return throttleReadIOPSDevice; + } + + public List getThrottleWriteIOPSDevice() { + return throttleWriteIOPSDevice; + } + + public BlockIO(int weight, int leafWeight, List weightDevices, + List throttleReadBpsDevice, + List throttleWriteBpsDevice, + List throttleReadIOPSDevice, + List throttleWriteIOPSDevice) { + this.weight = weight; + this.leafWeight = leafWeight; + this.weightDevices = weightDevices; + this.throttleReadBpsDevice = throttleReadBpsDevice; + this.throttleWriteBpsDevice = throttleWriteBpsDevice; + this.throttleReadIOPSDevice = throttleReadIOPSDevice; + this.throttleWriteIOPSDevice = throttleWriteIOPSDevice; + } + + public BlockIO() { + this(0, 0, null, null, null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class WeightDevice { + final private long major; + final private long minor; + final private int weight; + final private int leafWeight; + + public long getMajor() { + return major; + } + + public long getMinor() { + return minor; + } + + public int getWeight() { + return weight; + } + + public int getLeafWeight() { + return leafWeight; + } + + public WeightDevice(long major, long minor, int weight, int leafWeight) { + this.major = major; + this.minor = minor; + this.weight = weight; + this.leafWeight = leafWeight; + } + + public 
WeightDevice() { + this(0, 0, 0, 0); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class ThrottleDevice { + final private long major; + final private long minor; + final private long rate; + + public long getMajor() { + return major; + } + + public long getMinor() { + return minor; + } + + public long getRate() { + return rate; + } + + public ThrottleDevice(long major, long minor, long rate) { + this.major = major; + this.minor = minor; + this.rate = rate; + } + + public ThrottleDevice() { + this(0, 0, 0); + } + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class HugePageLimits { + final private String pageSize; + final private long limit; + + public String getPageSize() { + return pageSize; + } + + public long getLimit() { + return limit; + } + + public HugePageLimits(String pageSize, long limit) { + this.pageSize = pageSize; + this.limit = limit; + } + + public HugePageLimits() { + this(null, 0); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Network { + final private int classID; + final private List priorities; + + public int getClassID() { + return classID; + } + + public List getPriorities() { + return priorities; + } + + public Network(int classID, List priorities) { + this.classID = classID; + this.priorities = priorities; + } + + public Network() { + this(0, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class NetworkPriority { + final private String name; + final private int priority; + + public String getName() { + return name; + } + + public int getPriority() { + return priority; + } + + public NetworkPriority(String name, int priority) { + this.name = name; + this.priority = priority; + } + + public NetworkPriority() { + this(null, 0); + } + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class PID { + final private long limit; + + public long getLimit() { + return 
limit; + } + + public PID(long limit) { + this.limit = limit; + } + + public PID() { + this(0); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class RDMA { + final private int hcaHandles; + final private int hcaObjects; + + public int getHcaHandles() { + return hcaHandles; + } + + public int getHcaObjects() { + return hcaObjects; + } + + public RDMA(int hcaHandles, int hcaObjects) { + this.hcaHandles = hcaHandles; + this.hcaObjects = hcaObjects; + } + + public RDMA() { + this(0, 0); + } + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class IntelRdt { + final private String closID; + final private String l3CacheSchema; + final private String memBwSchema; + + public String getClosID() { + return closID; + } + + public String getL3CacheSchema() { + return l3CacheSchema; + } + + public String getMemBwSchema() { + return memBwSchema; + } + + public IntelRdt(String closID, String l3CacheSchema, String memBwSchema) { + this.closID = closID; + this.l3CacheSchema = l3CacheSchema; + this.memBwSchema = memBwSchema; + } + + public IntelRdt() { + this(null, null, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Sysctl { + // for kernel params + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Seccomp { + final private String defaultAction; + final private List architectures; + final private List syscalls; + + public String getDefaultAction() { + return defaultAction; + } + + public List getArchitectures() { + return architectures; + } + + public List getSyscalls() { + return syscalls; + } + + public Seccomp(String defaultAction, List architectures, List syscalls) { + this.defaultAction = defaultAction; + this.architectures = architectures; + this.syscalls = syscalls; + } + + public Seccomp() { + this(null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Syscall { + final private 
List names; + final private String action; + final private List args; + + public List getNames() { + return names; + } + + public String getAction() { + return action; + } + + public List getArgs() { + return args; + } + + public Syscall(List names, String action, List args) { + this.names = names; + this.action = action; + this.args = args; + } + + public Syscall() { + this(null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class SeccompArg { + final private int index; + final private long value; + final private long valueTwo; + final private String op; + + public int getIndex() { + return index; + } + + public long getValue() { + return value; + } + + public long getValueTwo() { + return valueTwo; + } + + public String getOp() { + return op; + } + + public SeccompArg(int index, long value, long valueTwo, String op) { + this.index = index; + this.value = value; + this.valueTwo = valueTwo; + this.op = op; + } + + public SeccompArg() { + this(0, 0, 0, null); + } + } + } + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerRuntime.java index 45105f7ad17..530f66e4d01 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerRuntime.java @@ -53,6 +53,7 @@ import java.util.regex.Pattern; import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime.isDockerContainerRequested; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.RuncContainerRuntime.isRuncContainerRequested; /** *

    This class is a {@link ContainerRuntime} implementation that uses the @@ -117,7 +118,8 @@ public void initialize(Configuration conf, Context nmContext) public static boolean isOCICompliantContainerRequested( Configuration daemonConf, Map env) { - return isDockerContainerRequested(daemonConf, env); + return isDockerContainerRequested(daemonConf, env) || + isRuncContainerRequested(daemonConf, env); } @VisibleForTesting diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java new file mode 100644 index 00000000000..5797b0bf775 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java @@ -0,0 +1,839 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; +import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; +import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCILayer; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig.OCILinuxConfig; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig.OCIMount; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig.OCIProcessConfig; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.volume.csi.ContainerVolumePublisher; +import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; +import org.codehaus.jackson.JsonNode; +import org.codehaus.jackson.map.ObjectMapper; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN; +import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_REAP_RUNC_LAYER_MOUNTS_INTERVAL; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_NM_RUNC_MANIFEST_TO_RESOURCES_PLUGIN; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_RUNC_LAYER_MOUNTS_TO_KEEP; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_RUNC_MANIFEST_TO_RESOURCES_PLUGIN; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_REAP_RUNC_LAYER_MOUNTS_INTERVAL; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; +/** + *

    This class is an extension of {@link OCIContainerRuntime} that uses the + * native {@code container-executor} binary via a + * {@link PrivilegedOperationExecutor} instance to launch processes inside + * Runc containers.

    + * + *

    The following environment variables are used to configure the Runc + * engine:

    + * + *
      + *
    • + * {@code YARN_CONTAINER_RUNTIME_RUNC_IMAGE} names which image + * will be used to launch the Runc container. + *
    • + *
    • + * {@code YARN_CONTAINER_RUNTIME_RUNC_CONTAINER_PID_NAMESPACE} + * controls which PID namespace will be used by the Runc container. By + * default, each Runc container has its own PID namespace. To share the + * namespace of the host, the + * {@code yarn.nodemanager.runtime.linux.runc.host-pid-namespace.allowed} + * property must be set to {@code true}. If the host PID namespace is + * allowed and this environment variable is set to {@code host}, the + * Runc container will share the host's PID namespace. No other value is + * allowed. + *
    • + *
    • + * {@code YARN_CONTAINER_RUNTIME_RUNC_CONTAINER_HOSTNAME} sets the + * hostname to be used by the Runc container. If not specified, a + * hostname will be derived from the container ID and set as default + * hostname for networks other than 'host'. + *
    • + *
    • + * {@code YARN_CONTAINER_RUNTIME_RUNC_RUN_PRIVILEGED_CONTAINER} + * controls whether the Runc container is a privileged container. In order + * to use privileged containers, the + * {@code yarn.nodemanager.runtime.linux.runc.privileged-containers.allowed} + * property must be set to {@code true}, and the application owner must + * appear in the value of the + * {@code yarn.nodemanager.runtime.linux.runc.privileged-containers.acl} + * property. If this environment variable is set to {@code true}, a + * privileged Runc container will be used if allowed. No other value is + * allowed, so the environment variable should be left unset rather than + * setting it to false. + *
    • + *
    + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class RuncContainerRuntime extends OCIContainerRuntime { + + private static final Log LOG = LogFactory.getLog( + RuncContainerRuntime.class); + + @InterfaceAudience.Private + private static final String RUNTIME_TYPE = "RUNC"; + + @InterfaceAudience.Private + public static final String ENV_RUNC_CONTAINER_IMAGE = + "YARN_CONTAINER_RUNTIME_RUNC_IMAGE"; + @InterfaceAudience.Private + public static final String ENV_RUNC_CONTAINER_HOSTNAME = + "YARN_CONTAINER_RUNTIME_RUNC_CONTAINER_HOSTNAME"; + + @InterfaceAudience.Private + public final static String ENV_RUNC_CONTAINER_PID_NAMESPACE = + formatOciEnvKey(RUNTIME_TYPE, CONTAINER_PID_NAMESPACE_SUFFIX); + @InterfaceAudience.Private + public final static String ENV_RUNC_CONTAINER_RUN_PRIVILEGED_CONTAINER = + formatOciEnvKey(RUNTIME_TYPE, RUN_PRIVILEGED_CONTAINER_SUFFIX); + + private Configuration conf; + private Context nmContext; + private PrivilegedOperationExecutor privilegedOperationExecutor; + private CGroupsHandler cGroupsHandler; + private RuncImageTagToManifestPlugin imageTagToManifestPlugin; + private RuncManifestToResourcesPlugin manifestToResourcesPlugin; + private ObjectMapper mapper; + private String seccomp; + private int layersToKeep; + private String defaultRuncImage; + private ScheduledExecutorService exec; + private String seccompProfile; + private Set defaultROMounts = new HashSet<>(); + private Set defaultRWMounts = new HashSet<>(); + private Set allowedNetworks = new HashSet<>(); + private Set allowedRuntimes = new HashSet<>(); + private AccessControlList privilegedContainersAcl; + + public RuncContainerRuntime(PrivilegedOperationExecutor + privilegedOperationExecutor) { + this(privilegedOperationExecutor, ResourceHandlerModule + .getCGroupsHandler()); + } + + //A constructor with an injected cGroupsHandler primarily used for testing. 
+ @VisibleForTesting + public RuncContainerRuntime(PrivilegedOperationExecutor + privilegedOperationExecutor, CGroupsHandler cGroupsHandler) { + super(privilegedOperationExecutor, cGroupsHandler); + this.privilegedOperationExecutor = privilegedOperationExecutor; + + if (cGroupsHandler == null) { + LOG.info("cGroupsHandler is null - cgroups not in use."); + } else { + this.cGroupsHandler = cGroupsHandler; + } + } + + @Override + public void initialize(Configuration conf, Context nmContext) + throws ContainerExecutionException { + super.initialize(conf, nmContext); + this.conf = conf; + this.nmContext = nmContext; + imageTagToManifestPlugin = chooseImageTagToManifestPlugin(); + imageTagToManifestPlugin.init(conf); + manifestToResourcesPlugin = chooseManifestToResourcesPlugin(); + manifestToResourcesPlugin.init(conf); + mapper = new ObjectMapper(); + defaultRuncImage = conf.get(YarnConfiguration.NM_RUNC_IMAGE_NAME); + + allowedNetworks.clear(); + allowedRuntimes.clear(); + + allowedNetworks.addAll(Arrays.asList( + conf.getTrimmedStrings( + YarnConfiguration.NM_RUNC_ALLOWED_CONTAINER_NETWORKS, + YarnConfiguration.DEFAULT_NM_RUNC_ALLOWED_CONTAINER_NETWORKS))); + + allowedRuntimes.addAll(Arrays.asList( + conf.getTrimmedStrings( + YarnConfiguration.NM_RUNC_ALLOWED_CONTAINER_RUNTIMES, + YarnConfiguration.DEFAULT_NM_RUNC_ALLOWED_CONTAINER_RUNTIMES))); + + privilegedContainersAcl = new AccessControlList(conf.getTrimmed( + YarnConfiguration.NM_RUNC_PRIVILEGED_CONTAINERS_ACL, + YarnConfiguration.DEFAULT_NM_RUNC_PRIVILEGED_CONTAINERS_ACL)); + + seccompProfile = conf.get(YarnConfiguration.NM_RUNC_SECCOMP_PROFILE); + + defaultROMounts.addAll(Arrays.asList( + conf.getTrimmedStrings( + YarnConfiguration.NM_RUNC_DEFAULT_RO_MOUNTS))); + + defaultRWMounts.addAll(Arrays.asList( + conf.getTrimmedStrings( + YarnConfiguration.NM_RUNC_DEFAULT_RW_MOUNTS))); + + try { + //TODO Remove whitespace in seccomp that gets output to config.json + if (seccompProfile != null) { + seccomp = new 
String(Files.readAllBytes(Paths.get(seccompProfile))); + } + } catch (IOException ioe) { + throw new ContainerExecutionException(ioe); + } + + layersToKeep = conf.getInt(NM_RUNC_LAYER_MOUNTS_TO_KEEP, + DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP); + + } + + @Override + public void start() { + int reapRuncLayerMountsInterval = + conf.getInt(NM_REAP_RUNC_LAYER_MOUNTS_INTERVAL, + DEFAULT_NM_REAP_RUNC_LAYER_MOUNTS_INTERVAL); + exec = HadoopExecutors.newScheduledThreadPool(1); + exec.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + try { + PrivilegedOperation launchOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.REAP_RUNC_LAYER_MOUNTS); + launchOp.appendArgs(Integer.toString(layersToKeep)); + try { + String stdout = privilegedOperationExecutor + .executePrivilegedOperation(null, + launchOp, null, null, false, false); + if(stdout != null) { + LOG.info("Reap layer mounts thread: " + stdout); + } + } catch (PrivilegedOperationException e) { + LOG.warn("Failed to reap old runc layer mounts", e); + } + } catch (Exception e) { + LOG.warn("Reap layer mount thread caught an exception: ", e); + } + } + }, 0, reapRuncLayerMountsInterval, TimeUnit.SECONDS); + imageTagToManifestPlugin.start(); + manifestToResourcesPlugin.start(); + } + + @Override + public void stop() { + exec.shutdownNow(); + imageTagToManifestPlugin.stop(); + manifestToResourcesPlugin.stop(); + } + + @Override + public void launchContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + List env = new ArrayList<>(); + Container container = ctx.getContainer(); + String user = container.getUser(); + ContainerId containerId = container.getContainerId(); + ApplicationId appId = containerId.getApplicationAttemptId() + .getApplicationId(); + + Map environment = container.getLaunchContext() + .getEnvironment(); + ArrayList mounts = new ArrayList<>(); + ArrayList layers = new ArrayList<>(); + String hostname = environment.get(ENV_RUNC_CONTAINER_HOSTNAME); + + 
validateHostname(hostname); + + String containerIdStr = containerId.toString(); + String applicationId = appId.toString(); + Path containerWorkDir = ctx.getExecutionAttribute(CONTAINER_WORK_DIR); + + ContainerRuntimeObject containerRuntimeObject = + (ContainerRuntimeObject) container.getContainerRuntimeData(); + List layerResources = containerRuntimeObject.getOCILayers(); + + ResourceLocalizationService localizationService = + nmContext.getContainerManager().getResourceLocalizationService(); + + List args = new ArrayList<>(); + + try { + try { + LocalResource rsrc = containerRuntimeObject.getConfig(); + LocalResourceRequest req = new LocalResourceRequest(rsrc); + LocalizedResource localRsrc = localizationService + .getLocalizedResource(req, user, appId); + if (localRsrc == null) { + throw new ContainerExecutionException("Could not successfully " + + "localize layers. rsrc: " + rsrc.getResource().getFile()); + } + + File file = new File(localRsrc.getLocalPath().toString()); + List imageEnv = extractImageEnv(file); + if (imageEnv != null && !imageEnv.isEmpty()) { + env.addAll(imageEnv); + } + List entrypoint = extractImageEntrypoint(file); + if (entrypoint != null && !entrypoint.isEmpty()) { + args.addAll(entrypoint); + } + } catch (IOException ioe) { + throw new ContainerExecutionException(ioe); + } + + for (LocalResource rsrc : layerResources) { + LocalResourceRequest req = new LocalResourceRequest(rsrc); + LocalizedResource localRsrc = localizationService + .getLocalizedResource(req, user, appId); + + OCILayer layer = new OCILayer("application/vnd.squashfs", + localRsrc.getLocalPath().toString()); + layers.add(layer); + } + } catch (URISyntaxException e) { + throw new ContainerExecutionException(e); + } + + setContainerMounts(mounts, ctx, containerWorkDir); + + String resourcesOpts = ctx.getExecutionAttribute(RESOURCES_OPTIONS); + + Path nmPrivateContainerScriptPath = ctx.getExecutionAttribute( + NM_PRIVATE_CONTAINER_SCRIPT_PATH); + + Path nmPrivateTokensPath = + 
ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH); + + int cpuShares = container.getResource().getVirtualCores(); + + // Zero sets to default of 1024. 2 is the minimum value otherwise + if (cpuShares < 2) { + cpuShares = 2; + } + + Path launchDst = + new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT); + + args.add("bash"); + args.add(launchDst.toUri().getPath()); + + String cgroupPath = getCgroupPath(resourcesOpts, "runc-" + containerIdStr); + + String pidFile = ctx.getExecutionAttribute(PID_FILE_PATH).toString(); + + @SuppressWarnings("unchecked") + List localDirs = ctx.getExecutionAttribute(LOCAL_DIRS); + @SuppressWarnings("unchecked") + List logDirs = ctx.getExecutionAttribute(LOG_DIRS); + + Path keystorePath = ctx.getExecutionAttribute(NM_PRIVATE_KEYSTORE_PATH); + Path truststorePath = ctx.getExecutionAttribute(NM_PRIVATE_TRUSTSTORE_PATH); + + int https = 0; + String keystore = null; + String truststore = null; + + if (keystorePath != null && truststorePath != null) { + https = 1; + keystore = keystorePath.toUri().getPath(); + truststore = truststorePath.toUri().getPath(); + } + + OCIProcessConfig processConfig = createOCIProcessConfig( + containerWorkDir.toString(), env, args); + OCILinuxConfig linuxConfig = createOCILinuxConfig(cpuShares, + cgroupPath, seccomp); + + OCIRuntimeConfig ociRuntimeConfig = new OCIRuntimeConfig(null, mounts, + processConfig, hostname, null, null, linuxConfig); + + OCIContainerExecutorConfig ociContainerExecutorConfig = + createOCIContainerExecutorConfig(user, containerIdStr, applicationId, + pidFile, nmPrivateContainerScriptPath.toString(), + nmPrivateTokensPath.toString(), https, keystore, truststore, + localDirs, logDirs, layers, + ociRuntimeConfig); + + String commandFile = writeCommandToFile(mapper, + ociContainerExecutorConfig, container, nmContext); + PrivilegedOperation launchOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.RUN_RUNC_CONTAINER); + + launchOp.appendArgs(commandFile); + + try { + 
privilegedOperationExecutor.executePrivilegedOperation(null, + launchOp, null, null, false, false); + } catch (PrivilegedOperationException e) { + LOG.info("Launch container failed: ", e); + try { + LOG.debug("config.json used: " + + mapper.writeValueAsString(ociContainerExecutorConfig)); + } catch (IOException ioe) { + LOG.info("Json Generation Exception", ioe); + } + + throw new ContainerExecutionException("Launch container failed", e + .getExitCode(), e.getOutput(), e.getErrorOutput()); + } + } + + private String getCgroupPath(String resourcesOptions, String containerIdStr) { + if (cGroupsHandler == null) { + LOG.debug("cGroupsHandler is null. cgroups are not in use. nothing to" + + " do."); + return null; + } + + if (resourcesOptions.equals( + (PrivilegedOperation.CGROUP_ARG_PREFIX + PrivilegedOperation + .CGROUP_ARG_NO_TASKS))) { + LOG.debug("no resource restrictions specified. not using runc's " + + "cgroup options"); + } else { + LOG.debug("using runc's cgroups options"); + + String cGroupPath = "/" + cGroupsHandler.getRelativePathForCGroup( + containerIdStr); + + LOG.debug("using cgroup parent: " + cGroupPath); + + return cGroupPath; + } + return null; + } + + private void addDefaultMountLocation(List mounts, + Set defaultMounts, boolean createSource, boolean isReadWrite) + throws ContainerExecutionException { + if(defaultMounts != null && !defaultMounts.isEmpty()) { + for (String mount : defaultMounts) { + String[] dir = StringUtils.split(mount, ':'); + if (dir.length != 2) { + throw new ContainerExecutionException("Invalid mount : " + + mount); + } + String src = dir[0]; + String dst = dir[1]; + addRuncMountLocation(mounts, src, dst, createSource, isReadWrite); + } + } + } + + private void addRuncMountLocation(List mounts, String srcPath, + String dstPath, boolean createSource, boolean isReadWrite) { + if (!createSource) { + boolean sourceExists = new File(srcPath).exists(); + if (!sourceExists) { + return; + } + } + + ArrayList options = new 
ArrayList<>(); + if (isReadWrite) { + options.add("rw"); + } else { + options.add("ro"); + } + options.add("rbind"); + options.add("rprivate"); + mounts.add(new OCIMount(dstPath, "bind", srcPath, options)); + } + + private void addAllRuncMountLocations(List mounts, + List paths, boolean createSource, boolean isReadWrite) { + for (String dir: paths) { + this.addRuncMountLocation(mounts, dir, dir, createSource, isReadWrite); + } + } + + public Map getLocalResources( + Container container) throws IOException { + Map containerLocalRsrc = + container.getLaunchContext().getLocalResources(); + long layerCount = 0; + Map environment = + container.getLaunchContext().getEnvironment(); + String imageName = environment.get(ENV_RUNC_CONTAINER_IMAGE); + if (imageName == null || imageName.isEmpty()) { + environment.put(ENV_RUNC_CONTAINER_IMAGE, + defaultRuncImage); + imageName = defaultRuncImage; + } + + ImageManifest manifest = + imageTagToManifestPlugin.getManifestFromImageTag(imageName); + LocalResource config = + manifestToResourcesPlugin.getConfigResource(manifest); + List layers = + manifestToResourcesPlugin.getLayerResources(manifest); + + ContainerRuntimeObject containerRuntimeObject = + new ContainerRuntimeObject(config, layers); + container.setContainerRuntimeData(containerRuntimeObject); + + for (LocalResource localRsrc : layers) { + while(containerLocalRsrc.putIfAbsent("runc-layer" + + Long.toString(layerCount++), localRsrc) != null); + } + + while(containerLocalRsrc.putIfAbsent("runc-config" + + Long.toString(layerCount++), config) != null); + + return containerLocalRsrc; + } + + protected RuncImageTagToManifestPlugin chooseImageTagToManifestPlugin() + throws ContainerExecutionException { + String pluginName = + conf.get(NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN, + DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN); + RuncImageTagToManifestPlugin runcImageTagToManifestPlugin; + try { + Class clazz = Class.forName(pluginName); + runcImageTagToManifestPlugin = + 
(RuncImageTagToManifestPlugin) clazz.newInstance(); + } catch (Exception e) { + throw new ContainerExecutionException(e); + } + return runcImageTagToManifestPlugin; + } + + protected RuncManifestToResourcesPlugin chooseManifestToResourcesPlugin() + throws ContainerExecutionException { + String pluginName = + conf.get(NM_RUNC_MANIFEST_TO_RESOURCES_PLUGIN, + DEFAULT_NM_RUNC_MANIFEST_TO_RESOURCES_PLUGIN); + LOG.info("pluginName = " + pluginName); + RuncManifestToResourcesPlugin runcManifestToResourcesPlugin; + try { + Class clazz = Class.forName(pluginName); + runcManifestToResourcesPlugin = + (RuncManifestToResourcesPlugin) clazz.newInstance(); + } catch (Exception e) { + throw new ContainerExecutionException(e); + } + return runcManifestToResourcesPlugin; + } + + @SuppressWarnings("unchecked") + protected List extractImageEnv(File config) throws IOException { + JsonNode node = mapper.readTree(config); + JsonNode envNode = node.path("config").path("Env"); + if (envNode.isMissingNode()) { + return null; + } + return mapper.readValue(envNode, List.class); + } + + @SuppressWarnings("unchecked") + protected List extractImageEntrypoint(File config) throws IOException { + JsonNode node = mapper.readTree(config); + JsonNode entrypointNode = node.path("config").path("Entrypoint"); + if (entrypointNode.isMissingNode()) { + return null; + } + return mapper.readValue(entrypointNode, List.class); + } + + private OCIContainerExecutorConfig createOCIContainerExecutorConfig( + String username, String containerId, String applicationId, String pidFile, + String containerScriptPath, String containerCredentialsPath, + int https, String keystorePath, String truststorePath, + List localDirs, List logDirs, + List layers, OCIRuntimeConfig ociRuntimeConfig) { + + return new OCIContainerExecutorConfig(username, containerId, applicationId, + pidFile, containerScriptPath, containerCredentialsPath, + https, keystorePath, truststorePath, + localDirs, logDirs, layers, layersToKeep, 
ociRuntimeConfig); + } + + private OCIProcessConfig createOCIProcessConfig(String cwd, + List env, List args) { + return new OCIProcessConfig(false, null, cwd, env, + args, null, null, null, false, 0, null, null); + } + + private OCILinuxConfig createOCILinuxConfig(long cpuShares, + String cgroupsPath, String seccomp) { + OCILinuxConfig.Resources.CPU cgroupCPU = + new OCILinuxConfig.Resources.CPU(cpuShares, 0, 0, 0, 0, + null, null); + OCILinuxConfig.Resources cgroupResources = + new OCILinuxConfig.Resources(null, null, cgroupCPU, null, null, null, + null, null); + + return new OCILinuxConfig(null, null, null, null, + cgroupsPath, cgroupResources, null, null, seccomp, null, null, + null, null); + } + + private void setContainerMounts(ArrayList mounts, + ContainerRuntimeContext ctx, Path containerWorkDir) + throws ContainerExecutionException { + @SuppressWarnings("unchecked") + List filecacheDirs = ctx.getExecutionAttribute(FILECACHE_DIRS); + @SuppressWarnings("unchecked") + List containerLogDirs = ctx.getExecutionAttribute( + CONTAINER_LOG_DIRS); + @SuppressWarnings("unchecked") + List userFilecacheDirs = + ctx.getExecutionAttribute(USER_FILECACHE_DIRS); + @SuppressWarnings("unchecked") + List applicationLocalDirs = + ctx.getExecutionAttribute(APPLICATION_LOCAL_DIRS); + + addRuncMountLocation(mounts, containerWorkDir.toString() + + "/private_slash_tmp", "/tmp", true, true); + addRuncMountLocation(mounts, containerWorkDir.toString() + + "/private_var_slash_tmp", "/var/tmp", true, true); + + addAllRuncMountLocations(mounts, containerLogDirs, true, true); + addAllRuncMountLocations(mounts, applicationLocalDirs, true, true); + addAllRuncMountLocations(mounts, filecacheDirs, false, false); + addAllRuncMountLocations(mounts, userFilecacheDirs, false, false); + addDefaultMountLocation(mounts, defaultROMounts, false, false); + addDefaultMountLocation(mounts, defaultRWMounts, false, true); + } + + public String writeCommandToFile(ObjectMapper mapper, + 
OCIContainerExecutorConfig ociContainerExecutorConfig, + Container container, Context nmContext) + throws ContainerExecutionException { + ContainerId containerId = container.getContainerId(); + String filePrefix = containerId.toString(); + ApplicationId appId = containerId.getApplicationAttemptId() + .getApplicationId(); + File commandFile; + try { + File cmdDir = null; + + if(nmContext != null && nmContext.getLocalDirsHandler() != null) { + String cmdDirStr = nmContext.getLocalDirsHandler().getLocalPathForWrite( + ResourceLocalizationService.NM_PRIVATE_DIR + Path.SEPARATOR + + appId + Path.SEPARATOR + filePrefix + Path.SEPARATOR).toString(); + cmdDir = new File(cmdDirStr); + if (!cmdDir.mkdirs() && !cmdDir.exists()) { + throw new IOException("Cannot create container private directory " + + cmdDir); + } + } + commandFile = new File(cmdDir + "/runc-config.json"); + + try { + mapper.writeValue(commandFile, ociContainerExecutorConfig); + } catch (IOException ioe) { + throw new ContainerExecutionException(ioe); + } + + return commandFile.getAbsolutePath(); + } catch (IOException e) { + LOG.warn("Unable to write runc config.json to temporary file!"); + throw new ContainerExecutionException(e); + } + } + + public String getExposedPorts(Container container) { + return null; + } + + public String[] getIpAndHost(Container container) { + return null; + } + + public IOStreamPair execContainer(ContainerExecContext ctx) + throws ContainerExecutionException { + return null; + } + + public void reapContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + } + + public void relaunchContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + } + + + /** + * Return whether the given environment variables indicate that the operation + * is requesting a Runc container. If the environment contains a key + * called {@code YARN_CONTAINER_RUNTIME_TYPE} whose value is {@code runc}, + * this method will return true. Otherwise it will return false. 
+ * + * @param daemonConf the NodeManager daemon configuration + * @param env the environment variable settings for the operation + * @return whether a Runc container is requested + */ + public static boolean isRuncContainerRequested(Configuration daemonConf, + Map env) { + String type = (env == null) + ? null : env.get(ContainerRuntimeConstants.ENV_CONTAINER_TYPE); + if (type == null) { + type = daemonConf.get(YarnConfiguration.LINUX_CONTAINER_RUNTIME_TYPE); + } + return type != null && type.equals( + ContainerRuntimeConstants.CONTAINER_RUNTIME_RUNC); + } + + + @Override + public boolean isRuntimeRequested(Map env) { + return isRuncContainerRequested(conf, env); + } + + @Override + public void signalContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + ContainerExecutor.Signal signal = ctx.getExecutionAttribute(SIGNAL); + Container container = ctx.getContainer(); + + if (signal == ContainerExecutor.Signal.KILL || + signal == ContainerExecutor.Signal.TERM) { + ContainerVolumePublisher publisher = new ContainerVolumePublisher( + container, container.getCsiVolumesRootDir(), this); + try { + publisher.unpublishVolumes(); + } catch (YarnException | IOException e) { + throw new ContainerExecutionException(e); + } + } + + PrivilegedOperation signalOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.SIGNAL_CONTAINER); + + signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER), + ctx.getExecutionAttribute(USER), + Integer.toString(PrivilegedOperation.RunAsUserCommand + .SIGNAL_CONTAINER.getValue()), + ctx.getExecutionAttribute(PID), + Integer.toString(signal.getValue())); + + //Some failures here are acceptable. Let the calling executor decide. 
+ signalOp.disableFailureLogging(); + + try { + PrivilegedOperationExecutor executor = PrivilegedOperationExecutor + .getInstance(conf); + + executor.executePrivilegedOperation(null, + signalOp, null, null, false, false); + } catch (PrivilegedOperationException e) { + //Don't log the failure here. Some kinds of signaling failures are + // acceptable. Let the calling executor decide what to do. + throw new ContainerExecutionException("Signal container failed", e + .getExitCode(), e.getOutput(), e.getErrorOutput()); + } + } + + static class ContainerRuntimeObject { + final List layers; + final LocalResource config; + + public ContainerRuntimeObject(LocalResource config, + List layers) { + this.config = config; + this.layers = layers; + } + + public LocalResource getConfig() { + return this.config; + } + + public List getOCILayers() { + return this.layers; + } + } + + boolean getHostPidNamespaceEnabled() { + return conf.getBoolean( + YarnConfiguration.NM_RUNC_ALLOW_HOST_PID_NAMESPACE, + YarnConfiguration.DEFAULT_NM_RUNC_ALLOW_HOST_PID_NAMESPACE); + } + + boolean getPrivilegedContainersEnabledOnCluster() { + return conf.getBoolean( + YarnConfiguration.NM_RUNC_ALLOW_PRIVILEGED_CONTAINERS, + YarnConfiguration.DEFAULT_NM_RUNC_ALLOW_PRIVILEGED_CONTAINERS); + } + + Set getAllowedNetworks() { + return allowedNetworks; + } + + Set getAllowedRuntimes() { + return allowedRuntimes; + } + + AccessControlList getPrivilegedContainersAcl() { + return privilegedContainersAcl; + } + + String getEnvOciContainerPidNamespace() { + return ENV_RUNC_CONTAINER_PID_NAMESPACE; + } + + String getEnvOciContainerRunPrivilegedContainer() { + return ENV_RUNC_CONTAINER_RUN_PRIVILEGED_CONTAINER; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncImageTagToManifestPlugin.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncImageTagToManifestPlugin.java new file mode 100644 index 00000000000..c629d6b9362 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncImageTagToManifestPlugin.java @@ -0,0 +1,32 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.service.Service; + +import java.io.IOException; + +@InterfaceStability.Unstable +public interface RuncImageTagToManifestPlugin extends Service { + ImageManifest getManifestFromImageTag(String imageTag) throws IOException; + + String getHashFromImageTag(String imageTag); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncManifestToResourcesPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncManifestToResourcesPlugin.java new file mode 100644 index 00000000000..1a703bffba1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncManifestToResourcesPlugin.java @@ -0,0 +1,35 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.service.Service; +import org.apache.hadoop.yarn.api.records.LocalResource; + +import java.io.IOException; +import java.util.List; + +@InterfaceStability.Unstable +public interface RuncManifestToResourcesPlugin extends Service { + //The layers should be returned in the order in which they appear in the manifest + List getLayerResources(ImageManifest manifest) throws IOException; + + LocalResource getConfigResource(ImageManifest manifest) throws IOException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 0494c2d96bb..f8a4b7e49f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -1673,4 +1673,13 @@ private boolean checkLocalDir(String localDir) { localDirPathFsPermissionsMap.put(sysDir, nmPrivatePermission); return localDirPathFsPermissionsMap; } + + public LocalizedResource getLocalizedResource(LocalResourceRequest req, String user, + ApplicationId appId) { + LocalResourcesTracker tracker = getLocalResourcesTracker(req.getVisibility(), user, appId); + if (tracker == null) { + return null; + } + return tracker.getLocalizedResource(req); + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java index a8b295ddb32..83622257c3a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java @@ -34,4 +34,8 @@ @Private public static final String CONTAINER_RUNTIME_DOCKER = "docker"; + + @Private + public static final String CONTAINER_RUNTIME_RUNC = + "runc"; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java index 3fec9596e68..e70f6c5c2cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerRuntime; import org.apache.hadoop.yarn.server.volume.csi.CsiConstants; import org.apache.hadoop.yarn.server.volume.csi.VolumeMetaData; import org.apache.hadoop.yarn.server.volume.csi.exception.InvalidVolumeException; @@ -51,10 +52,10 @@ private final Container container; private final String localMountRoot; - private final DockerLinuxContainerRuntime runtime; + private final OCIContainerRuntime runtime; public ContainerVolumePublisher(Container container, String localMountRoot, - DockerLinuxContainerRuntime runtime) { + OCIContainerRuntime runtime) { LOG.info("Initiate container volume publisher, containerID={}," + " volume local mount rootDir={}", container.getContainerId().toString(), localMountRoot); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java index ae1f574c07f..ae1d0144814 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java @@ -717,6 +717,14 @@ public void testUpdateYarnSysFS() throws Exception { verify(lce, times(1)).updateYarnSysFS(ctx, user, appId, spec); } + @Test + public void testGetLocalResources() throws Exception { + Container container = mock(Container.class); + 
LinuxContainerExecutor lce = mock(LinuxContainerExecutor.class); + lce.getLocalResources(container); + verify(lce, times(1)).getLocalResources(container); + } + private static class TestResourceHandler implements LCEResourcesHandler { static Set postExecContainers = new HashSet(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index 15c1cac9cb8..09c836e596f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -223,6 +223,7 @@ public void setup() throws IOException { nodeHealthChecker.init(conf); containerManager = createContainerManager(delSrvc); ((NMContext)context).setContainerManager(containerManager); + ((NMContext)context).setContainerExecutor(exec); nodeStatusUpdater.init(conf); containerManager.init(conf); nodeStatusUpdater.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index e920105abf9..de20abf6682 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -794,6 +794,7 @@ public int getHttpPort() { .byteValue() })); context.getContainerTokenSecretManager().setMasterKey(masterKey); context.getNMTokenSecretManager().setMasterKey(masterKey); + context.setContainerExecutor(exec); return context; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java index ea3acca35e1..3972d9b4470 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java @@ -1316,6 +1316,7 @@ public boolean matches(LocalizationEvent e) { new HashMap<>(); private final Map eventToFinalState = new HashMap<>(); + final ContainerExecutor exec; WrappedContainer(int appId, long timestamp, int id, String user) throws IOException { @@ -1352,6 +1353,7 @@ protected void scheduleContainer(Container container) { container.sendLaunchEvent(); } }; + exec = mock(ContainerExecutor.class); dispatcher.register(LocalizationEventType.class, localizerBus); dispatcher.register(ContainersLauncherEventType.class, launcherBus); 
dispatcher.register(ContainersMonitorEventType.class, monitorBus); @@ -1412,6 +1414,9 @@ protected void scheduleContainer(Container container) { } when(ctxt.getLocalResources()).thenReturn(localResources); + when(exec.getLocalResources(any())).thenReturn(localResources); + when(context.getContainerExecutor()).thenReturn(exec); + if (withServiceData) { Random r = new Random(); long seed = r.nextLong(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java index 8ab9df6321b..c4eaf4450ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java @@ -30,6 +30,7 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; @@ -38,6 +39,7 @@ import org.apache.hadoop.yarn.security.TestDockerClientConfigHandler; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManager; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; @@ -49,6 +51,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.DockerCommandPlugin; import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin; import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePluginManager; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; @@ -136,6 +140,7 @@ private Container container; private ContainerId cId; private ApplicationAttemptId appAttemptId; + private ApplicationId mockApplicationId; private ContainerLaunchContext context; private Context nmContext; private HashMap env; @@ -165,7 +170,6 @@ private final String whitelistedUser = "yoda"; private String[] testCapabilities; private final String signalPid = "1234"; - private String runtimeTypeUpper = "DOCKER"; private static final String RUNTIME_TYPE = "DOCKER"; private final static String ENV_OCI_CONTAINER_PID_NAMESPACE = @@ -201,6 +205,7 @@ public void setup() { container = mock(Container.class); cId = mock(ContainerId.class); appAttemptId = mock(ApplicationAttemptId.class); + mockApplicationId = mock(ApplicationId.class); context = mock(ContainerLaunchContext.class); env = new HashMap(); env.put("FROM_CLIENT", "1"); @@ -210,6 +215,8 @@ 
public void setup() { env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_IMAGE, image); when(container.getContainerId()).thenReturn(cId); when(cId.toString()).thenReturn(containerId); + when(mockApplicationId.toString()).thenReturn("applicationId"); + when(appAttemptId.getApplicationId()).thenReturn(mockApplicationId); when(cId.getApplicationAttemptId()).thenReturn(appAttemptId); when(container.getLaunchContext()).thenReturn(context); when(context.getEnvironment()).thenReturn(env); @@ -280,6 +287,9 @@ public void setup() { localizedResources.put(new Path("/test_local_dir/test_resource_file"), Collections.singletonList("test_dir/test_resource_file")); + File tmpDir = new File(tmpPath); + tmpDir.mkdirs(); + testCapabilities = new String[] {"NET_BIND_SERVICE", "SYS_CHROOT"}; conf.setStrings(YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES, testCapabilities); @@ -328,6 +338,17 @@ public Context createMockNMContext() { when(mockNMContext.getContainers()).thenReturn(containerMap); when(containerMap.get(any())).thenReturn(container); + ContainerManager mockContainerManager = mock(ContainerManager.class); + ResourceLocalizationService mockLocalzationService = mock(ResourceLocalizationService.class); + + LocalizedResource mockLocalizedResource = mock(LocalizedResource.class); + + when(mockLocalizedResource.getLocalPath()).thenReturn(new Path("/local/layer1")); + when(mockLocalzationService.getLocalizedResource(any(), anyString(), any())) + .thenReturn(mockLocalizedResource); + when(mockContainerManager.getResourceLocalizationService()).thenReturn(mockLocalzationService); + when(mockNMContext.getContainerManager()).thenReturn(mockContainerManager); + try { when(localDirsHandler.getLocalPathForWrite(anyString())) .thenReturn(new Path(tmpPath)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestHdfsManifestToResourcesPlugin.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestHdfsManifestToResourcesPlugin.java new file mode 100644 index 00000000000..649978053fa --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestHdfsManifestToResourcesPlugin.java @@ -0,0 +1,168 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.URL; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.ImageTagToManifestPlugin.NM_RUNC_IMAGE_TOPLEVEL_DIR; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TestHdfsManifestToResourcesPlugin { + private static final Logger LOG = + LoggerFactory.getLogger(TestHdfsManifestToResourcesPlugin.class); + private Configuration conf; + private String tmpPath = new StringBuffer( + System.getProperty("test.build.data")) + .append('/').append("hadoop.tmp.dir").toString(); + private static final String LAYER_MEDIA_TYPE = + "application/vnd.docker.image.rootfs.diff.tar.gzip"; + private static final String CONFIG_MEDIA_TYPE = + "application/vnd.docker.container.image.v1+json"; + + @Before + public void setup() { + conf = new Configuration(); + File tmpDir = new File(tmpPath); + tmpDir.mkdirs(); + } + + @Test + public void testGetLayerResources() throws IOException { + ImageManifest mockManifest = mock(ImageManifest.class); + ImageManifest.Blob mockLayer1 = mock(ImageManifest.Blob.class); + ImageManifest.Blob mockLayer2 = mock(ImageManifest.Blob.class); + String digest1Hash = + "e060f9dd9e8cd9ec0e2814b661a96d78f7298120d7654ba9f83ebfb11ff1fb1e"; + String digest2Hash = + 
"5af5ff88469c8473487bfbc2fe81b4e7d84644bd91f1ab9305de47ef5673637e"; + String digest1 = + "sha256:" + digest1Hash; + String digest2 = + "sha256:" + digest2Hash; + long size1 = 1234; + long size2 = 5678; + + when(mockLayer1.getMediaType()).thenReturn(LAYER_MEDIA_TYPE); + when(mockLayer1.getDigest()).thenReturn(digest1); + when(mockLayer1.getSize()).thenReturn(size1); + + when(mockLayer2.getMediaType()).thenReturn(LAYER_MEDIA_TYPE); + when(mockLayer2.getDigest()).thenReturn(digest2); + when(mockLayer2.getSize()).thenReturn(size2); + + ArrayList mockLayers = new ArrayList<>(); + mockLayers.add(mockLayer1); + mockLayers.add(mockLayer2); + + when(mockManifest.getLayers()).thenReturn(mockLayers); + + conf.set(NM_RUNC_IMAGE_TOPLEVEL_DIR, tmpPath); + long modTime = 123456789; + + HdfsManifestToResourcesPlugin hdfsManifestToResourcesPlugin = + new HdfsManifestToResourcesPlugin() { + @Override + protected FileStatus statBlob(Path path) throws IOException { + FileStatus mockFileStatus = mock(FileStatus.class); + when(mockFileStatus.getModificationTime()).thenReturn(modTime); + return mockFileStatus; + } + }; + hdfsManifestToResourcesPlugin.init(conf); + + List returnedLayers = + hdfsManifestToResourcesPlugin.getLayerResources(mockManifest); + + URL url1 = URL.fromPath(new Path(tmpPath + "/layers", + digest1Hash + ".sqsh")); + URL url2 = URL.fromPath(new Path(tmpPath + "/layers", + digest2Hash + ".sqsh")); + + LocalResource rsrc1 = LocalResource.newInstance(url1, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size1, modTime); + LocalResource rsrc2 = LocalResource.newInstance(url2, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size2, modTime); + + Assert.assertEquals(rsrc1, returnedLayers.get(0)); + Assert.assertEquals(rsrc2, returnedLayers.get(1)); + + } + + @Test + public void testGetConfigResources() throws IOException { + ImageManifest mockManifest = mock(ImageManifest.class); + ImageManifest.Blob mockConfig = mock(ImageManifest.Blob.class); + 
String digestHash = + "e23cac476d0238f0f859c1e07e5faad85262bca490ef5c3a9da32a5b39c6b204"; + String digest = + "sha256:" + digestHash; + long size = 1234; + + when(mockConfig.getMediaType()).thenReturn(CONFIG_MEDIA_TYPE); + when(mockConfig.getDigest()).thenReturn(digest); + when(mockConfig.getSize()).thenReturn(size); + when(mockManifest.getConfig()).thenReturn(mockConfig); + + conf.set(NM_RUNC_IMAGE_TOPLEVEL_DIR, tmpPath); + long modTime = 123456789; + + HdfsManifestToResourcesPlugin hdfsManifestToResourcesPlugin = + new HdfsManifestToResourcesPlugin() { + @Override + protected FileStatus statBlob(Path path) throws IOException { + FileStatus mockFileStatus = mock(FileStatus.class); + when(mockFileStatus.getModificationTime()).thenReturn(modTime); + return mockFileStatus; + } + }; + hdfsManifestToResourcesPlugin.init(conf); + + LocalResource returnedLayer = + hdfsManifestToResourcesPlugin.getConfigResource(mockManifest); + + URL url1 = URL.fromPath(new Path(tmpPath + "/config", digestHash)); + + LocalResource rsrc = LocalResource.newInstance(url1, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size, modTime); + + Assert.assertEquals(rsrc, returnedLayer); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestImageTagToManifestPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestImageTagToManifestPlugin.java new file mode 100644 index 00000000000..e3796b55ad7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestImageTagToManifestPlugin.java @@ -0,0 +1,222 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.conf.Configuration; +import org.codehaus.jackson.map.ObjectMapper; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; + +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.ImageTagToManifestPlugin.NM_HDFS_RUNC_IMAGE_TAG_TO_HASH_FILE; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.ImageTagToManifestPlugin.NM_LOCAL_RUNC_IMAGE_TAG_TO_HASH_FILE; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.ImageTagToManifestPlugin.NM_RUNC_IMAGE_TOPLEVEL_DIR; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TestImageTagToManifestPlugin { + private static final Logger LOG = + LoggerFactory.getLogger(TestImageTagToManifestPlugin.class); + private MockImageTagToManifestPlugin mockImageTagToManifestPlugin; + private Configuration conf; + private String tmpPath = new 
StringBuffer(System.getProperty("test.build.data")) + .append('/').append("hadoop.tmp.dir").toString(); + private ObjectMapper mapper; + + private String manifestJson = + "{\n" + + " \"schemaVersion\": 2,\n" + + " \"mediaType\": \"application/vnd.docker.distribution.manifest.v2+json\",\n" + + " \"config\": {\n" + + " \"mediaType\": \"application/vnd.docker.container.image.v1+json\",\n" + + " \"size\": 2857,\n" + + " \"digest\": \"sha256:e23cac476d0238f0f859c1e07e5faad85262bca490ef5c3a9da32a5b39c6b204\"\n" + + " },\n" + + " \"layers\": [\n" + + " {\n" + + " \"mediaType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\",\n" + + " \"size\": 185784329,\n" + + " \"digest\": \"sha256:e060f9dd9e8cd9ec0e2814b661a96d78f7298120d7654ba9f83ebfb11ff1fb1e\"\n" + + " },\n" + + " {\n" + + " \"mediaType\": \"application/vnd.docker.image.rootfs.diff.tar.gzip\",\n" + + " \"size\": 10852,\n" + + " \"digest\": \"sha256:5af5ff88469c8473487bfbc2fe81b4e7d84644bd91f1ab9305de47ef5673637e\"\n" + + " }\n" + + " ]\n" + + "}"; + + @Before + public void setup() { + conf = new Configuration(); + mapper = new ObjectMapper(); + File tmpDir = new File(tmpPath); + tmpDir.mkdirs(); + } + + public class MockImageTagToManifestPlugin extends ImageTagToManifestPlugin { + private BufferedReader mockLocalBufferedReader; + private BufferedReader mockHdfsBufferedReader; + + MockImageTagToManifestPlugin(BufferedReader mockLocalBufferedReader, + BufferedReader mockHdfsBufferedReader) { + super(); + this.mockLocalBufferedReader = mockLocalBufferedReader; + this.mockHdfsBufferedReader = mockHdfsBufferedReader; + } + + @Override + protected BufferedReader getLocalImageToHashReader() throws IOException { + return mockLocalBufferedReader; + } + + @Override + protected BufferedReader getHdfsImageToHashReader() throws IOException { + return mockHdfsBufferedReader; + } + } + + + @Test + public void testLocalGetHashFromImageTag() throws IOException { + BufferedReader mockLocalBufferedReader = 
mock(BufferedReader.class); + BufferedReader mockHdfsBufferedReader = mock(BufferedReader.class); + + String commentImage = "commentimage:latest"; + String commentImageHash = + "142fff813433c1faa8796388db3a1fa1e899ba08c9e42ad2e33c42696d0f15d2"; + + String fakeImageLatest = "fakeimage:latest"; + String fakeImageCurrent= "fakeimage:current"; + String fakeImageHash = + "f75903872eb2963e158502ef07f2e56d3a2e90a012b4afe3440461b54142a567"; + + String busyboxImage = "repo/busybox:123"; + String busyboxHash = + "c6912b9911deceec6c43ebb4c31c96374a8ebb3de4cd75f377dba6c07707de6e"; + + String commentLine = "#" + commentImage + commentImageHash + "#2nd comment"; + String busyboxLine = busyboxImage + ":" + busyboxHash + "#comment"; + String fakeImageLine = fakeImageLatest + "," + fakeImageCurrent + ":" + + fakeImageHash + "#fakeimage comment"; + + when(mockLocalBufferedReader.readLine()).thenReturn(commentLine, fakeImageLine, busyboxLine, null); + + mockImageTagToManifestPlugin = new MockImageTagToManifestPlugin + (mockLocalBufferedReader, mockHdfsBufferedReader); + mockImageTagToManifestPlugin.loadImageToHashFiles(); + + String returnedFakeImageHash = mockImageTagToManifestPlugin + .getHashFromImageTag(fakeImageLatest); + String returnedBusyboxHash = mockImageTagToManifestPlugin + .getHashFromImageTag(busyboxImage); + String returnedCommentHash = mockImageTagToManifestPlugin + .getHashFromImageTag(commentImage); + + Assert.assertEquals(fakeImageHash, returnedFakeImageHash); + Assert.assertEquals(busyboxHash, returnedBusyboxHash); + + //Image hash should not be found, so returned hash should be the tag + Assert.assertEquals(commentImage, returnedCommentHash); + } + + @Test + public void testHdfsGetHashFromImageTag() throws IOException { + BufferedReader mockLocalBufferedReader = mock(BufferedReader.class); + BufferedReader mockHdfsBufferedReader = mock(BufferedReader.class); + + String commentImage = "commentimage:latest"; + String commentImageHash = + 
"142fff813433c1faa8796388db3a1fa1e899ba08c9e42ad2e33c42696d0f15d2"; + + String fakeImageLatest = "fakeimage:latest"; + String fakeImageCurrent= "fakeimage:current"; + String fakeImageHash = + "f75903872eb2963e158502ef07f2e56d3a2e90a012b4afe3440461b54142a567"; + + String busyboxImage = "repo/busybox:123"; + String busyboxHash = + "c6912b9911deceec6c43ebb4c31c96374a8ebb3de4cd75f377dba6c07707de6e"; + + String commentLine = "#" + commentImage + commentImageHash + "#2nd comment"; + String busyboxLine = busyboxImage + ":" + busyboxHash + "#comment"; + String fakeImageLine = fakeImageLatest + "," + fakeImageCurrent + ":" + + fakeImageHash + "#fakeimage comment"; + + when(mockHdfsBufferedReader.readLine()).thenReturn(commentLine, fakeImageLine, busyboxLine, null); + + mockImageTagToManifestPlugin = new MockImageTagToManifestPlugin + (mockLocalBufferedReader, mockHdfsBufferedReader); + mockImageTagToManifestPlugin.loadImageToHashFiles(); + + String returnedFakeImageHash = mockImageTagToManifestPlugin + .getHashFromImageTag(fakeImageLatest); + String returnedBusyboxHash = mockImageTagToManifestPlugin + .getHashFromImageTag(busyboxImage); + String returnedCommentHash = mockImageTagToManifestPlugin + .getHashFromImageTag(commentImage); + + Assert.assertEquals(fakeImageHash, returnedFakeImageHash); + Assert.assertEquals(busyboxHash, returnedBusyboxHash); + + //Image hash should not be found, so returned hash should be the tag + Assert.assertEquals(commentImage, returnedCommentHash); + } + + @Test + public void testGetManifestFromImageTag() throws IOException { + String manifestPath = tmpPath + "/manifests"; + File manifestDir = new File(manifestPath); + manifestDir.mkdirs(); + + conf.set(NM_LOCAL_RUNC_IMAGE_TAG_TO_HASH_FILE, "local-image-tag-to-hash"); + conf.set(NM_HDFS_RUNC_IMAGE_TAG_TO_HASH_FILE, "hdfs-image-tag-to-hash"); + conf.set(NM_RUNC_IMAGE_TOPLEVEL_DIR, tmpPath); + String manifestHash = "d0e8c542d28e8e868848aeb58beecb31079eb7ada1293c4bc2eded08daed605a"; + + 
PrintWriter printWriter = new PrintWriter(manifestPath + "/" + manifestHash); + printWriter.println(manifestJson); + printWriter.close(); + + BufferedReader mockLocalBufferedReader = mock(BufferedReader.class); + BufferedReader mockHdfsBufferedReader = mock(BufferedReader.class); + + mockImageTagToManifestPlugin = new MockImageTagToManifestPlugin + (mockLocalBufferedReader, mockHdfsBufferedReader) { + @Override + public String getHashFromImageTag(String imageTag) { + return manifestHash; + } + }; + mockImageTagToManifestPlugin.init(conf); + + ImageManifest manifest = mockImageTagToManifestPlugin.getManifestFromImageTag("image"); + ImageManifest expectedManifest = mapper.readValue(manifestJson, ImageManifest.class); + Assert.assertEquals(expectedManifest.toString(), manifest.toString()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestRuncContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestRuncContainerRuntime.java new file mode 100644 index 00000000000..b1cdc49d0d6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestRuncContainerRuntime.java @@ -0,0 +1,909 @@ +/* + * * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * / + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManager; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig; +import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCILayer; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig.OCIMount; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig.OCIProcessConfig; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.RuncContainerRuntime.ContainerRuntimeObject; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePluginManager; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; +import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; +import org.codehaus.jackson.JsonNode; +import org.codehaus.jackson.map.ObjectMapper; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentMap; + +import static 
org.apache.hadoop.yarn.conf.YarnConfiguration.NM_RUNC_DEFAULT_RO_MOUNTS; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_RUNC_DEFAULT_RW_MOUNTS; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_RUNC_LAYER_MOUNTS_TO_KEEP; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@RunWith(Parameterized.class) +public class TestRuncContainerRuntime { + private static final Logger LOG = + LoggerFactory.getLogger(TestRuncContainerRuntime.class); + private Configuration conf; + private PrivilegedOperationExecutor mockExecutor; + private CGroupsHandler mockCGroupsHandler; + private String containerId; + private Container container; + private ContainerId cId; + private ApplicationAttemptId appAttemptId; + private ApplicationId mockApplicationId; + private ContainerLaunchContext context; + private Context nmContext; + private HashMap env; + private String image; + private String runAsUser = System.getProperty("user.name"); + private String user; + private String appId; + private String containerIdStr; + private Path containerWorkDir; + private Path nmPrivateContainerScriptPath; + private Path nmPrivateTokensPath; + private Path nmPrivateKeystorePath; + private Path nmPrivateTruststorePath; + private Path pidFilePath; + private List localDirs; + private List logDirs; + private List filecacheDirs; + private List userFilecacheDirs; + private List applicationLocalDirs; + private List containerLogDirs; + private Map> localizedResources; + private String resourcesOptions; + private ContainerRuntimeContext.Builder builder; + private final String submittingUser = "anakin"; + private 
ObjectMapper mapper; + private RuncContainerRuntime.ContainerRuntimeObject containerRuntimeObject; + private LocalResource localResource; + private URL mockUrl; + private Resource resource; + private int layersToKeep; + private int cpuShares; + private List expectedMounts; + private String tmpPath; + private LocalResource config; + private List layers; + + private RuncImageTagToManifestPlugin mockRuncImageTagToManifestPlugin = + mock(ImageTagToManifestPlugin.class); + private RuncManifestToResourcesPlugin mockRuncManifestToResourcesPlugin = + mock(HdfsManifestToResourcesPlugin.class); + + @Rule + public TemporaryFolder tempDir = new TemporaryFolder(); + + @Parameterized.Parameters(name = "https={0}") + public static Collection data() { + return Arrays.asList(new Object[][] { + {true}, {false} + }); + } + + @Parameterized.Parameter + public boolean https; + + @Before + public void setup() { + mockExecutor = Mockito + .mock(PrivilegedOperationExecutor.class); + mockCGroupsHandler = Mockito.mock(CGroupsHandler.class); + tmpPath = new StringBuffer(System.getProperty("test.build.data")) + .append('/').append("hadoop.tmp.dir").toString(); + containerId = "container_e11_1518975676334_14532816_01_000001"; + container = mock(Container.class); + cId = mock(ContainerId.class); + appAttemptId = mock(ApplicationAttemptId.class); + mockApplicationId = mock(ApplicationId.class); + context = mock(ContainerLaunchContext.class); + env = new HashMap<>(); + env.put("FROM_CLIENT", "1"); + image = "busybox:latest"; + nmContext = createMockNMContext(); + containerRuntimeObject = + mock(RuncContainerRuntime.ContainerRuntimeObject.class); + localResource = mock(LocalResource.class); + mockUrl = mock(URL.class); + resource = mock(Resource.class); + appId = "app_id"; + layersToKeep = 5; + cpuShares = 10; + + + conf = new Configuration(); + conf.set("hadoop.tmp.dir", tmpPath); + conf.setInt(NM_RUNC_LAYER_MOUNTS_TO_KEEP, layersToKeep); + + env.put(RuncContainerRuntime.ENV_RUNC_CONTAINER_IMAGE, 
image); + when(container.getContainerId()).thenReturn(cId); + when(cId.toString()).thenReturn(containerId); + when(mockApplicationId.toString()).thenReturn(appId); + when(appAttemptId.getApplicationId()).thenReturn(mockApplicationId); + when(cId.getApplicationAttemptId()).thenReturn(appAttemptId); + when(container.getLaunchContext()).thenReturn(context); + when(context.getEnvironment()).thenReturn(env); + when(container.getUser()).thenReturn(submittingUser); + when(container.getContainerRuntimeData()) + .thenReturn(containerRuntimeObject); + when(container.getResource()).thenReturn(resource); + when(resource.getVirtualCores()).thenReturn(cpuShares); + when(containerRuntimeObject.getConfig()).thenReturn(localResource); + when(localResource.getResource()).thenReturn(mockUrl); + try { + when(mockUrl.toPath()).thenReturn(new Path("/test_user_filecache_dir")); + } catch (URISyntaxException use) { + throw new RuntimeException(use); + } + + user = submittingUser; + containerIdStr = containerId; + containerWorkDir = new Path("/test_container_work_dir"); + nmPrivateContainerScriptPath = new Path("/test_script_path"); + nmPrivateTokensPath = new Path("/test_private_tokens_path"); + if (https) { + nmPrivateKeystorePath = new Path("/test_private_keystore_path"); + nmPrivateTruststorePath = new Path("/test_private_truststore_path"); + } else { + nmPrivateKeystorePath = null; + nmPrivateTruststorePath = null; + } + pidFilePath = new Path("/test_pid_file_path"); + localDirs = new ArrayList<>(); + logDirs = new ArrayList<>(); + filecacheDirs = new ArrayList<>(); + resourcesOptions = "cgroups=none"; + userFilecacheDirs = new ArrayList<>(); + applicationLocalDirs = new ArrayList<>(); + containerLogDirs = new ArrayList<>(); + localizedResources = new HashMap<>(); + expectedMounts = new ArrayList<>(); + + String filecachePath = new StringBuffer( + System.getProperty("test.build.data")) + .append('/').append("filecache").toString(); + String userFilecachePath = new StringBuffer( + 
System.getProperty("test.build.data")) + .append('/').append("userFilecache").toString(); + + localDirs.add("/test_local_dir"); + logDirs.add("/test_log_dir"); + filecacheDirs.add(filecachePath); + userFilecacheDirs.add(userFilecachePath); + applicationLocalDirs.add("/test_application_local_dir"); + containerLogDirs.add("/test_container_log_dir"); + localizedResources.put(new Path("/test_local_dir/test_resource_file"), + Collections.singletonList("test_dir/test_resource_file")); + + File tmpDir = new File(tmpPath); + tmpDir.mkdirs(); + + List rwOptions = new ArrayList<>(); + rwOptions.add("rw"); + rwOptions.add("rbind"); + rwOptions.add("rprivate"); + + List roOptions = new ArrayList<>(); + roOptions.add("ro"); + roOptions.add("rbind"); + roOptions.add("rprivate"); + + for (String containerLogDir : containerLogDirs) { + expectedMounts.add(new OCIMount( + containerLogDir, "bind", containerLogDir, rwOptions)); + } + + for (String applicationLocalDir : applicationLocalDirs) { + expectedMounts.add(new OCIMount( + applicationLocalDir, "bind", applicationLocalDir, rwOptions)); + } + + for (String filecacheDir : filecacheDirs) { + File filecacheDirFile = new File(filecacheDir); + filecacheDirFile.mkdirs(); + expectedMounts.add(new OCIMount( + filecacheDir, "bind", filecacheDir, roOptions)); + } + for (String userFilecacheDir : userFilecacheDirs) { + File userFilecacheDirFile = new File(userFilecacheDir); + userFilecacheDirFile.mkdirs(); + expectedMounts.add(new OCIMount( + userFilecachePath, "bind", userFilecachePath, roOptions)); + } + + expectedMounts.add(new OCIMount( + "/tmp", "bind", containerWorkDir + "/private_slash_tmp", rwOptions)); + expectedMounts.add(new OCIMount( + "/var/tmp", "bind", containerWorkDir + "/private_var_slash_tmp", + rwOptions)); + + mapper = new ObjectMapper(); + + builder = new ContainerRuntimeContext + .Builder(container); + + builder.setExecutionAttribute(RUN_AS_USER, runAsUser) + .setExecutionAttribute(USER, user) + 
.setExecutionAttribute(APPID, appId) + .setExecutionAttribute(CONTAINER_ID_STR, containerIdStr) + .setExecutionAttribute(CONTAINER_WORK_DIR, containerWorkDir) + .setExecutionAttribute(NM_PRIVATE_CONTAINER_SCRIPT_PATH, + nmPrivateContainerScriptPath) + .setExecutionAttribute(NM_PRIVATE_TOKENS_PATH, nmPrivateTokensPath) + .setExecutionAttribute(NM_PRIVATE_KEYSTORE_PATH, nmPrivateKeystorePath) + .setExecutionAttribute(NM_PRIVATE_TRUSTSTORE_PATH, + nmPrivateTruststorePath) + .setExecutionAttribute(PID_FILE_PATH, pidFilePath) + .setExecutionAttribute(LOCAL_DIRS, localDirs) + .setExecutionAttribute(LOG_DIRS, logDirs) + .setExecutionAttribute(FILECACHE_DIRS, filecacheDirs) + .setExecutionAttribute(USER_FILECACHE_DIRS, userFilecacheDirs) + .setExecutionAttribute(APPLICATION_LOCAL_DIRS, applicationLocalDirs) + .setExecutionAttribute(CONTAINER_LOG_DIRS, containerLogDirs) + .setExecutionAttribute(LOCALIZED_RESOURCES, localizedResources) + .setExecutionAttribute(RESOURCES_OPTIONS, resourcesOptions); + } + + public class MockRuncContainerRuntime extends RuncContainerRuntime { + MockRuncContainerRuntime( + PrivilegedOperationExecutor privilegedOperationExecutor, + CGroupsHandler cGroupsHandler) { + super(privilegedOperationExecutor, cGroupsHandler); + } + + @Override + protected RuncImageTagToManifestPlugin chooseImageTagToManifestPlugin() + throws ContainerExecutionException { + ImageManifest mockImageManifest = mock(ImageManifest.class); + try { + when(mockRuncImageTagToManifestPlugin.getManifestFromImageTag(any())) + .thenReturn(mockImageManifest); + } catch (IOException ioe) { + throw new ContainerExecutionException(ioe); + } + return mockRuncImageTagToManifestPlugin; + } + + @Override + protected RuncManifestToResourcesPlugin chooseManifestToResourcesPlugin() + throws ContainerExecutionException { + URL configUrl = URL.fromPath(new Path(tmpPath + "config")); + URL layer1Url = URL.fromPath(new Path(tmpPath + "layer1")); + URL layer2Url = URL.fromPath(new Path(tmpPath + 
"layer2")); + + long size = 1234; + long timestamp = 5678; + + config = LocalResource.newInstance(configUrl, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size, timestamp); + + LocalResource layer1 = LocalResource.newInstance(layer1Url, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size, timestamp); + LocalResource layer2 = LocalResource.newInstance(layer2Url, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size, timestamp); + + layers = new ArrayList<>(); + + layers.add(layer1); + layers.add(layer2); + + try { + when(mockRuncManifestToResourcesPlugin.getConfigResource(any())) + .thenReturn(config); + when(mockRuncManifestToResourcesPlugin.getLayerResources(any())) + .thenReturn(layers); + } catch (IOException ioe) { + throw new ContainerExecutionException(ioe); + } + return mockRuncManifestToResourcesPlugin; + } + + @Override + protected List extractImageEnv(File config) { + return new ArrayList<>(); + } + + @Override + protected List extractImageEntrypoint(File config) { + return new ArrayList<>(); + } + } + + public Context createMockNMContext() { + Context mockNMContext = mock(Context.class); + LocalDirsHandlerService localDirsHandler = + mock(LocalDirsHandlerService.class); + ResourcePluginManager resourcePluginManager = + mock(ResourcePluginManager.class); + + ConcurrentMap containerMap = + mock(ConcurrentMap.class); + + when(mockNMContext.getLocalDirsHandler()).thenReturn(localDirsHandler); + when(mockNMContext.getResourcePluginManager()) + .thenReturn(resourcePluginManager); + when(mockNMContext.getContainers()).thenReturn(containerMap); + when(containerMap.get(any())).thenReturn(container); + + ContainerManager mockContainerManager = mock(ContainerManager.class); + ResourceLocalizationService mockLocalzationService = + mock(ResourceLocalizationService.class); + + LocalizedResource mockLocalizedResource = mock(LocalizedResource.class); + + when(mockLocalizedResource.getLocalPath()).thenReturn( + new 
Path("/local/layer1")); + when(mockLocalzationService.getLocalizedResource(any(), anyString(), any())) + .thenReturn(mockLocalizedResource); + when(mockContainerManager.getResourceLocalizationService()) + .thenReturn(mockLocalzationService); + when(mockNMContext.getContainerManager()).thenReturn(mockContainerManager); + + try { + when(localDirsHandler.getLocalPathForWrite(anyString())) + .thenReturn(new Path(tmpPath)); + } catch (IOException ioe) { + LOG.info("LocalDirsHandler failed" + ioe); + } + return mockNMContext; + } + + private File captureRuncConfigFile() + throws PrivilegedOperationException { + PrivilegedOperation op = capturePrivilegedOperation(1); + + Assert.assertEquals(PrivilegedOperation.OperationType + .RUN_RUNC_CONTAINER, op.getOperationType()); + return new File(op.getArguments().get(0)); + } + + private PrivilegedOperation capturePrivilegedOperation(int invocations) + throws PrivilegedOperationException { + ArgumentCaptor opCaptor = ArgumentCaptor.forClass( + PrivilegedOperation.class); + + verify(mockExecutor, times(invocations)) + .executePrivilegedOperation(any(), opCaptor.capture(), any(), + any(), anyBoolean(), anyBoolean()); + + //verification completed. we need to isolate specific invocations. + // hence, reset mock here + Mockito.reset(mockExecutor); + + return opCaptor.getValue(); + } + + private ContainerRuntimeObject captureContainerRuntimeObject( + int invocations) { + + ArgumentCaptor opCaptor = ArgumentCaptor.forClass( + ContainerRuntimeObject.class); + + verify(container, times(invocations)) + .setContainerRuntimeData(opCaptor.capture()); + + //verification completed. we need to isolate specific invocations. 
+ // hence, reset mock here + Mockito.reset(container); + + return opCaptor.getValue(); + } + + @SuppressWarnings("unchecked") + private OCIContainerExecutorConfig verifyRuncConfig(File config) + throws IOException { + int configSize; + String configVersion; + String configUser; + String configContainerId; + String configAppId; + String configPidFile; + String configContainerScriptPath; + String configContainerCredentialsPath; + int configHttps; + String configKeystorePath; + String configTruststorePath; + List configLocalDirsList; + List configLogDirsList; + List configLayersList; + int configLayersToKeep; + String configContainerWorkDir; + int expectedConfigSize; + long configCpuShares; + + JsonNode configNode = mapper.readTree(config); + + OCIContainerExecutorConfig ociContainerExecutorConfig = + mapper.readValue(configNode, OCIContainerExecutorConfig.class); + configSize = configNode.size(); + + OCIRuntimeConfig ociRuntimeConfig = + ociContainerExecutorConfig.getOciRuntimeConfig(); + OCIProcessConfig ociProcessConfig = ociRuntimeConfig.getProcess(); + + configVersion = ociContainerExecutorConfig.getVersion(); + configUser = ociContainerExecutorConfig.getUsername(); + configContainerId = ociContainerExecutorConfig.getContainerId(); + configAppId = ociContainerExecutorConfig.getApplicationId(); + configPidFile = ociContainerExecutorConfig.getPidFile(); + configContainerScriptPath = + ociContainerExecutorConfig.getContainerScriptPath(); + configContainerCredentialsPath = + ociContainerExecutorConfig.getContainerCredentialsPath(); + configHttps = ociContainerExecutorConfig.getHttps(); + configKeystorePath = ociContainerExecutorConfig.getKeystorePath(); + configTruststorePath = ociContainerExecutorConfig.getTruststorePath(); + configLocalDirsList = ociContainerExecutorConfig.getLocalDirs(); + configLogDirsList = ociContainerExecutorConfig.getLogDirs(); + configLayersList = ociContainerExecutorConfig.getLayers(); + configLayersToKeep = 
ociContainerExecutorConfig.getReapLayerKeepCount(); + configContainerWorkDir = ociRuntimeConfig.getProcess().getCwd(); + configCpuShares = + ociRuntimeConfig.getLinux().getResources().getCPU().getShares(); + + expectedConfigSize = (https) ? 15 : 12; + + Assert.assertEquals(expectedConfigSize, configSize); + Assert.assertEquals("0.1", configVersion); + Assert.assertEquals(user, configUser); + Assert.assertEquals(containerId, configContainerId); + Assert.assertEquals(appId, configAppId); + Assert.assertEquals(pidFilePath.toString(), configPidFile); + Assert.assertEquals(nmPrivateContainerScriptPath.toUri().toString(), + configContainerScriptPath); + Assert.assertEquals(nmPrivateTokensPath.toUri().getPath(), + configContainerCredentialsPath); + + if (https) { + Assert.assertEquals(1, configHttps); + Assert.assertEquals(nmPrivateKeystorePath.toUri().toString(), + configKeystorePath); + Assert.assertEquals(nmPrivateTruststorePath.toUri().toString(), + configTruststorePath); + } else { + Assert.assertEquals(0, configHttps); + Assert.assertNull(configKeystorePath); + Assert.assertNull(configTruststorePath); + } + + Assert.assertEquals(localDirs, configLocalDirsList); + Assert.assertEquals(logDirs, configLogDirsList); + Assert.assertEquals(0, configLayersList.size()); + Assert.assertEquals(layersToKeep, configLayersToKeep); + + List configMounts = ociRuntimeConfig.getMounts(); + verifyRuncMounts(expectedMounts, configMounts); + + List processArgsList = ociProcessConfig.getArgs(); + String configArgs = "".join(",", processArgsList); + + Assert.assertEquals(containerWorkDir.toString(), configContainerWorkDir); + Assert.assertEquals("bash," + containerWorkDir + "/launch_container.sh", + configArgs); + Assert.assertEquals(cpuShares, configCpuShares); + + return ociContainerExecutorConfig; + } + + + private void verifyRuncMounts(List expectedMounts, + List configMounts) throws IOException { + Assert.assertEquals(expectedMounts.size(), configMounts.size()); + boolean found; + 
for (OCIMount expectedMount : expectedMounts) { + found = false; + + for (OCIMount configMount : configMounts) { + if (expectedMount.getDestination().equals(configMount.getDestination()) + && expectedMount.getSource().equals(configMount.getSource()) + && expectedMount.getType().equals(configMount.getType()) + && expectedMount.getOptions(). + containsAll(configMount.getOptions())) { + found = true; + break; + } + } + + if (!found) { + String expectedMountString = expectedMount.getSource() + ":" + + expectedMount.getDestination() + ", " + expectedMount.getType() + + ", " + expectedMount.getOptions().toString(); + throw new IOException("Expected mount not found: " + + expectedMountString); + } + } + } + + @Test + public void testSelectRuncContainerType() { + Map envRuncType = new HashMap<>(); + Map envOtherType = new HashMap<>(); + + envRuncType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, + ContainerRuntimeConstants.CONTAINER_RUNTIME_RUNC); + envOtherType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "other"); + + Assert.assertFalse(RuncContainerRuntime + .isRuncContainerRequested(conf, null)); + Assert.assertTrue(RuncContainerRuntime + .isRuncContainerRequested(conf, envRuncType)); + Assert.assertFalse(RuncContainerRuntime + .isRuncContainerRequested(conf, envOtherType)); + } + + @Test + public void testSelectRuncContainerTypeWithRuncAsDefault() { + Map envRuncType = new HashMap<>(); + Map envOtherType = new HashMap<>(); + + conf.set(YarnConfiguration.LINUX_CONTAINER_RUNTIME_TYPE, + ContainerRuntimeConstants.CONTAINER_RUNTIME_RUNC); + envRuncType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, + ContainerRuntimeConstants.CONTAINER_RUNTIME_RUNC); + envOtherType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "other"); + + Assert.assertTrue(RuncContainerRuntime + .isRuncContainerRequested(conf, null)); + Assert.assertTrue(RuncContainerRuntime + .isRuncContainerRequested(conf, envRuncType)); + Assert.assertFalse(RuncContainerRuntime + 
.isRuncContainerRequested(conf, envOtherType)); + } + + @Test + public void testSelectRuncContainerTypeWithDefaultSet() { + Map envRuncType = new HashMap<>(); + Map envOtherType = new HashMap<>(); + + conf.set(YarnConfiguration.LINUX_CONTAINER_RUNTIME_TYPE, "default"); + envRuncType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, + ContainerRuntimeConstants.CONTAINER_RUNTIME_RUNC); + envOtherType.put(ContainerRuntimeConstants.ENV_CONTAINER_TYPE, "other"); + + Assert.assertFalse(RuncContainerRuntime + .isRuncContainerRequested(conf, null)); + Assert.assertTrue(RuncContainerRuntime + .isRuncContainerRequested(conf, envRuncType)); + Assert.assertFalse(RuncContainerRuntime + .isRuncContainerRequested(conf, envOtherType)); + } + + @Test + public void testRuncContainerLaunch() + throws ContainerExecutionException, PrivilegedOperationException, + IOException { + MockRuncContainerRuntime runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + + runtime.initialize(conf, nmContext); + runtime.launchContainer(builder.build()); + + File config = captureRuncConfigFile(); + verifyRuncConfig(config); + } + + @Test + public void testRuncContainerLaunchWithDefaultImage() + throws ContainerExecutionException, IOException { + String runcImage = "busybox:1.2.3"; + conf.set(YarnConfiguration.NM_RUNC_IMAGE_NAME, runcImage); + env.remove(RuncContainerRuntime.ENV_RUNC_CONTAINER_IMAGE); + + MockRuncContainerRuntime runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + runtime.initialize(conf, nmContext); + runtime.getLocalResources(container); + + Mockito.verify(mockRuncImageTagToManifestPlugin) + .getManifestFromImageTag(runcImage); + } + + @Test + public void testCGroupParent() throws ContainerExecutionException, + PrivilegedOperationException, IOException { + // Case 1: neither hierarchy nor resource options set, + // so cgroup should not be set + MockRuncContainerRuntime runtime = new MockRuncContainerRuntime( + mockExecutor, 
mockCGroupsHandler); + + runtime.initialize(conf, nmContext); + runtime.launchContainer(builder.build()); + + File config = captureRuncConfigFile(); + OCIContainerExecutorConfig ociContainerExecutorConfig = + verifyRuncConfig(config); + + String configCgroupsPath = ociContainerExecutorConfig + .getOciRuntimeConfig().getLinux().getCgroupsPath(); + Assert.assertNull(configCgroupsPath); + + // Case 2: hierarchy set, but resource options not, + // so cgroup should not be set + String hierarchy = "hadoop-yarn-test"; + when(mockCGroupsHandler.getRelativePathForCGroup(any())) + .thenReturn(hierarchy); + + runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + + runtime.initialize(conf, nmContext); + runtime.launchContainer(builder.build()); + + config = captureRuncConfigFile(); + ociContainerExecutorConfig = verifyRuncConfig(config); + + configCgroupsPath = ociContainerExecutorConfig.getOciRuntimeConfig() + .getLinux().getCgroupsPath(); + Assert.assertNull(configCgroupsPath); + + // Case 3: resource options set, so cgroup should be set + String resourceOptionsCpu = "/sys/fs/cgroup/cpu/" + hierarchy + + containerIdStr; + + builder.setExecutionAttribute(RESOURCES_OPTIONS, resourceOptionsCpu); + + runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + + runtime.initialize(conf, nmContext); + runtime.launchContainer(builder.build()); + + config = captureRuncConfigFile(); + ociContainerExecutorConfig = verifyRuncConfig(config); + + configCgroupsPath = ociContainerExecutorConfig.getOciRuntimeConfig() + .getLinux().getCgroupsPath(); + Assert.assertEquals("/" + hierarchy, configCgroupsPath); + + // Case 4: cgroupsHandler is null, so cgroup should not be set + resourceOptionsCpu = "/sys/fs/cgroup/cpu/" + hierarchy + + containerIdStr; + + builder.setExecutionAttribute(RESOURCES_OPTIONS, resourceOptionsCpu); + + runtime = new MockRuncContainerRuntime( + mockExecutor, null); + + runtime.initialize(conf, nmContext); + 
runtime.launchContainer(builder.build()); + + config = captureRuncConfigFile(); + ociContainerExecutorConfig = verifyRuncConfig(config); + + configCgroupsPath = ociContainerExecutorConfig.getOciRuntimeConfig() + .getLinux().getCgroupsPath(); + Assert.assertNull(configCgroupsPath); + } + + + @Test + public void testDefaultROMounts() + throws ContainerExecutionException, PrivilegedOperationException, + IOException { + String roMount1 = new StringBuffer(System.getProperty("test.build.data")) + .append('/').append("foo").toString(); + File roMountFile1 = new File (roMount1); + roMountFile1.mkdirs(); + + String roMount2 = new StringBuffer(System.getProperty("test.build.data")) + .append('/').append("bar").toString(); + File roMountFile2 = new File (roMount2); + roMountFile2.mkdirs(); + + conf.setStrings(NM_RUNC_DEFAULT_RO_MOUNTS, + roMount1 + ":" + roMount1 + "," + roMount2 + ":" + roMount2); + + List roOptions = new ArrayList<>(); + roOptions.add("ro"); + roOptions.add("rbind"); + roOptions.add("rprivate"); + + expectedMounts.add(new OCIMount( + roMount1, "bind", roMount1, roOptions)); + expectedMounts.add(new OCIMount( + roMount2, "bind", roMount2, roOptions)); + + MockRuncContainerRuntime runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + + runtime.initialize(conf, nmContext); + runtime.launchContainer(builder.build()); + + File config = captureRuncConfigFile(); + verifyRuncConfig(config); + } + + @Test + public void testDefaultROMountsInvalid() throws ContainerExecutionException { + conf.setStrings(NM_RUNC_DEFAULT_RO_MOUNTS, + "source,target"); + RuncContainerRuntime runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + runtime.initialize(conf, nmContext); + + try { + runtime.launchContainer(builder.build()); + Assert.fail("Expected a launch container failure due to invalid mount."); + } catch (ContainerExecutionException e) { + LOG.info("Caught expected exception : " + e); + } + } + + @Test + public void 
testDefaultRWMounts() + throws ContainerExecutionException, PrivilegedOperationException, + IOException { + String rwMount1 = new StringBuffer(System.getProperty("test.build.data")) + .append('/').append("foo").toString(); + File rwMountFile1 = new File (rwMount1); + rwMountFile1.mkdirs(); + + String rwMount2 = new StringBuffer(System.getProperty("test.build.data")) + .append('/').append("bar").toString(); + File rwMountFile2 = new File (rwMount2); + rwMountFile2.mkdirs(); + + conf.setStrings(NM_RUNC_DEFAULT_RW_MOUNTS, + rwMount1 + ":" + rwMount1 + "," + rwMount2 + ":" + rwMount2); + + List rwOptions = new ArrayList<>(); + rwOptions.add("rw"); + rwOptions.add("rbind"); + rwOptions.add("rprivate"); + + expectedMounts.add(new OCIMount( + rwMount1, "bind", rwMount1, rwOptions)); + expectedMounts.add(new OCIMount( + rwMount2, "bind", rwMount2, rwOptions)); + + MockRuncContainerRuntime runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + + runtime.initialize(conf, nmContext); + runtime.launchContainer(builder.build()); + + File config = captureRuncConfigFile(); + verifyRuncConfig(config); + } + + @Test + public void testDefaultRWMountsInvalid() throws ContainerExecutionException { + conf.setStrings(NM_RUNC_DEFAULT_RW_MOUNTS, + "source,target"); + RuncContainerRuntime runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + runtime.initialize(conf, nmContext); + + try { + runtime.launchContainer(builder.build()); + Assert.fail("Expected a launch container failure due to invalid mount."); + } catch (ContainerExecutionException e) { + LOG.info("Caught expected exception : " + e); + } + } + + @Test + public void testRuncHostnamePattern() throws Exception { + String[] validNames = {"ab", "a.b.c.d", "a1-b.cd.ef", "0AB.", "C_D-"}; + + String[] invalidNames = {"a", "a#.b.c", "-a.b.c", "a@b.c", "a/b/c"}; + + for (String name : validNames) { + RuncContainerRuntime.validateHostname(name); + } + + for (String name : invalidNames) { + 
try { + RuncContainerRuntime.validateHostname(name); + Assert.fail(name + " is an invalid hostname and should fail the regex"); + } catch (ContainerExecutionException ce) { + continue; + } + } + } + + @Test + public void testValidRuncHostnameLength() throws Exception { + String validLength = "example.test.site"; + RuncContainerRuntime.validateHostname(validLength); + } + + @Test(expected = ContainerExecutionException.class) + public void testInvalidRuncHostnameLength() throws Exception { + String invalidLength = + "exampleexampleexampleexampleexampleexampleexampleexample.test.site"; + RuncContainerRuntime.validateHostname(invalidLength); + } + + @Test + public void testGetLocalResources() throws Exception { + RuncContainerRuntime runtime = new MockRuncContainerRuntime( + mockExecutor, mockCGroupsHandler); + runtime.initialize(conf, nmContext); + runtime.getLocalResources(container); + + ContainerRuntimeObject containerRuntimeObject = + captureContainerRuntimeObject(1); + + LocalResource testConfig = containerRuntimeObject.getConfig(); + List testLayers = containerRuntimeObject.getOCILayers(); + + Assert.assertEquals(config, testConfig); + Assert.assertEquals(layers, testLayers); + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java index dcba179a28c..af7d8b14515 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java @@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.TestDockerContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import org.junit.Assert; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java index 980f29b57c6..a7e4a56eef0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java @@ -278,4 +278,14 @@ public void sendPauseEvent(String description) { public List getLocalizationStatuses() { return null; } + + @Override + public void setContainerRuntimeData(Object object) { + + } + + @Override + public Object getContainerRuntimeData() { + return null; + } }