diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 3fb4a3765d1..0e980b95a6b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1952,6 +1952,8 @@ public static boolean isAclEnabled(Configuration conf) { /** Default runtime to be used. */ public static final String LINUX_CONTAINER_RUNTIME_TYPE = LINUX_CONTAINER_RUNTIME_PREFIX + "type"; + public static final String OCI_CONTAINER_RUNTIME_PREFIX = + LINUX_CONTAINER_RUNTIME_PREFIX + "oci."; public static final String DOCKER_CONTAINER_RUNTIME_PREFIX = LINUX_CONTAINER_RUNTIME_PREFIX + "docker."; @@ -2003,6 +2005,25 @@ public static boolean isAclEnabled(Configuration conf) { /** Set enable user remapping as false by default. */ public static final boolean DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING = true; + /** enable readonly mode for docker. 
*/ + public static final String NM_DOCKER_ENABLE_READONLY = + DOCKER_CONTAINER_RUNTIME_PREFIX + "enable-readonly"; + + /** Set enable readonly as false by default */ + public static final boolean DEFAULT_NM_DOCKER_ENABLE_READONLY = false; + + /** Path to the seccomp profile to use with Docker containers */ + public static final String NM_DOCKER_SECCOMP_PROFILE = + DOCKER_CONTAINER_RUNTIME_PREFIX + "seccomp-profile"; + + /** Whether to run containers with the no-new-privileges flag */ + public static final String NM_DOCKER_ENABLE_NO_NEW_PRIVILEGES = + DOCKER_CONTAINER_RUNTIME_PREFIX + "enable-no-new-privileges"; + + /** Whether to run containers with the no-new-privileges flag */ + public static final boolean DEFAULT_NM_DOCKER_ENABLE_NO_NEW_PRIVILEGES = + false; + /** lower limit for acceptable uids of user remapped user. */ public static final String NM_DOCKER_USER_REMAPPING_UID_THRESHOLD = DOCKER_CONTAINER_RUNTIME_PREFIX + "userremapping-uid-threshold"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index 9506509acf4..8d1270a9604 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.nodemanager; import java.io.File; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; @@ -48,6 +49,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; 
import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ConfigurationException; @@ -127,6 +130,10 @@ public Configuration getConf() { */ public abstract void init(Context nmContext) throws IOException; + public void start() {} + + public void stop() {} + /** * This function localizes the JAR file on-demand. * On Windows the ContainerLaunch creates a temporary special JAR manifest of @@ -259,6 +266,11 @@ public abstract void symLink(String target, String symlink) public abstract boolean isContainerAlive(ContainerLivenessContext ctx) throws IOException; + + public Map getLocalResources(Container container) throws IOException { + return container.getLaunchContext().getLocalResources(); + } + /** * Update cluster information inside container. 
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 06a32be9d5b..bf326fa6ef0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -31,6 +31,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ConfigurationException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; @@ -70,6 +71,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.regex.Pattern; import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; @@ -343,6 +345,18 @@ public void init(Context context) throws IOException { resourcesHandler.init(this); } + @Override + public void start() { + super.start(); + linuxContainerRuntime.start(); + } + + @Override + public void stop() { + super.stop(); + linuxContainerRuntime.stop(); + } + @Override public void startLocalizer(LocalizerStartContext ctx) throws IOException, InterruptedException { @@ -1042,4 +1056,9 @@ public String getExposedPorts(Container container) throws ContainerExecutionException { return linuxContainerRuntime.getExposedPorts(container); 
} + + @Override + public Map getLocalResources(Container container) throws IOException { + return linuxContainerRuntime.getLocalResources(container); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 9eff3a9213e..cb97dbb7312 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -422,7 +422,10 @@ protected void serviceInit(Configuration conf) throws Exception { exec.init(context); } catch (IOException e) { throw new YarnRuntimeException("Failed to initialize container executor", e); - } + } + + ((NMContext) context).setContainerExecutor(exec); + DeletionService del = createDeletionService(exec); addService(del); @@ -512,6 +515,7 @@ protected void serviceInit(Configuration conf) throws Exception { registerMXBean(); + context.getContainerExecutor().start(); super.serviceInit(conf); // TODO add local dirs to del } @@ -524,6 +528,7 @@ protected void serviceStop() throws Exception { try { super.serviceStop(); DefaultMetricsSystem.shutdown(); + context.getContainerExecutor().stop(); // Cleanup ResourcePluginManager if (null != context) { @@ -684,6 +689,8 @@ protected void reregisterCollectors() { private AuxServices auxServices; + private ContainerExecutor exec; + public NMContext(NMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInNM nmTokenSecretManager, LocalDirsHandlerService dirsHandler, ApplicationACLsManager aclsManager, diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java index 356c2e094e7..c2bc3db7958 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManager.java @@ -32,6 +32,8 @@ /** * The ContainerManager is an entity that manages the life cycle of Containers. */ +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; + public interface ContainerManager extends ServiceStateChangeListener, ContainerManagementProtocol, EventHandler { @@ -45,4 +47,7 @@ ContainerScheduler getContainerScheduler(); void handleCredentialUpdate(); + + ResourceLocalizationService getResourceLocalizationService(); + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index c43b82539d4..71548005feb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -2008,4 +2008,7 @@ public GetLocalizationStatusesResponse getLocalizationStatuses( return container.getLocalizationStatuses(); } + public ResourceLocalizationService getResourceLocalizationService() { + return rsrcLocalizationSrvc; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java index 5a457c9015e..db8112720ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java @@ -112,6 +112,12 @@ boolean isRecovering(); + void setContainerRuntimeData(Object object); + + Object getContainerRuntimeData(); + + + /** * Get assigned resource mappings to the container. 
* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index b79c305a0e6..3d94efed5cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -180,6 +180,7 @@ private ReInitializationContext createContextForRollback() { private volatile ReInitializationContext reInitContext; private volatile boolean isReInitializing = false; private volatile boolean isMarkeForKilling = false; + private Object containerRuntimeData; /** The NM-wide configuration - not specific to this container */ private final Configuration daemonConf; @@ -1211,26 +1212,28 @@ public ContainerState transition(ContainerImpl container, container.containerLocalizationStartTime = clock.getTime(); // Send requests for public, private resources - Map cntrRsrc = ctxt.getLocalResources(); - if (!cntrRsrc.isEmpty()) { - try { + Map cntrRsrc; + try { + cntrRsrc = container.context + .getContainerExecutor().getLocalResources(container); + if (!cntrRsrc.isEmpty()) { Map> req = container.resourceSet.addResources(ctxt.getLocalResources()); container.dispatcher.getEventHandler().handle( new ContainerLocalizationRequestEvent(container, req)); - } catch (URISyntaxException e) { + return ContainerState.LOCALIZING; + } else { + container.sendScheduleEvent(); + container.metrics.endInitingContainer(); + return ContainerState.SCHEDULED; + } + } catch (URISyntaxException 
| IOException e) { // malformed resource; abort container launch LOG.warn("Failed to parse resource-request", e); container.cleanup(); container.metrics.endInitingContainer(); return ContainerState.LOCALIZATION_FAILED; } - return ContainerState.LOCALIZING; - } else { - container.sendScheduleEvent(); - container.metrics.endInitingContainer(); - return ContainerState.SCHEDULED; - } } } @@ -2283,4 +2286,13 @@ public void setExposedPorts(String ports) { this.readLock.unlock(); } } + + public void setContainerRuntimeData(Object containerRuntimeData) { + this.containerRuntimeData = containerRuntimeData; + } + + + public Object getContainerRuntimeData() { + return containerRuntimeData; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java index a17daede2a8..866bd8a6b40 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java @@ -59,7 +59,9 @@ ADD_NUMA_PARAMS(""), // no CLI switch supported yet. 
REMOVE_DOCKER_CONTAINER("--remove-docker-container"), INSPECT_DOCKER_CONTAINER("--inspect-docker-container"), - SYNC_YARN_SYSFS(""); + SYNC_YARN_SYSFS(""), + RUN_OCI_CONTAINER("--run-oci-container"), + REAP_OCI_LAYER_MOUNTS("--reap-oci-layer-mounts"); private final String option; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java index acbfe9c3ba8..171469b8d49 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java @@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; @@ -37,6 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -60,6 +62,7 @@ LoggerFactory.getLogger(DelegatingLinuxContainerRuntime.class); private DefaultLinuxContainerRuntime defaultLinuxContainerRuntime; private DockerLinuxContainerRuntime dockerLinuxContainerRuntime; + 
private RuncContainerRuntime runcContainerRuntime; private JavaSandboxLinuxContainerRuntime javaSandboxLinuxContainerRuntime; private Set allowedRuntimes = new HashSet<>(); private List pluggableRuntimes = new ArrayList<>(); @@ -92,6 +95,12 @@ public void initialize(Configuration conf, Context nmContext) PrivilegedOperationExecutor.getInstance(conf)); dockerLinuxContainerRuntime.initialize(conf, nmContext); } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.FSIMAGE.name())) { + runcContainerRuntime = new RuncContainerRuntime( + PrivilegedOperationExecutor.getInstance(conf)); + runcContainerRuntime.initialize(conf, nmContext); + } if (isRuntimeAllowed( LinuxContainerRuntimeConstants.RuntimeType.DEFAULT.name())) { defaultLinuxContainerRuntime = new DefaultLinuxContainerRuntime( @@ -116,6 +125,9 @@ LinuxContainerRuntime pickContainerRuntime( } else if (dockerLinuxContainerRuntime != null && dockerLinuxContainerRuntime.isRuntimeRequested(environment)) { runtime = dockerLinuxContainerRuntime; + } else if (runcContainerRuntime != null && + runcContainerRuntime.isRuntimeRequested(environment)) { + runtime = runcContainerRuntime; } else { LinuxContainerRuntime pluggableRuntime = pickPluggableRuntime( environment); @@ -244,4 +256,58 @@ public IOStreamPair execContainer(ContainerExecContext ctx) LinuxContainerRuntime runtime = pickContainerRuntime(container); return runtime.execContainer(ctx); } + + + @Override + public Map getLocalResources(Container container) throws IOException { + try { + LinuxContainerRuntime runtime = pickContainerRuntime(container); + return runtime.getLocalResources(container); + } catch (ContainerExecutionException e) { + throw new IOException(e); + } + } + + @Override + public void start() { + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.JAVASANDBOX.name())) { + javaSandboxLinuxContainerRuntime.start(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.DOCKER.name())) { + 
dockerLinuxContainerRuntime.start(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.FSIMAGE.name())) { + runcContainerRuntime.start(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.DEFAULT.name())) { + defaultLinuxContainerRuntime.start(); + } + + } + + @Override + public void stop() { + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.JAVASANDBOX.name())) { + javaSandboxLinuxContainerRuntime.stop(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.DOCKER.name())) { + dockerLinuxContainerRuntime.stop(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.FSIMAGE.name())) { + runcContainerRuntime.stop(); + } + if (isRuntimeAllowed( + LinuxContainerRuntimeConstants.RuntimeType.DEFAULT.name())) { + defaultLinuxContainerRuntime.stop(); + } + + } + } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/HdfsManifestToResourcesPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/HdfsManifestToResourcesPlugin.java new file mode 100644 index 00000000000..6a128c4b3a7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/HdfsManifestToResourcesPlugin.java @@ -0,0 +1,211 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.URL; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.OCI_CONTAINER_RUNTIME_PREFIX; + +public class HdfsManifestToResourcesPlugin extends AbstractService implements + OCIManifestToResourcesPlugin { + private Configuration conf; + private String layersDir; + private String configDir; + private FileSystem fs; + private LoadingCache statCache; + + private static String HDFS_MANIFEST_TO_RESOURCES_PLUGIN_PREFIX = + OCI_CONTAINER_RUNTIME_PREFIX + "hdfs-manifest-to-resources-plugin."; + + /** + * The timeout value in 
seconds for the values in the stat cache + */ + private static String NM_OCI_STAT_CACHE_TIMEOUT = + HDFS_MANIFEST_TO_RESOURCES_PLUGIN_PREFIX + "stat-cache-timeout-interval-secs"; + + private static int DEFAULT_NM_OCI_STAT_CACHE_TIMEOUT = 60 * 60; + + /** + * The size of the stat cache which stores stats of the layers and config + */ + private static String NM_OCI_STAT_CACHE_SIZE = + HDFS_MANIFEST_TO_RESOURCES_PLUGIN_PREFIX + "stat-cache-size"; + + private static int DEFAULT_OCI_STAT_CACHE_SIZE = 500; + + /** + * The HDFS location under which the oci image manifests, layers, + * and configs directories exist + */ + private static String NM_OCI_IMAGE_TOPLEVEL_DIR = + OCI_CONTAINER_RUNTIME_PREFIX + "image-toplevel-dir"; + + private static String CONFIG_MEDIA_TYPE = + "application/vnd.docker.container.image.v1+json"; + + private static String LAYER_TAR_GZIP_MEDIA_TYPE = + "application/vnd.docker.image.rootfs.diff.tar.gzip"; + + private static String SHA_256 = "sha256"; + + private static String CONFIG_HASH_ALGORITHM = + SHA_256; + + private static String LAYER_HASH_ALGORITHM = + SHA_256; + + private static int SHA256_HASH_LENGTH = 64; + + private static String ALPHA_NUMERIC = "[a-zA-Z0-9]+"; + + HdfsManifestToResourcesPlugin() { + super(HdfsManifestToResourcesPlugin.class.getName()); + } + + @Override + public void serviceInit(Configuration conf) { + this.conf = conf; + this.layersDir = conf.get(NM_OCI_IMAGE_TOPLEVEL_DIR) + "/layers/"; + this.configDir = conf.get(NM_OCI_IMAGE_TOPLEVEL_DIR) + "/config/"; + CacheLoader cacheLoader = + new CacheLoader() { + @Override + public FileStatus load(@Nonnull Path path) throws Exception { + return statBlob(path); + } + }; + int statCacheSize = conf.getInt(NM_OCI_STAT_CACHE_SIZE, + DEFAULT_OCI_STAT_CACHE_SIZE); + int statCacheTimeout = conf.getInt(NM_OCI_STAT_CACHE_TIMEOUT, + DEFAULT_NM_OCI_STAT_CACHE_TIMEOUT); + this.statCache = CacheBuilder.newBuilder().maximumSize(statCacheSize) + .refreshAfterWrite(statCacheTimeout, 
TimeUnit.SECONDS) + .build(cacheLoader); + } + + @Override + public void serviceStart() throws IOException { + Path path = new Path(layersDir); + this.fs = path.getFileSystem(conf); + } + + @Override + public List getLayerResources(ImageManifest manifest) + throws IOException { + List localRsrc = new ArrayList<>(); + + for(ImageManifest.Blob b : manifest.getLayers()) { + String mediaType = b.getMediaType(); + if (!mediaType.equals(LAYER_TAR_GZIP_MEDIA_TYPE)) { + throw new IOException("Invalid config mediaType: " + mediaType); + } + + String[] layerDigest = b.getDigest().split(":", 2); + + String algorithm = layerDigest[0]; + if (!algorithm.equals(LAYER_HASH_ALGORITHM)) { + throw new IOException("Invalid config digest algorithm: " + algorithm); + } + + String hash = layerDigest[1]; + if (!hash.matches(ALPHA_NUMERIC) || hash.length() != SHA256_HASH_LENGTH) { + throw new IOException("Malformed layer digest: " + hash); + } + + long size = b.getSize(); + Path path = new Path(layersDir, hash + ".sqsh"); + + try { + FileStatus stat = statCache.get(path); + long timestamp = stat.getModificationTime(); + URL url = URL.fromPath(path); + + LocalResource rsrc = LocalResource.newInstance(url, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size, timestamp); + localRsrc.add(rsrc); + } catch (ExecutionException e) { + throw new IOException(e); + } + } + return localRsrc; + } + + public LocalResource getConfigResource(ImageManifest manifest) + throws IOException { + ImageManifest.Blob config = manifest.getConfig(); + + String mediaType = config.getMediaType(); + if (!mediaType.equals(CONFIG_MEDIA_TYPE)) { + throw new IOException("Invalid config mediaType: " + mediaType); + } + + String[] configDigest = config.getDigest().split(":", 2); + + String algorithm = configDigest[0]; + if (!algorithm.equals(CONFIG_HASH_ALGORITHM)) { + throw new IOException("Invalid config digest algorithm: " + algorithm); + } + + String hash = configDigest[1]; + if (!hash.matches(ALPHA_NUMERIC) 
|| hash.length() != SHA256_HASH_LENGTH) { + throw new IOException("Malformed layer digest: " + hash); + } + + long size = config.getSize(); + Path path = new Path(configDir + hash); + LocalResource rsrc; + + try { + FileStatus stat = statCache.get(path); + long timestamp = stat.getModificationTime(); + URL url = URL.fromPath(path); + + rsrc = LocalResource.newInstance(url, + LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, + size, timestamp); + } catch (ExecutionException e) { + throw new IOException(e); + } + return rsrc; + } + + private FileStatus statBlob(Path path) throws IOException { + return fs.getFileStatus(path); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageManifest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageManifest.java new file mode 100644 index 00000000000..74c89f418a2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageManifest.java @@ -0,0 +1,124 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import java.util.ArrayList; +import java.util.Map; + +public class ImageManifest { + final private int schemaVersion; + final private String mediaType; + final private Blob config; + final private ArrayList layers; + final private Map annotations; + + public ImageManifest() { + this(0, null, null, null, null); + } + + public ImageManifest(int schemaVersion, String mediaType, Blob config, + ArrayList layers, Map annotations) { + this.schemaVersion = schemaVersion; + this.mediaType = mediaType; + this.config = config; + this.layers = layers; + this.annotations = annotations; + } + + public int getSchemaVersion() { + return schemaVersion; + } + + public String getMediaType() { + return mediaType; + } + + public Blob getConfig() { + return config; + } + + public ArrayList getLayers() { + return layers; + } + + public Map getAnnotations() { + return annotations; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("schemaVersion: " + schemaVersion + "\n"); + sb.append("mediaType: " + mediaType + "\n"); + sb.append(config.toString()); + for(Blob b : layers) { + sb.append(b.toString()); + } + return sb.toString(); + } + + static class Blob { + final private String mediaType; + final private String digest; + final private long size; + final private ArrayList urls; + final private Map annotations; + + public Blob() { + this(null, null, 0, null, null); + } + + public Blob(String mediaType, String digest, long size, + 
ArrayList urls, Map annotations) { + this.mediaType = mediaType; + this.digest = digest; + this.size = size; + this.urls = urls; + this.annotations = annotations; + } + + public String getMediaType() { + return mediaType; + } + + public String getDigest() { + return digest; + } + + public long getSize() { + return size; + } + + public ArrayList getUrls() { + return urls; + } + + public Map getAnnotations() { + return annotations; + } + + @Override + public String toString() { + return "mediaType: " + mediaType + "\n" + "size: " + size + "\n" + + "digest: " + digest + "\n"; + } + } +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageTagToManifestPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageTagToManifestPlugin.java new file mode 100644 index 00000000000..ac7d8fe57b8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/ImageTagToManifestPlugin.java @@ -0,0 +1,353 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.commons.io.IOUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.codehaus.jackson.map.ObjectMapper; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.OCI_CONTAINER_RUNTIME_PREFIX; + +public class ImageTagToManifestPlugin extends AbstractService + implements OCIImageTagToManifestPlugin { + + private Map manifestCache; + private ObjectMapper objMapper; + private AtomicReference> localImageToHashCache = + new AtomicReference<>(new HashMap<>()); + private AtomicReference> hdfsImageToHashCache = + new AtomicReference<>(new HashMap<>()); + private Configuration 
conf; + private ScheduledExecutorService exec; + private long hdfsModTime; + private long localModTime; + private String hdfsImageToHashFile; + private String manifestDir; + private String localImageTagToHashFile; + + private static final Log LOG = LogFactory.getLog( + ImageTagToManifestPlugin.class); + + private static String IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX = + OCI_CONTAINER_RUNTIME_PREFIX + "image-tag-to-manifest-plugin."; + + /** + * The HDFS location where the oci image tag to hash file exists + */ + private static String NM_HDFS_OCI_IMAGE_TAG_TO_HASH_FILE = + IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "hdfs-hash-file"; + + /** + * The local file system location where the oci image tag to hash file exists + */ + private static String NM_LOCAL_OCI_IMAGE_TAG_TO_HASH_FILE = + IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "local-hash-file"; + + /** + * The interval in seconds between refreshing the hdfs image Tag to + * hash cache. + */ + private static String NM_OCI_CACHE_REFRESH_INTERVAL = + IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "cache-refresh-interval-secs"; + + private static int DEFAULT_NM_OCI_CACHE_REFRESH_INTERVAL = 60; + + /** + * The interval in seconds between refreshing the hdfs image Tag to + * hash cache. 
+ */ + private static String NM_OCI_NUM_MANIFESTS_TO_CACHE = + IMAGE_TAG_TO_MANIFEST_PLUGIN_PREFIX + "num-manifests-to-cache"; + + private static int DEFAULT_NUM_MANIFESTS_TO_CACHE = 10; + + /** + * The HDFS location under which the oci image manifests, layers, + * and configs directories exist + */ + private static String NM_OCI_IMAGE_TOPLEVEL_DIR = + OCI_CONTAINER_RUNTIME_PREFIX + "image-toplevel-dir"; + + private static int SHA256_HASH_LENGTH = 64; + + + private static String ALPHA_NUMERIC = "[a-zA-Z0-9]+"; + + ImageTagToManifestPlugin() { + super("ImageTagToManifestPluginService"); + } + + @Override + public ImageManifest getManifestFromImageTag(String imageTag) + throws IOException { + String hash = getHashFromImageTag(imageTag); + ImageManifest manifest = manifestCache.get(hash); + if (manifest != null) { + return manifest; + } + + Path manifestPath = new Path(manifestDir + hash); + FileSystem fs = manifestPath.getFileSystem(conf); + FSDataInputStream input; + try { + input = fs.open(manifestPath); + } catch (IllegalArgumentException iae) { + throw new IOException("Manifest file is not a valid HDFS file: " + + manifestPath.toString(), iae); + } + + byte[] bytes = IOUtils.toByteArray(input); + manifest = objMapper.readValue(bytes, ImageManifest.class); + + manifestCache.put(hash, manifest); + return manifest; + } + + @Override + public String getHashFromImageTag(String imageTag) { + String hash; + Map localImageToHashCacheMap = localImageToHashCache.get(); + Map hdfsImageToHashCacheMap = hdfsImageToHashCache.get(); + + // 1) Go to local file + // 2) Go to HDFS + // 3) Use tag as is/Assume tag is the hash + if ((hash = localImageToHashCacheMap.get(imageTag)) != null) { + return hash; + } else if ((hash = hdfsImageToHashCacheMap.get(imageTag)) != null) { + return hash; + } else { + return imageTag; + } + } + + private BufferedReader getLocalImageToHashReader() throws IOException { + if (localImageTagToHashFile == null) { + LOG.debug("Did not load local image to 
hash file, " + + "file is null"); + return null; + } + + File imageTagToHashFile = new File(localImageTagToHashFile); + if(!imageTagToHashFile.exists()) { + LOG.debug("Did not load local image to hash file, " + + "file doesn't exist"); + return null; + } + + long newLocalModTime = imageTagToHashFile.lastModified(); + if (newLocalModTime == localModTime) { + LOG.debug("Did not load local image to hash file, " + + "file is unmodified"); + return null; + } + localModTime = newLocalModTime; + + return new BufferedReader(new FileReader(imageTagToHashFile)); + } + + private BufferedReader getHdfsImageToHashReader() throws IOException { + if (hdfsImageToHashFile == null) { + LOG.debug("Did not load hdfs image to hash file, " + + "file is null"); + return null; + } + + Path imageToHash = new Path(hdfsImageToHashFile); + FileSystem fs = imageToHash.getFileSystem(conf); + if (!fs.exists(imageToHash)) { + LOG.debug("Did not load hdfs image to hash file, " + + "file doesn't exist"); + return null; + } + + long newHdfsModTime = fs.getFileStatus(imageToHash).getModificationTime(); + if (newHdfsModTime == hdfsModTime) { + LOG.debug("Did not load hdfs image to hash file, " + + "file is unmodified"); + return null; + } + hdfsModTime = newHdfsModTime; + + return new BufferedReader(new InputStreamReader(fs.open(imageToHash))); + } + + // You may specify multiple tags per hash all on the same line. + // Comments are allowed using #. 
Anything after this character will not + // be read + // Example file: + // foo/bar:current,fizz/gig:latest:123456789 + // #this/line:wont,be:parsed:2378590895 + // + // This will map both foo/bar:current and fizz/gig:latest to 123456789 + private static Map readImageToHashFile( + BufferedReader br) throws IOException { + if (br == null) { + return null; + } + + String line; + Map imageToHashCache = new HashMap<>(); + while ((line = br.readLine()) != null) { + int index; + index = line.indexOf("#"); + if (index == 0) { + continue; + } else if (index != -1) { + line = line.substring(0, index); + } + + index = line.lastIndexOf(":"); + if (index == -1) { + LOG.warn("Malformed imageTagToManifest entry: " + line); + continue; + } + String imageTags = line.substring(0, index); + String[] imageTagArray = imageTags.split(","); + String hash = line.substring(index + 1); + if (!hash.matches(ALPHA_NUMERIC) || hash.length() != SHA256_HASH_LENGTH) { + LOG.warn("Malformed image hash: " + hash); + continue; + } + + for (String imageTag : imageTagArray) { + imageToHashCache.put(imageTag, hash); + } + } + return imageToHashCache; + } + + private boolean loadImageToHashFiles() throws IOException { + boolean ret = false; + try ( + BufferedReader localBr = getLocalImageToHashReader(); + BufferedReader hdfsBr = getHdfsImageToHashReader(); + ) { + Map localImageToHash = readImageToHashFile(localBr); + Map hdfsImageToHash = readImageToHashFile(hdfsBr); + + Map tmpLocalImageToHash = localImageToHashCache.get(); + Map tmpHdfsImageToHash = hdfsImageToHashCache.get(); + + if (localImageToHash != null && !localImageToHash.equals(tmpLocalImageToHash)) { + localImageToHashCache.set(localImageToHash); + LOG.info("Reloaded local image tag to hash cache"); + ret = true; + } + if (hdfsImageToHash != null && !hdfsImageToHash.equals(tmpHdfsImageToHash)) { + hdfsImageToHashCache.set(hdfsImageToHash); + LOG.info("Reloaded hdfs image tag to hash cache"); + ret = true; + } + } + return ret; + } + + 
@Override + protected void serviceInit(Configuration conf) throws Exception { + super.serviceInit(conf); + localImageTagToHashFile = conf.get(NM_LOCAL_OCI_IMAGE_TAG_TO_HASH_FILE); + if (localImageTagToHashFile == null) { + LOG.debug("Failed to load local oci image to hash file. " + + "Config not set"); + } + hdfsImageToHashFile = conf.get(NM_HDFS_OCI_IMAGE_TAG_TO_HASH_FILE); + if (hdfsImageToHashFile == null) { + LOG.debug("Failed to load HDFS oci image to hash file. Config not set"); + } + if(hdfsImageToHashFile == null && localImageTagToHashFile == null) { + throw new ContainerExecutionException("No valid image-tag-to-hash files"); + } + manifestDir = conf.get(NM_OCI_IMAGE_TOPLEVEL_DIR) + "/manifests/"; + int numManifestsToCache = conf.getInt(NM_OCI_NUM_MANIFESTS_TO_CACHE, + DEFAULT_NUM_MANIFESTS_TO_CACHE); + this.objMapper = new ObjectMapper(); + this.manifestCache = Collections.synchronizedMap( + new LRUCache(numManifestsToCache, 0.75f)); + this.conf = conf; + + exec = HadoopExecutors.newScheduledThreadPool(1); + } + + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + if(!loadImageToHashFiles()) { + throw new RuntimeException("Couldn't load any image-tag-to-hash-files"); + } + int ociCacheRefreshInterval = conf.getInt(NM_OCI_CACHE_REFRESH_INTERVAL, + DEFAULT_NM_OCI_CACHE_REFRESH_INTERVAL); + exec = HadoopExecutors.newScheduledThreadPool(1); + ScheduledFuture refreshHdfsCacheThread = exec.scheduleWithFixedDelay( + new Runnable() { + @Override + public void run() { + try { + loadImageToHashFiles(); + } catch (Exception e) { + LOG.warn("OCI cache refresh thread caught an exception: ", e); + } + } + }, ociCacheRefreshInterval, ociCacheRefreshInterval, TimeUnit.SECONDS); + } + + @Override + protected void serviceStop() throws Exception { + super.serviceStop(); + exec.shutdownNow(); + } + + private class LRUCache extends LinkedHashMap { + private int cacheSize; + + LRUCache(int initialCapacity, float loadFactor) { + 
super(initialCapacity, loadFactor, true); + this.cacheSize = initialCapacity; + } + + @Override + protected boolean removeEldestEntry( + Map.Entry eldest) { + return this.size() > cacheSize; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java index 7381f7a2e2a..e7718c9ad64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntime.java @@ -1,5 +1,4 @@ /* - * * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +14,6 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * / */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; @@ -23,10 +21,13 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntime; +import java.io.IOException; import java.util.Map; /** @@ -55,5 +56,13 @@ * @return whether this runtime is requested */ boolean isRuntimeRequested(Map env); + + default void start() {} + + default void stop() {} + + default Map getLocalResources(Container container) throws IOException { + return container.getLaunchContext().getLocalResources(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java index fc86b17ed9e..2f221ae0b57 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/LinuxContainerRuntimeConstants.java @@ -37,7 +37,8 @@ private LinuxContainerRuntimeConstants() { public enum RuntimeType { 
DEFAULT, DOCKER, - JAVASANDBOX; + JAVASANDBOX, + FSIMAGE; } public static final Attribute LOCALIZED_RESOURCES = Attribute diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerExecutorConfig.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerExecutorConfig.java new file mode 100644 index 00000000000..cadd561646f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIContainerExecutorConfig.java @@ -0,0 +1,1316 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.codehaus.jackson.annotate.JsonRawValue; +import org.codehaus.jackson.map.annotate.JsonSerialize; + +import java.util.List; +import java.util.Map; + +@JsonSerialize(include=JsonSerialize.Inclusion.NON_DEFAULT) +public class OCIContainerExecutorConfig { + final private String version; + final private String username; + final private String containerId; + final private String applicationId; + final private String pidFile; + final private String containerScriptPath; + final private String containerCredentialsPath; + final private List localDirs; + final private List logDirs; + final private List layers; + final private int reapLayerKeepCount; + final private OCIRuntimeConfig ociRuntimeConfig; + + public String getVersion() { + return version; + } + + public String getUsername() { + return username; + } + + public String getContainerId() { + return containerId; + } + + public String getApplicationId() { + return applicationId; + } + + public String getPidFile() { + return pidFile; + } + + public String getContainerScriptPath() { + return containerScriptPath; + } + + public String getContainerCredentialsPath() { + return containerCredentialsPath; + } + + public List getLocalDirs() { + return localDirs; + } + + public List getLogDirs() { + return logDirs; + } + + public List getLayers() { + return layers; + } + + public int getReapLayerKeepCount() { + return reapLayerKeepCount; + } + + public OCIRuntimeConfig getOciRuntimeConfig() { + return ociRuntimeConfig; + } + + public OCIContainerExecutorConfig() { + this(null, null, null, null, null, null, null, null, null, null, 0, null); + } + + public OCIContainerExecutorConfig(String username, + String containerId, String applicationId, + String pidFile, String containerScriptPath, String containerCredentialsPath, + List localDirs, + List logDirs, List layers, int reapLayerKeepCount, + OCIRuntimeConfig ociRuntimeConfig) { + 
this("0.1", username, containerId, applicationId, pidFile, + containerScriptPath, containerCredentialsPath, localDirs, logDirs, + layers, reapLayerKeepCount, ociRuntimeConfig); + } + + public OCIContainerExecutorConfig(String version, String username, + String containerId, String applicationId, + String pidFile, String containerScriptPath, String containerCredentialsPath, + List localDirs, + List logDirs, List layers, int reapLayerKeepCount, + OCIRuntimeConfig ociRuntimeConfig) { + this.version = version; + this.username = username; + this.containerId = containerId; + this.applicationId = applicationId; + this.pidFile = pidFile; + this.containerScriptPath = containerScriptPath; + this.containerCredentialsPath = containerCredentialsPath; + this.localDirs = localDirs; + this.logDirs = logDirs; + this.layers = layers; + this.reapLayerKeepCount = reapLayerKeepCount; + this.ociRuntimeConfig = ociRuntimeConfig; + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + public static class OCILayer { + final private String mediaType; + final private String path; + + public String getMediaType() { + return mediaType; + } + + public String getPath() { + return path; + } + + public OCILayer(String mediaType, String path) { + this.mediaType = mediaType; + this.path = path; + } + + public OCILayer() { + this(null, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + public static class OCIRuntimeConfig { + final private OCIRootConfig root; + final private List mounts; + final private OCIProcessConfig process; + final private OCIHooksConfig hooks; + final private OCIAnnotationsConfig annotations; + final private OCILinuxConfig linux; + + public OCIRootConfig getRoot() { + return root; + } + + public List getMounts() { + return mounts; + } + + public OCIProcessConfig getProcess() { + return process; + } + + public String getHostname() { + return hostname; + } + + public OCIHooksConfig getHooks() { + return hooks; + } + + public 
OCIAnnotationsConfig getAnnotations() { + return annotations; + } + + public OCILinuxConfig getLinux() { + return linux; + } + + final private String hostname; + + public OCIRuntimeConfig() { + this(null, null, null, null, null, null, null); + } + + + public OCIRuntimeConfig(OCIRootConfig root, List mounts, + OCIProcessConfig process, String hostname, OCIHooksConfig hooks, OCIAnnotationsConfig annotations, + OCILinuxConfig linux) { + this.root = root; + this.mounts = mounts; + this.process = process; + this.hostname = hostname; + this.hooks = hooks; + this.annotations = annotations; + this.linux= linux; + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class OCIRootConfig { + public String getPath() { + return path; + } + + public boolean isReadonly() { + return readonly; + } + + final private String path; + final private boolean readonly; + + public OCIRootConfig(String path, boolean readonly) { + this.path = path; + this.readonly = readonly; + } + + public OCIRootConfig() { + this(null, false); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class OCIMount { + final private String destination; + final private String type; + final private String source; + final private List options; + + public String getDestination() { + return destination; + } + + public String getType() { + return type; + } + + public String getSource() { + return source; + } + + public List getOptions() { + return options; + } + + public OCIMount(String destination, String type, String source, List options) { + this.destination = destination; + this.type = type; + this.source = source; + this.options = options; + } + + public OCIMount(String destination, String source, List options) { + this.destination = destination; + this.type = null; + this.source = source; + this.options = options; + } + + public OCIMount() { + this(null, null, null, null); + } + } + + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static 
class OCIProcessConfig { + final private boolean terminal; + final private ConsoleSize consoleSize; + final private String cwd; + final private List env; + final private List args; + final private RLimits rlimits; + final private String apparmorProfile; + final private Capabilities capabilities; + final private boolean noNewPrivileges; + final private int oomScoreAdj; + final private String selinuxLabel; + final private User user; + + public boolean isTerminal() { + return terminal; + } + + public ConsoleSize getConsoleSize() { + return consoleSize; + } + + public String getCwd() { + return cwd; + } + + public List getEnv() { + return env; + } + + public List getArgs() { + return args; + } + + public RLimits getRlimits() { + return rlimits; + } + + public String getApparmorProfile() { + return apparmorProfile; + } + + public Capabilities getCapabilities() { + return capabilities; + } + + public boolean isNoNewPrivileges() { + return noNewPrivileges; + } + + public int getOomScoreAdj() { + return oomScoreAdj; + } + + public String getSelinuxLabel() { + return selinuxLabel; + } + + public User getUser() { + return user; + } + + + public OCIProcessConfig(boolean terminal, ConsoleSize consoleSize, String cwd, + List env, List args, RLimits rlimits, + String apparmorProfile, Capabilities capabilities, boolean noNewPrivileges, + int oomScoreAdj, String selinuxLabel, User user) { + this.terminal = terminal; + this.consoleSize = consoleSize; + this.cwd = cwd; + this.env = env; + this.args = args; + this.rlimits = rlimits; + this.apparmorProfile = apparmorProfile; + this.capabilities = capabilities; + this.noNewPrivileges = noNewPrivileges; + this.oomScoreAdj = oomScoreAdj; + this.selinuxLabel = selinuxLabel; + this.user = user; + } + + public OCIProcessConfig() { + this(false, null, null, null, null, null, null, null, false, 0, null, null); + } + + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class ConsoleSize { + public int getHeight() { + 
return height; + } + + public int getWidth() { + return width; + } + + final private int height; + + public ConsoleSize(int height, int width) { + this.height = height; + this.width = width; + } + + public ConsoleSize() { + this(0, 0); + } + + final private int width; + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class RLimits { + public String getType() { + return type; + } + + public long getSoft() { + return soft; + } + + public long getHard() { + return hard; + } + + final private String type; + + public RLimits(String type, long soft, long hard) { + this.type = type; + this.soft = soft; + this.hard = hard; + } + + public RLimits() { + this(null, 0, 0); + } + + final private long soft; + final private long hard; + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Capabilities { + final private List effective; + final private List bounding; + final private List inheritable; + final private List permitted; + final private List ambient; + + public List getEffective() { + return effective; + } + + public List getBounding() { + return bounding; + } + + public List getInheritable() { + return inheritable; + } + + public List getPermitted() { + return permitted; + } + + public List getAmbient() { + return ambient; + } + + + public Capabilities(List effective, List bounding, + List inheritable, List permitted, + List ambient) { + this.effective = effective; + this.bounding = bounding; + this.inheritable = inheritable; + this.permitted = permitted; + this.ambient = ambient; + } + + public Capabilities() { + this(null, null, null, null, null); + } + + } + + static class User { + final private int uid; + final private int gid; + final private int[] additionalGids; + + public User(int uid, int gid, int[] additionalGids) { + this.uid = uid; + this.gid = gid; + this.additionalGids = additionalGids; + } + + public User() { + this(0, 0, null); + } + } + } + + @JsonSerialize(include = 
JsonSerialize.Inclusion.NON_DEFAULT) + static class OCIHooksConfig { + final private List prestart; + final private List poststart; + final private List poststop; + + public List getPrestart() { + return prestart; + } + + public List getPoststart() { + return poststart; + } + + public List getPoststop() { + return poststop; + } + + public OCIHooksConfig(List prestart, List poststart, List poststop) { + this.prestart = prestart; + this.poststart = poststart; + this.poststop = poststop; + } + + public OCIHooksConfig() { + this(null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class HookType { + final private String path; + final private List args; + final private List env; + final private int timeout; + + public String getPath() { + return path; + } + + public List getArgs() { + return args; + } + + public List getEnv() { + return env; + } + + public int getTimeout() { + return timeout; + } + + public HookType(String path, List args, List env, int timeout) { + this.path = path; + this.args = args; + this.env = env; + this.timeout = timeout; + } + + public HookType() { + this(null, null, null, 0); + } + + } + } + + static class OCIAnnotationsConfig { + Map annotations; + + public OCIAnnotationsConfig(Map annotations) { + this.annotations = annotations; + } + public OCIAnnotationsConfig() { + this(null); + } + + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class OCILinuxConfig { + final private List namespaces; + final private List uidMappings; + final private List gidMappings; + final private List devices; + final private String cgroupsPath; + final private Resources resources; + final private IntelRdt intelRdt; + final private Sysctl sysctl; + @JsonRawValue + final private String seccomp; + final private String rootfsPropagation; + final private List maskedPaths; + final private List readonlyPaths; + final private String mountLabel; + + public List getNamespaces() { + return namespaces; + 
} + + public List getUidMappings() { + return uidMappings; + } + + public List getGidMappings() { + return gidMappings; + } + + public List getDevices() { + return devices; + } + + public String getCgroupsPath() { + return cgroupsPath; + } + + public Resources getResources() { + return resources; + } + + public IntelRdt getIntelRdt() { + return intelRdt; + } + + public Sysctl getSysctl() { + return sysctl; + } + + public String getSeccomp() { + return seccomp; + } + + public String getRootfsPropagation() { + return rootfsPropagation; + } + + public List getMaskedPaths() { + return maskedPaths; + } + + public List getReadonlyPaths() { + return readonlyPaths; + } + + public String getMountLabel() { + return mountLabel; + } + + public OCILinuxConfig(List namespaces, List uidMappings, + List gidMappings, List devices, + String cgroupsPath, Resources resources, IntelRdt intelRdt, + Sysctl sysctl, String seccomp, String rootfsPropagation, + List maskedPaths, List readonlyPaths, + String mountLabel) { + this.namespaces = namespaces; + this.uidMappings = uidMappings; + this.gidMappings = gidMappings; + this.devices = devices; + this.cgroupsPath = cgroupsPath; + this.resources = resources; + this.intelRdt = intelRdt; + this.sysctl = sysctl; + this.seccomp = seccomp; + this.rootfsPropagation = rootfsPropagation; + this.maskedPaths = maskedPaths; + this.readonlyPaths = readonlyPaths; + this.mountLabel = mountLabel; + } + + public OCILinuxConfig() { + this(null, null, null, null, null, null, null, null, null, null, null, null, null); + } + + static class Namespace { + final private String type; + final private String path; + + public Namespace(String type, String path) { + this.type = type; + this.path = path; + } + + public Namespace() { + this(null, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class IDMapping { + final private int containerID; + final private int hostID; + final private int size; + + public int getContainerID() { 
+ return containerID; + } + + public int getHostID() { + return hostID; + } + + public int getSize() { + return size; + } + + public IDMapping(int containerID, int hostID, int size) { + this.containerID = containerID; + this.hostID = hostID; + this.size = size; + } + public IDMapping() { + this(0, 0, 0); + } + + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Device { + final private String type; + final private String path; + final private long major; + final private long minor; + final private int fileMode; + final private int uid; + final private int gid; + + public String getType() { + return type; + } + + public String getPath() { + return path; + } + + public long getMajor() { + return major; + } + + public long getMinor() { + return minor; + } + + public int getFileMode() { + return fileMode; + } + + public int getUid() { + return uid; + } + + public int getGid() { + return gid; + } + + public Device(String type, String path, long major, long minor, + int fileMode, int uid, int gid) { + this.type = type; + this.path = path; + this.major = major; + this.minor = minor; + this.fileMode = fileMode; + this.uid = uid; + this.gid = gid; + } + + public Device() { + this(null, null, 0, 0, 0, 0, 0); + } + + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Resources { + final private List device; + final private Memory memory; + final private CPU cpu; + final private BlockIO blockIO; + final private List hugePageLimits; + final private Network network; + final private PID pid; + final private RDMA rdma; + + public List getDevice() { + return device; + } + + public Memory getMemory() { + return memory; + } + + public CPU getCPU() { + return cpu; + } + + public BlockIO getBlockIO() { + return blockIO; + } + + public List getHugePageLimits() { + return hugePageLimits; + } + + public Network getNetwork() { + return network; + } + + public PID getPID() { + return pid; + } + + public RDMA getRDMA() { + 
return rdma; + } + + public Resources(List device, + Memory memory, CPU cpu, + BlockIO blockIO, List hugePageLimits, + Network network, PID pid, + RDMA rdma) { + this.device = device; + this.memory = memory; + this.cpu = cpu; + this.blockIO = blockIO; + this.hugePageLimits = hugePageLimits; + this.network = network; + this.pid = pid; + this.rdma = rdma; + } + + public Resources() { + this(null, null, null, null, null, null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Device { + final private boolean allow; + final private String type; + final private long major; + final private long minor; + final private String access; + + public boolean isAllow() { + return allow; + } + + public String getType() { + return type; + } + + public long getMajor() { + return major; + } + + public long getMinor() { + return minor; + } + + public String getAccess() { + return access; + } + + public Device(boolean allow, String type, long major, long minor, String access) { + this.allow = allow; + this.type = type; + this.major = major; + this.minor = minor; + this.access = access; + } + + public Device() { + this(false, null, 0, 0, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Memory { + final private long limit; + final private long reservation; + final private long swap; + final private long kernel; + final private long kernelTCP; + final private long swappiness; + final private boolean disableOOMKiller; + + public long getLimit() { + return limit; + } + + public long getReservation() { + return reservation; + } + + public long getSwap() { + return swap; + } + + public long getKernel() { + return kernel; + } + + public long getKernelTCP() { + return kernelTCP; + } + + public long getSwappiness() { + return swappiness; + } + + public boolean isDisableOOMKiller() { + return disableOOMKiller; + } + + public Memory(long limit, long reservation, long swap, + long kernel, long kernelTCP, 
long swappiness, + boolean disableOOMKiller) { + this.limit = limit; + this.reservation = reservation; + this.swap = swap; + this.kernel = kernel; + this.kernelTCP = kernelTCP; + this.swappiness = swappiness; + this.disableOOMKiller = disableOOMKiller; + } + + public Memory() { + this(0, 0, 0, 0, 0, 0, false); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class CPU { + final private long quota; + final private long period; + final private long realtimeRuntime; + final private long realtimePeriod; + final private String cpus; + final private String mems; + + public long getShares() { + return shares; + } + + public long getQuota() { + return quota; + } + + public long getPeriod() { + return period; + } + + public long getRealtimeRuntime() { + return realtimeRuntime; + } + + public long getRealtimePeriod() { + return realtimePeriod; + } + + public String getCpus() { + return cpus; + } + + public String getMems() { + return mems; + } + + final private long shares; + + public CPU(long shares, long quota, long period, + long realtimeRuntime, long realtimePeriod, + String cpus, String mems) { + this.shares = shares; + this.quota = quota; + this.period = period; + this.realtimeRuntime = realtimeRuntime; + this.realtimePeriod = realtimePeriod; + this.cpus = cpus; + this.mems = mems; + } + + public CPU() { + this(0, 0, 0, 0, 0, null, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class BlockIO { + final private int weight; + final private int leafWeight; + final private List weightDevices; + final private List throttleReadBpsDevice; + final private List throttleWriteBpsDevice; + final private List throttleReadIOPSDevice; + final private List throttleWriteIOPSDevice; + + public int getWeight() { + return weight; + } + + public int getLeafWeight() { + return leafWeight; + } + + public List getWeightDevices() { + return weightDevices; + } + + public List getThrottleReadBpsDevice() { + return 
throttleReadBpsDevice; + } + + public List getThrottleWriteBpsDevice() { + return throttleWriteBpsDevice; + } + + public List getThrottleReadIOPSDevice() { + return throttleReadIOPSDevice; + } + + public List getThrottleWriteIOPSDevice() { + return throttleWriteIOPSDevice; + } + + public BlockIO(int weight, int leafWeight, List weightDevices, + List throttleReadBpsDevice, + List throttleWriteBpsDevice, + List throttleReadIOPSDevice, + List throttleWriteIOPSDevice) { + this.weight = weight; + this.leafWeight = leafWeight; + this.weightDevices = weightDevices; + this.throttleReadBpsDevice = throttleReadBpsDevice; + this.throttleWriteBpsDevice = throttleWriteBpsDevice; + this.throttleReadIOPSDevice = throttleReadIOPSDevice; + this.throttleWriteIOPSDevice = throttleWriteIOPSDevice; + } + + public BlockIO() { + this(0, 0, null, null, null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class WeightDevice { + final private long major; + final private long minor; + final private int weight; + final private int leafWeight; + + public long getMajor() { + return major; + } + + public long getMinor() { + return minor; + } + + public int getWeight() { + return weight; + } + + public int getLeafWeight() { + return leafWeight; + } + + public WeightDevice(long major, long minor, int weight, int leafWeight) { + this.major = major; + this.minor = minor; + this.weight = weight; + this.leafWeight = leafWeight; + } + + public WeightDevice() { + this(0, 0, 0, 0); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class ThrottleDevice { + final private long major; + final private long minor; + final private long rate; + + public long getMajor() { + return major; + } + + public long getMinor() { + return minor; + } + + public long getRate() { + return rate; + } + + public ThrottleDevice(long major, long minor, long rate) { + this.major = major; + this.minor = minor; + this.rate = rate; + } + + public 
ThrottleDevice() { + this(0, 0, 0); + } + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class HugePageLimits { + final private String pageSize; + final private long limit; + + public String getPageSize() { + return pageSize; + } + + public long getLimit() { + return limit; + } + + public HugePageLimits(String pageSize, long limit) { + this.pageSize = pageSize; + this.limit = limit; + } + + public HugePageLimits() { + this(null, 0); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Network { + final private int classID; + final private List priorities; + + public int getClassID() { + return classID; + } + + public List getPriorities() { + return priorities; + } + + public Network(int classID, List priorities) { + this.classID = classID; + this.priorities = priorities; + } + + public Network() { + this(0, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class NetworkPriority { + final private String name; + final private int priority; + + public String getName() { + return name; + } + + public int getPriority() { + return priority; + } + + public NetworkPriority(String name, int priority) { + this.name = name; + this.priority = priority; + } + + public NetworkPriority() { + this(null, 0); + } + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class PID { + final private long limit; + + public long getLimit() { + return limit; + } + + public PID(long limit) { + this.limit = limit; + } + + public PID() { + this(0); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class RDMA { + final private int hcaHandles; + final private int hcaObjects; + + public int getHcaHandles() { + return hcaHandles; + } + + public int getHcaObjects() { + return hcaObjects; + } + + public RDMA(int hcaHandles, int hcaObjects) { + this.hcaHandles = hcaHandles; + this.hcaObjects = hcaObjects; + } + + public RDMA() { + 
this(0, 0); + } + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class IntelRdt { + final private String closID; + final private String l3CacheSchema; + final private String memBwSchema; + + public String getClosID() { + return closID; + } + + public String getL3CacheSchema() { + return l3CacheSchema; + } + + public String getMemBwSchema() { + return memBwSchema; + } + + public IntelRdt(String closID, String l3CacheSchema, String memBwSchema) { + this.closID = closID; + this.l3CacheSchema = l3CacheSchema; + this.memBwSchema = memBwSchema; + } + + public IntelRdt() { + this(null, null, null); + } + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Sysctl { + // for kernel params + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Seccomp { + final private String defaultAction; + final private List architectures; + final private List syscalls; + + public String getDefaultAction() { + return defaultAction; + } + + public List getArchitectures() { + return architectures; + } + + public List getSyscalls() { + return syscalls; + } + + public Seccomp(String defaultAction, List architectures, List syscalls) { + this.defaultAction = defaultAction; + this.architectures = architectures; + this.syscalls = syscalls; + } + + public Seccomp() { + this(null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class Syscall { + final private List names; + final private String action; + final private List args; + + public List getNames() { + return names; + } + + public String getAction() { + return action; + } + + public List getArgs() { + return args; + } + + public Syscall(List names, String action, List args) { + this.names = names; + this.action = action; + this.args = args; + } + + public Syscall() { + this(null, null, null); + } + + @JsonSerialize(include = JsonSerialize.Inclusion.NON_DEFAULT) + static class SeccompArg { + final private 
int index; + final private long value; + final private long valueTwo; + final private String op; + + public int getIndex() { + return index; + } + + public long getValue() { + return value; + } + + public long getValueTwo() { + return valueTwo; + } + + public String getOp() { + return op; + } + + public SeccompArg(int index, long value, long valueTwo, String op) { + this.index = index; + this.value = value; + this.valueTwo = valueTwo; + this.op = op; + } + + public SeccompArg() { + this(0, 0, 0, null); + } + } + } + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIImageTagToManifestPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIImageTagToManifestPlugin.java new file mode 100644 index 00000000000..a9385574378 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIImageTagToManifestPlugin.java @@ -0,0 +1,32 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.service.Service; + +import java.io.IOException; + +@InterfaceStability.Unstable +public interface OCIImageTagToManifestPlugin extends Service { + ImageManifest getManifestFromImageTag(String imageTag) throws IOException; + + String getHashFromImageTag(String imageTag); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIManifestToResourcesPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIManifestToResourcesPlugin.java new file mode 100644 index 00000000000..5e2bafe56cf --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/OCIManifestToResourcesPlugin.java @@ -0,0 +1,35 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.service.Service; +import org.apache.hadoop.yarn.api.records.LocalResource; + +import java.io.IOException; +import java.util.List; + +@InterfaceStability.Unstable +public interface OCIManifestToResourcesPlugin extends Service { + //The layers should be returned in the order in which they appear in the manifest + List getLayerResources(ImageManifest manifest) throws IOException; + + LocalResource getConfigResource(ImageManifest manifest) throws IOException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java new file mode 100644 index 00000000000..d0be0ab6242 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java @@ -0,0 +1,795 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; +import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; +import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCILayer; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig.OCILinuxConfig; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig.OCIMount; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerExecutorConfig.OCIRuntimeConfig.OCIProcessConfig; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.volume.csi.ContainerVolumePublisher; +import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext; +import org.codehaus.jackson.JsonNode; +import org.codehaus.jackson.map.ObjectMapper; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Paths; 
+import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.OCI_CONTAINER_RUNTIME_PREFIX; +import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*; + +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class RuncContainerRuntime extends OCIContainerRuntime { + + private static final Log LOG = LogFactory.getLog( + RuncContainerRuntime.class); + + /** + * The oci image tag to manifest plugin class that should be used + */ + public static final String NM_OCI_IMAGE_TAG_TO_MANIFEST_PLUGIN = + OCI_CONTAINER_RUNTIME_PREFIX + "image-tag-to-manifest-plugin"; + + /** + * The oci manifest to resources plugin class that should be used + */ + public static final String NM_OCI_MANIFEST_TO_RESOURCES_PLUGIN = + OCI_CONTAINER_RUNTIME_PREFIX + "manifest-to-resources-plugin"; + + /** + * Target count of OCI layer mounts that we should keep on disk at one time. 
+ */ + public static final String NM_OCI_LAYER_MOUNTS_TO_KEEP = + OCI_CONTAINER_RUNTIME_PREFIX + "layer-mounts-to-keep"; + + public static final int DEFAULT_NM_OCI_LAYER_MOUNTS_TO_KEEP = 100; + + /** + * The interval in seconds between executions of reaping oci layer mounts + */ + public static final String NM_REAP_OCI_LAYER_MOUNTS_INTERVAL = + OCI_CONTAINER_RUNTIME_PREFIX + "layer-mounts-interval-secs"; + + public static final int DEFAULT_NM_REAP_OCI_LAYER_MOUNTS_INTERVAL = 600; + + + @InterfaceAudience.Private + public static final String ENV_RUNC_CONTAINER_IMAGE = + "YARN_CONTAINER_RUNTIME_RUNC_IMAGE"; + @InterfaceAudience.Private + public static final String ENV_RUNC_CONTAINER_HOSTNAME = + "YARN_CONTAINER_RUNTIME_RUNC_CONTAINER_HOSTNAME"; + + @InterfaceAudience.Private + private static final String RUNTIME_TYPE = "RUNC"; + @InterfaceAudience.Private + private final static String ENV_OCI_CONTAINER_PID_NAMESPACE = + formatOciEnvKey(RUNTIME_TYPE, CONTAINER_PID_NAMESPACE_SUFFIX); + @InterfaceAudience.Private + private final static String ENV_OCI_CONTAINER_RUN_PRIVILEGED_CONTAINER = + formatOciEnvKey(RUNTIME_TYPE, RUN_PRIVILEGED_CONTAINER_SUFFIX); + + private Configuration conf; + private Context nmContext; + private PrivilegedOperationExecutor privilegedOperationExecutor; + private CGroupsHandler cGroupsHandler; + private OCIImageTagToManifestPlugin imageTagToManifestPlugin; + private OCIManifestToResourcesPlugin manifestToResourcesPlugin; + private ObjectMapper mapper; + private String seccomp; + private int layersToKeep; + private String defaultRuncImage; + private ScheduledExecutorService exec; + private String seccompProfile; + private Set defaultROMounts = new HashSet<>(); + private Set defaultRWMounts = new HashSet<>(); + private Set allowedNetworks = new HashSet<>(); + private Set allowedRuntimes = new HashSet<>(); + private AccessControlList privilegedContainersAcl; + + public RuncContainerRuntime(PrivilegedOperationExecutor + privilegedOperationExecutor) { 
+ this(privilegedOperationExecutor, ResourceHandlerModule + .getCGroupsHandler()); + } + + //A constructor with an injected cGroupsHandler primarily used for testing. + @VisibleForTesting + public RuncContainerRuntime(PrivilegedOperationExecutor + privilegedOperationExecutor, CGroupsHandler cGroupsHandler) { + super(privilegedOperationExecutor, cGroupsHandler); + this.privilegedOperationExecutor = privilegedOperationExecutor; + + if (cGroupsHandler == null) { + LOG.info("cGroupsHandler is null - cgroups not in use."); + } else { + this.cGroupsHandler = cGroupsHandler; + } + } + + @Override + public void initialize(Configuration conf, Context nmContext) + throws ContainerExecutionException { + super.initialize(conf, nmContext); + this.conf = conf; + this.nmContext = nmContext; + imageTagToManifestPlugin = chooseImageTagToManifestPlugin(); + imageTagToManifestPlugin.init(conf); + manifestToResourcesPlugin = chooseManifestToResourcesPlugin(); + manifestToResourcesPlugin.init(conf); + mapper = new ObjectMapper(); + defaultRuncImage = conf.get(YarnConfiguration.NM_DOCKER_IMAGE_NAME); + + allowedNetworks.clear(); + allowedRuntimes.clear(); + + allowedNetworks.addAll(Arrays.asList( + conf.getTrimmedStrings( + YarnConfiguration.NM_DOCKER_ALLOWED_CONTAINER_NETWORKS, + YarnConfiguration.DEFAULT_NM_DOCKER_ALLOWED_CONTAINER_NETWORKS))); + + allowedRuntimes.addAll(Arrays.asList( + conf.getTrimmedStrings( + YarnConfiguration.NM_DOCKER_ALLOWED_CONTAINER_RUNTIMES, + YarnConfiguration.DEFAULT_NM_DOCKER_ALLOWED_CONTAINER_RUNTIMES))); + + privilegedContainersAcl = new AccessControlList(conf.getTrimmed( + YarnConfiguration.NM_DOCKER_PRIVILEGED_CONTAINERS_ACL, + YarnConfiguration.DEFAULT_NM_DOCKER_PRIVILEGED_CONTAINERS_ACL)); + + seccompProfile = conf.get(YarnConfiguration.NM_DOCKER_SECCOMP_PROFILE); + + defaultROMounts.addAll(Arrays.asList( + conf.getTrimmedStrings( + YarnConfiguration.NM_DOCKER_DEFAULT_RO_MOUNTS))); + + defaultRWMounts.addAll(Arrays.asList( + conf.getTrimmedStrings( 
+ YarnConfiguration.NM_DOCKER_DEFAULT_RW_MOUNTS))); + + try { + //TODO Remove whitespace in seccomp that gets output to config.json + if (seccompProfile != null) { + seccomp = new String(Files.readAllBytes(Paths.get(seccompProfile))); + } + } catch (IOException ioe) { + throw new ContainerExecutionException(ioe); + } + + layersToKeep = conf.getInt(NM_OCI_LAYER_MOUNTS_TO_KEEP, + DEFAULT_NM_OCI_LAYER_MOUNTS_TO_KEEP); + + } + + @Override + public void start() { + int reapOciOCILayerOCIMountsInterval = + conf.getInt(NM_REAP_OCI_LAYER_MOUNTS_INTERVAL, + DEFAULT_NM_REAP_OCI_LAYER_MOUNTS_INTERVAL); + exec = HadoopExecutors.newScheduledThreadPool(1); + exec.scheduleAtFixedRate(new Runnable() { + @Override + public void run() { + try { + PrivilegedOperation launchOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.REAP_OCI_LAYER_MOUNTS); + launchOp.appendArgs(Integer.toString(layersToKeep)); + try { + String stdout = privilegedOperationExecutor + .executePrivilegedOperation(null, + launchOp, null, null, false, false); + if(stdout != null) { + LOG.info("Reap layer mounts thread: " + stdout); + } + } catch (PrivilegedOperationException e) { + LOG.warn("Failed to reap old oci layer mounts", e); + } + } catch (Exception e) { + LOG.warn("Reap layer mount thread caught an exception: ", e); + } + } + }, 0, reapOciOCILayerOCIMountsInterval, TimeUnit.SECONDS); + imageTagToManifestPlugin.start(); + manifestToResourcesPlugin.start(); + } + + @Override + public void stop() { + exec.shutdownNow(); + imageTagToManifestPlugin.stop(); + manifestToResourcesPlugin.stop(); + } + + @Override + public void launchContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + List env = new ArrayList<>(); + Container container = ctx.getContainer(); + String user = container.getUser(); + ContainerId containerId = container.getContainerId(); + ApplicationId appId = containerId.getApplicationAttemptId() + .getApplicationId(); + + Map environment = 
container.getLaunchContext() + .getEnvironment(); + ArrayList mounts = new ArrayList<>(); + ArrayList layers = new ArrayList<>(); + String hostname = environment.get(ENV_RUNC_CONTAINER_HOSTNAME); + + validateHostname(hostname); + + String containerIdStr = containerId.toString(); + String applicationId = appId.toString(); + Path containerWorkDir = ctx.getExecutionAttribute(CONTAINER_WORK_DIR); + + ContainerRuntimeObject containerRuntimeObject = + (ContainerRuntimeObject) container.getContainerRuntimeData(); + List layerResources = containerRuntimeObject.getOCILayers(); + + ResourceLocalizationService localizationService = + nmContext.getContainerManager().getResourceLocalizationService(); + + List args = new ArrayList<>(); + + try { + try { + LocalResource rsrc = containerRuntimeObject.getConfig(); + LocalResourceRequest req = new LocalResourceRequest(rsrc); + LocalizedResource localRsrc = localizationService + .getLocalizedResource(req, user, appId); + if (localRsrc == null) { + throw new ContainerExecutionException("Could not successfully " + + "localize layers. 
rsrc: " + rsrc.getResource().getFile()); + } + + File file = new File(localRsrc.getLocalPath().toString()); + List imageEnv = extractImageEnv(file); + if (imageEnv != null && !imageEnv.isEmpty()) { + env.addAll(imageEnv); + } + List entrypoint = extractImageEntrypoint(file); + if (entrypoint != null && !entrypoint.isEmpty()) { + args.addAll(entrypoint); + } + } catch (IOException ioe) { + throw new ContainerExecutionException(ioe); + } + + for (LocalResource rsrc : layerResources) { + LocalResourceRequest req = new LocalResourceRequest(rsrc); + LocalizedResource localRsrc = localizationService + .getLocalizedResource(req, user, appId); + + OCILayer layer = new OCILayer("application/vnd.squashfs", + localRsrc.getLocalPath().toString()); + layers.add(layer); + } + } catch (URISyntaxException e) { + throw new ContainerExecutionException(e); + } + + setContainerMounts(mounts, ctx, containerWorkDir); + + String resourcesOpts = ctx.getExecutionAttribute(RESOURCES_OPTIONS); + + Path nmPrivateContainerScriptPath = ctx.getExecutionAttribute( + NM_PRIVATE_CONTAINER_SCRIPT_PATH); + + Path nmPrivateTokensPath = + ctx.getExecutionAttribute(NM_PRIVATE_TOKENS_PATH); + + int cpuShares = container.getResource().getVirtualCores(); + + // Zero sets to default of 1024. 
2 is the minimum value otherwise + if (cpuShares < 2) { + cpuShares = 2; + } + + Path launchDst = + new Path(containerWorkDir, ContainerLaunch.CONTAINER_SCRIPT); + + args.add("bash"); + args.add(launchDst.toUri().getPath()); + + String cgroupPath = getCgroupPath(resourcesOpts, "oci-" + containerIdStr); + + String pidFile = ctx.getExecutionAttribute(PID_FILE_PATH).toString(); + + @SuppressWarnings("unchecked") + List localDirs = ctx.getExecutionAttribute(LOCAL_DIRS); + @SuppressWarnings("unchecked") + List logDirs = ctx.getExecutionAttribute(LOG_DIRS); + + + OCIProcessConfig processConfig = createOCIProcessConfig( + containerWorkDir.toString(), env, args); + OCILinuxConfig linuxConfig = createOCILinuxConfig(cpuShares, + cgroupPath, seccomp); + + OCIRuntimeConfig ociRuntimeConfig = new OCIRuntimeConfig(null, mounts, + processConfig, hostname, null, null, linuxConfig); + + OCIContainerExecutorConfig ociContainerExecutorConfig = + createOCIContainerExecutorConfig(user, containerIdStr, applicationId, + pidFile, nmPrivateContainerScriptPath.toString(), + nmPrivateTokensPath.toString(), localDirs, logDirs, layers, + ociRuntimeConfig); + + String commandFile = writeCommandToFile(mapper, + ociContainerExecutorConfig, container, nmContext); + PrivilegedOperation launchOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.RUN_OCI_CONTAINER); + + launchOp.appendArgs(commandFile); + + try { + privilegedOperationExecutor.executePrivilegedOperation(null, + launchOp, null, null, false, false); + } catch (PrivilegedOperationException e) { + LOG.info("Launch container failed: ", e); + try { + LOG.debug("config.json used: " + + mapper.writeValueAsString(ociContainerExecutorConfig)); + } catch (IOException ioe) { + LOG.info("Json Generation Exception", ioe); + } + + throw new ContainerExecutionException("Launch container failed", e + .getExitCode(), e.getOutput(), e.getErrorOutput()); + } + } + + private String getCgroupPath(String resourcesOptions, String containerIdStr) 
{ + if (cGroupsHandler == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("cGroupsHandler is null. cgroups are not in use. nothing to" + + " do."); + } + return null; + } + + if (resourcesOptions.equals( + (PrivilegedOperation.CGROUP_ARG_PREFIX + PrivilegedOperation + .CGROUP_ARG_NO_TASKS))) { + if (LOG.isDebugEnabled()) { + LOG.debug("no resource restrictions specified. not using runc's " + + "cgroup options"); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("using runc's cgroups options"); + } + + String cGroupPath = "/" + cGroupsHandler.getRelativePathForCGroup( + containerIdStr); + + if (LOG.isDebugEnabled()) { + LOG.debug("using cgroup parent: " + cGroupPath); + } + + return cGroupPath; + } + return null; + } + + private void addDefaultMountLocation(List mounts, + Set defaultMounts, boolean createSource, boolean isReadWrite) + throws ContainerExecutionException { + if(defaultMounts != null && !defaultMounts.isEmpty()) { + for (String mount : defaultMounts) { + String[] dir = StringUtils.split(mount, ':'); + if (dir.length != 2) { + throw new ContainerExecutionException("Invalid mount : " + + mount); + } + String src = dir[0]; + String dst = dir[1]; + addOCIMountLocation(mounts, src, dst, createSource, isReadWrite); + } + } + } + + private void addOCIMountLocation(List mounts, String srcPath, + String dstPath, boolean createSource, boolean isReadWrite) { + if (!createSource) { + boolean sourceExists = new File(srcPath).exists(); + if (!sourceExists) { + return; + } + } + + ArrayList options = new ArrayList<>(); + if (isReadWrite) { + options.add("rw"); + } else { + options.add("ro"); + } + options.add("rbind"); + options.add("rprivate"); + mounts.add(new OCIMount(dstPath, "bind", srcPath, options)); + } + + private void addAllOCIMountLocations(List mounts, + List paths, boolean createSource, boolean isReadWrite) { + for (String dir: paths) { + this.addOCIMountLocation(mounts, dir, dir, createSource, isReadWrite); + } + } + + public Map 
getLocalResources( + Container container) throws IOException { + Map containerLocalRsrc = + container.getLaunchContext().getLocalResources(); + long layerCount = 0; + Map environment = + container.getLaunchContext().getEnvironment(); + String imageName = environment.get(ENV_RUNC_CONTAINER_IMAGE); + if (imageName == null || imageName.isEmpty()) { + environment.put(ENV_RUNC_CONTAINER_IMAGE, + defaultRuncImage); + imageName = defaultRuncImage; + } + + ImageManifest manifest = + imageTagToManifestPlugin.getManifestFromImageTag(imageName); + LocalResource config = + manifestToResourcesPlugin.getConfigResource(manifest); + List layers = + manifestToResourcesPlugin.getLayerResources(manifest); + + ContainerRuntimeObject containerRuntimeObject = + new ContainerRuntimeObject(config, layers); + container.setContainerRuntimeData(containerRuntimeObject); + + for (LocalResource localRsrc : layers) { + while(containerLocalRsrc.putIfAbsent("oci-layer" + + Long.toString(layerCount++), localRsrc) != null); + } + + while(containerLocalRsrc.putIfAbsent("oci-config" + + Long.toString(layerCount++), config) != null); + + return containerLocalRsrc; + } + + private OCIImageTagToManifestPlugin chooseImageTagToManifestPlugin() + throws ContainerExecutionException { + String pluginName = + conf.get(NM_OCI_IMAGE_TAG_TO_MANIFEST_PLUGIN); + OCIImageTagToManifestPlugin ociImageTagToManifestPlugin; + try { + Class clazz = Class.forName(pluginName); + ociImageTagToManifestPlugin = + (OCIImageTagToManifestPlugin) clazz.newInstance(); + } catch (Exception e) { + throw new ContainerExecutionException(e); + } + return ociImageTagToManifestPlugin; + } + + private OCIManifestToResourcesPlugin chooseManifestToResourcesPlugin() + throws ContainerExecutionException { + String pluginName = + conf.get(NM_OCI_MANIFEST_TO_RESOURCES_PLUGIN); + LOG.info("pluginName = " + pluginName); + OCIManifestToResourcesPlugin ociManifestToResourcesPlugin; + try { + Class clazz = Class.forName(pluginName); + 
ociManifestToResourcesPlugin = + (OCIManifestToResourcesPlugin) clazz.newInstance(); + } catch (Exception e) { + throw new ContainerExecutionException(e); + } + return ociManifestToResourcesPlugin; + } + + @SuppressWarnings("unchecked") + private List extractImageEnv(File config) throws IOException { + JsonNode node = mapper.readTree(config); + JsonNode envNode = node.path("config").path("Env"); + if (envNode.isMissingNode()) { + return null; + } + return mapper.readValue(envNode, List.class); + } + + @SuppressWarnings("unchecked") + private List extractImageEntrypoint(File config) throws IOException { + JsonNode node = mapper.readTree(config); + JsonNode entrypointNode = node.path("config").path("Entrypoint"); + if (entrypointNode.isMissingNode()) { + return null; + } + return mapper.readValue(entrypointNode, List.class); + } + + private OCIContainerExecutorConfig createOCIContainerExecutorConfig( + String username, String containerId, String applicationId, String pidFile, + String containerScriptPath, String containerCredentialsPath, + List localDirs, List logDirs, + List layers, OCIRuntimeConfig ociRuntimeConfig) { + + return new OCIContainerExecutorConfig(username, containerId, applicationId, + pidFile, containerScriptPath, containerCredentialsPath, + localDirs, logDirs, layers, layersToKeep, ociRuntimeConfig); + } + + private OCIProcessConfig createOCIProcessConfig(String cwd, + List env, List args) { + return new OCIProcessConfig(false, null, cwd, env, + args, null, null, null, false, 0, null, null); + } + + private OCILinuxConfig createOCILinuxConfig(long cpuShares, + String cgroupsPath, String seccomp) { + OCILinuxConfig.Resources.CPU cgroupCPU = + new OCILinuxConfig.Resources.CPU(cpuShares, 0, 0, 0, 0, + null, null); + OCILinuxConfig.Resources cgroupResources = + new OCILinuxConfig.Resources(null, null, cgroupCPU, null, null, null, + null, null); + + return new OCILinuxConfig(null, null, null, null, + cgroupsPath, cgroupResources, null, null, seccomp, 
null, null, + null, null); + } + + private void setContainerMounts(ArrayList mounts, + ContainerRuntimeContext ctx, Path containerWorkDir) + throws ContainerExecutionException { + @SuppressWarnings("unchecked") + List filecacheDirs = ctx.getExecutionAttribute(FILECACHE_DIRS); + @SuppressWarnings("unchecked") + List containerLogDirs = ctx.getExecutionAttribute( + CONTAINER_LOG_DIRS); + @SuppressWarnings("unchecked") + List userFilecacheDirs = + ctx.getExecutionAttribute(USER_FILECACHE_DIRS); + @SuppressWarnings("unchecked") + List applicationLocalDirs = + ctx.getExecutionAttribute(APPLICATION_LOCAL_DIRS); + + addOCIMountLocation(mounts, containerWorkDir.toString() + + "/private_slash_tmp", "/tmp", true, true); + addOCIMountLocation(mounts, containerWorkDir.toString() + + "/private_var_slash_tmp", "/var/tmp", true, true); + + addAllOCIMountLocations(mounts, containerLogDirs, true, true); + addAllOCIMountLocations(mounts, applicationLocalDirs, true, true); + addAllOCIMountLocations(mounts, filecacheDirs, false, false); + addAllOCIMountLocations(mounts, userFilecacheDirs, false, false); + addDefaultMountLocation(mounts, defaultROMounts, false, false); + addDefaultMountLocation(mounts, defaultRWMounts, false, true); + } + + public String writeCommandToFile(ObjectMapper mapper, + OCIContainerExecutorConfig ociContainerExecutorConfig, + Container container, Context nmContext) + throws ContainerExecutionException { + ContainerId containerId = container.getContainerId(); + String filePrefix = containerId.toString(); + ApplicationId appId = containerId.getApplicationAttemptId() + .getApplicationId(); + File commandFile; + try { + File cmdDir = null; + + if(nmContext != null && nmContext.getLocalDirsHandler() != null) { + String cmdDirStr = nmContext.getLocalDirsHandler().getLocalPathForWrite( + ResourceLocalizationService.NM_PRIVATE_DIR + Path.SEPARATOR + + appId + Path.SEPARATOR + filePrefix + Path.SEPARATOR).toString(); + cmdDir = new File(cmdDirStr); + if 
(!cmdDir.mkdirs() && !cmdDir.exists()) { + throw new IOException("Cannot create container private directory " + + cmdDir); + } + } + commandFile = new File(cmdDir + "/oci-config.json"); + + try { + mapper.writeValue(commandFile, ociContainerExecutorConfig); + } catch (IOException ioe) { + throw new ContainerExecutionException(ioe); + } + + return commandFile.getAbsolutePath(); + } catch (IOException e) { + LOG.warn("Unable to write docker command to temporary file!"); + throw new ContainerExecutionException(e); + } + } + + public String getExposedPorts(Container container) { + return null; + } + + public String[] getIpAndHost(Container container) { + return null; + } + + public IOStreamPair execContainer(ContainerExecContext ctx) + throws ContainerExecutionException { + return null; + } + + public void reapContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + + } + + public void relaunchContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + } + + + public boolean isRuntimeRequested(Map env) { + String type = (env == null) + ? 
null : env.get(ContainerRuntimeConstants.ENV_CONTAINER_TYPE); + if (type == null) { + type = conf.get(YarnConfiguration.LINUX_CONTAINER_RUNTIME_TYPE); + } + return type != null && type.equals( + ContainerRuntimeConstants.CONTAINER_RUNTIME_FSIMAGE); + } + + protected void handleContainerRemove(String containerId, + Map env) throws ContainerExecutionException { + + } + + @Override + public void signalContainer(ContainerRuntimeContext ctx) + throws ContainerExecutionException { + ContainerExecutor.Signal signal = ctx.getExecutionAttribute(SIGNAL); + Container container = ctx.getContainer(); + + if (signal == ContainerExecutor.Signal.KILL || + signal == ContainerExecutor.Signal.TERM) { + ContainerVolumePublisher publisher = new ContainerVolumePublisher( + container, container.getCsiVolumesRootDir(), this); + try { + publisher.unpublishVolumes(); + } catch (YarnException | IOException e) { + throw new ContainerExecutionException(e); + } + } + + PrivilegedOperation signalOp = new PrivilegedOperation( + PrivilegedOperation.OperationType.SIGNAL_CONTAINER); + + signalOp.appendArgs(ctx.getExecutionAttribute(RUN_AS_USER), + ctx.getExecutionAttribute(USER), + Integer.toString(PrivilegedOperation.RunAsUserCommand + .SIGNAL_CONTAINER.getValue()), + ctx.getExecutionAttribute(PID), + Integer.toString(signal.getValue())); + + //Some failures here are acceptable. Let the calling executor decide. + signalOp.disableFailureLogging(); + + try { + PrivilegedOperationExecutor executor = PrivilegedOperationExecutor + .getInstance(conf); + + executor.executePrivilegedOperation(null, + signalOp, null, null, false, false); + } catch (PrivilegedOperationException e) { + //Don't log the failure here. Some kinds of signaling failures are + // acceptable. Let the calling executor decide what to do. 
+ throw new ContainerExecutionException("Signal container failed", e + .getExitCode(), e.getOutput(), e.getErrorOutput()); + } + } + + static class ContainerRuntimeObject { + final List layers; + final LocalResource config; + + public ContainerRuntimeObject(LocalResource config, + List layers) { + this.config = config; + this.layers = layers; + } + + public LocalResource getConfig() { + return this.config; + } + + public List getOCILayers() { + return this.layers; + } + } + + boolean getHostPidNamespaceEnabled() { + return conf.getBoolean( + YarnConfiguration.NM_DOCKER_ALLOW_HOST_PID_NAMESPACE, + YarnConfiguration.DEFAULT_NM_DOCKER_ALLOW_HOST_PID_NAMESPACE); + } + + boolean getPrivilegedContainersEnabledOnCluster() { + return conf.getBoolean( + YarnConfiguration.NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS, + YarnConfiguration.DEFAULT_NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS); + } + + Set getAllowedNetworks() { + return allowedNetworks; + } + + Set getAllowedRuntimes() { + return allowedRuntimes; + } + + AccessControlList getPrivilegedContainersAcl() { + return privilegedContainersAcl; + } + + String getEnvOciContainerPidNamespace() { + return ENV_OCI_CONTAINER_PID_NAMESPACE; + } + + String getEnvOciContainerRunPrivilegedContainer() { + return ENV_OCI_CONTAINER_RUN_PRIVILEGED_CONTAINER; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 0494c2d96bb..f8a4b7e49f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -1673,4 +1673,13 @@ private boolean checkLocalDir(String localDir) { localDirPathFsPermissionsMap.put(sysDir, nmPrivatePermission); return localDirPathFsPermissionsMap; } + + public LocalizedResource getLocalizedResource(LocalResourceRequest req, String user, + ApplicationId appId) { + LocalResourcesTracker tracker = getLocalResourcesTracker(req.getVisibility(), user, appId); + if (tracker == null) { + return null; + } + return tracker.getLocalizedResource(req); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java index a8b295ddb32..49f3b1b039e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntimeConstants.java @@ -34,4 +34,8 @@ @Private public static final String CONTAINER_RUNTIME_DOCKER = "docker"; + + @Private + public static final String CONTAINER_RUNTIME_FSIMAGE = + "fsimage"; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java index 3fec9596e68..e70f6c5c2cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/volume/csi/ContainerVolumePublisher.java @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerRuntime; import org.apache.hadoop.yarn.server.volume.csi.CsiConstants; import org.apache.hadoop.yarn.server.volume.csi.VolumeMetaData; import org.apache.hadoop.yarn.server.volume.csi.exception.InvalidVolumeException; @@ -51,10 +52,10 @@ private final Container container; private final String localMountRoot; - private final DockerLinuxContainerRuntime runtime; + private final OCIContainerRuntime runtime; public ContainerVolumePublisher(Container container, String localMountRoot, - DockerLinuxContainerRuntime runtime) { + OCIContainerRuntime runtime) { LOG.info("Initiate container volume publisher, containerID={}," + " volume local mount rootDir={}", container.getContainerId().toString(), localMountRoot); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index 15c1cac9cb8..09c836e596f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -223,6 +223,7 @@ public void setup() throws IOException { nodeHealthChecker.init(conf); containerManager = createContainerManager(delSrvc); ((NMContext)context).setContainerManager(containerManager); + ((NMContext)context).setContainerExecutor(exec); nodeStatusUpdater.init(conf); containerManager.init(conf); nodeStatusUpdater.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java index e920105abf9..de20abf6682 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManagerRecovery.java @@ -794,6 +794,7 @@ public int getHttpPort() { .byteValue() })); context.getContainerTokenSecretManager().setMasterKey(masterKey); context.getNMTokenSecretManager().setMasterKey(masterKey); + context.setContainerExecutor(exec); 
return context; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java index ea3acca35e1..3972d9b4470 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java @@ -1316,6 +1316,7 @@ public boolean matches(LocalizationEvent e) { new HashMap<>(); private final Map eventToFinalState = new HashMap<>(); + final ContainerExecutor exec; WrappedContainer(int appId, long timestamp, int id, String user) throws IOException { @@ -1352,6 +1353,7 @@ protected void scheduleContainer(Container container) { container.sendLaunchEvent(); } }; + exec = mock(ContainerExecutor.class); dispatcher.register(LocalizationEventType.class, localizerBus); dispatcher.register(ContainersLauncherEventType.class, launcherBus); dispatcher.register(ContainersMonitorEventType.class, monitorBus); @@ -1412,6 +1414,9 @@ protected void scheduleContainer(Container container) { } when(ctxt.getLocalResources()).thenReturn(localResources); + when(exec.getLocalResources(any())).thenReturn(localResources); + when(context.getContainerExecutor()).thenReturn(exec); + if (withServiceData) { Random r = new Random(); long seed = r.nextLong(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java index 8ab9df6321b..01ff4c8c3f0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java @@ -30,14 +30,19 @@ import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.util.DockerClientConfigHandler; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.TestDockerClientConfigHandler; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.Context; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManager; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationException; @@ -49,6 +54,9 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.DockerCommandPlugin; import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePlugin; import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.ResourcePluginManager; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeConstants; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; @@ -70,6 +78,7 @@ import java.io.FileWriter; import java.io.IOException; import java.nio.ByteBuffer; +import java.net.URISyntaxException; import java.nio.charset.Charset; import java.nio.file.Files; import java.nio.file.Paths; @@ -136,6 +145,7 @@ private Container container; private ContainerId cId; private ApplicationAttemptId appAttemptId; + private ApplicationId mockApplicationId; private ContainerLaunchContext context; private Context nmContext; private HashMap env; @@ -201,15 +211,21 @@ public void setup() { container = mock(Container.class); cId = mock(ContainerId.class); appAttemptId = mock(ApplicationAttemptId.class); + mockApplicationId = mock(ApplicationId.class); context = mock(ContainerLaunchContext.class); env = new HashMap(); env.put("FROM_CLIENT", "1"); image = "busybox:latest"; nmContext = createMockNMContext(); + List localRsrc = new ArrayList<>(); + localRsrc.add(LocalResource.newInstance(URL.fromPath(new Path("/local/layer1")), null, null, 12345, 12345)); + localRsrc.add(LocalResource.newInstance(URL.fromPath(new Path("/local/layer2")), null, null, 12345, 12345)); 
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_IMAGE, image); when(container.getContainerId()).thenReturn(cId); when(cId.toString()).thenReturn(containerId); + when(mockApplicationId.toString()).thenReturn("applicationId"); + when(appAttemptId.getApplicationId()).thenReturn(mockApplicationId); when(cId.getApplicationAttemptId()).thenReturn(appAttemptId); when(container.getLaunchContext()).thenReturn(context); when(context.getEnvironment()).thenReturn(env); @@ -280,6 +296,9 @@ public void setup() { localizedResources.put(new Path("/test_local_dir/test_resource_file"), Collections.singletonList("test_dir/test_resource_file")); + File tmpDir = new File(tmpPath); + tmpDir.mkdirs(); + testCapabilities = new String[] {"NET_BIND_SERVICE", "SYS_CHROOT"}; conf.setStrings(YarnConfiguration.NM_DOCKER_CONTAINER_CAPABILITIES, testCapabilities); @@ -310,6 +329,10 @@ public void setup() { } public Context createMockNMContext() { + List localRsrcs = new ArrayList<>(); + LocalResource rsrc; + LocalResourceRequest req = null; + Context mockNMContext = mock(Context.class); LocalDirsHandlerService localDirsHandler = mock(LocalDirsHandlerService.class); @@ -328,6 +351,27 @@ public Context createMockNMContext() { when(mockNMContext.getContainers()).thenReturn(containerMap); when(containerMap.get(any())).thenReturn(container); + ContainerManager mockContainerManager = mock(ContainerManager.class); + ResourceLocalizationService mockLocalzationService = mock(ResourceLocalizationService.class); + + + rsrc = LocalResource.newInstance(URL.fromPath(new Path("/local/layer1")), null, null, 12345, 12345); + localRsrcs.add(rsrc); + localRsrcs.add(LocalResource.newInstance(URL.fromPath(new Path("/local/layer2")), null, null, 12345, 12345)); + + try { + req = new LocalResourceRequest(rsrc); + } catch (URISyntaxException e) { + e.printStackTrace(); + } + LocalizedResource mockLocalizedResource = mock(LocalizedResource.class); + + when(mockLocalizedResource.getLocalPath()).thenReturn(new 
Path("/local/layer1")); + when(mockLocalzationService.getLocalizedResource(any(), anyString(), any())) + .thenReturn(mockLocalizedResource); + when(mockContainerManager.getResourceLocalizationService()).thenReturn(mockLocalzationService); + when(mockNMContext.getContainerManager()).thenReturn(mockContainerManager); + try { when(localDirsHandler.getLocalPathForWrite(anyString())) .thenReturn(new Path(tmpPath)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java index dcba179a28c..af7d8b14515 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java @@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.OCIContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.TestDockerContainerRuntime; import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerRuntimeContext; import org.junit.Assert; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java index 980f29b57c6..a7e4a56eef0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java @@ -278,4 +278,14 @@ public void sendPauseEvent(String description) { public List getLocalizationStatuses() { return null; } + + @Override + public void setContainerRuntimeData(Object object) { + + } + + @Override + public Object getContainerRuntimeData() { + return null; + } }