diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml index e9239e4..228f041 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml @@ -347,10 +347,13 @@ definitions: description: The absolute path that this configuration file should be mounted as, in the application container. src_file: type: string - description: Required for type template. This provides the source location of the template which needs to be mounted as dest_file post property substitutions. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, etc. + description: Required only for type template. This provides the source location of the template which needs to be mounted as dest_file post property substitutions. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, etc. props: type: object - description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file post property substitutions. + description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file. If a property already exists in src_file, its value is replaced; if it does not, it is appended as a new property in dest_file. + envs: + type: object + description: Required only for type template. A blob of key-value pairs for substituting environment variables in the template. Container: description: An instance of a running application container.
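For reference, a minimal sketch of how a component might declare a template config file against the updated ConfigFile model in this patch. The src/dest paths and the property below are invented for illustration only, and the new envs field from the YAML definition is not exercised here since the Java model in this diff does not yet expose it.

```java
import org.apache.slider.api.resource.ConfigFile;

public class TemplateConfigExample {
  // Build a HADOOP_XML_TEMPLATE config file entry; paths and the property
  // name/value here are hypothetical.
  static ConfigFile hbaseSiteTemplate() {
    ConfigFile file = new ConfigFile();
    file.setType(ConfigFile.TypeEnum.HADOOP_XML_TEMPLATE);
    file.setSrcFile("hdfs:///apps/hbase/conf/hbase-site-template.xml"); // source template (assumed path)
    file.setDestFile("/etc/hbase/conf/hbase-site.xml");                 // mount path in the container (assumed path)
    // Properties already present in the template are overwritten; missing ones are appended.
    file.getProps().put("hbase.cluster.distributed", "true");
    return file;
  }
}
```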
properties: diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java index abad34e..88122ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/services/api/impl/TestApplicationApiService.java @@ -63,7 +63,7 @@ public void testValidateApplicationPostPayload() throws Exception { // no name try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail(EXCEPTION_PREFIX + "application with no name"); } catch (IllegalArgumentException e) { Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage()); @@ -74,7 +74,7 @@ public void testValidateApplicationPostPayload() throws Exception { for (String badName : badNames) { app.setName(badName); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail(EXCEPTION_PREFIX + "application with bad name " + badName); } catch (IllegalArgumentException e) { Assert.assertEquals(ERROR_APPLICATION_NAME_INVALID_FORMAT, @@ -85,7 +85,7 @@ public void testValidateApplicationPostPayload() throws Exception { // no artifact app.setName("finance_home"); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail(EXCEPTION_PREFIX + "application with no artifact"); } catch (IllegalArgumentException e) { Assert.assertEquals(ERROR_ARTIFACT_INVALID, e.getMessage()); @@ -95,7 +95,7 @@ public void testValidateApplicationPostPayload() throws Exception { Artifact artifact = new Artifact(); app.setArtifact(artifact); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail(EXCEPTION_PREFIX + "application with no artifact id"); } catch (IllegalArgumentException e) { Assert.assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); @@ -106,7 +106,7 @@ public void testValidateApplicationPostPayload() throws Exception { artifact.setId("app.io/hbase:facebook_0.2"); app.setNumberOfContainers(5l); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); } catch (IllegalArgumentException e) { logger.error("application attributes specified should be valid here", e); Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); @@ -128,7 +128,7 @@ public void testValidateApplicationPostPayload() throws Exception { // resource not specified artifact.setId("docker.io/centos:centos7"); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail(EXCEPTION_PREFIX + "application with no resource"); } catch (IllegalArgumentException e) { Assert.assertEquals(ERROR_RESOURCE_INVALID, e.getMessage()); @@ -138,7 +138,7 @@ public void testValidateApplicationPostPayload() throws Exception { Resource res = new Resource(); app.setResource(res); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); 
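The same rename appears later in SliderClient.actionCreate, which passes a real FileSystem. A compact sketch of the call pattern the test exercises is below; the artifact id and container count are just the test's sample values, and the resource-model imports are assumed to live in org.apache.slider.api.resource.

```java
import org.apache.hadoop.fs.FileSystem;
import org.apache.slider.api.resource.Application;
import org.apache.slider.api.resource.Artifact;
import org.apache.slider.util.ServiceApiUtil;

public class ValidatePayloadSketch {
  // Mirrors the first "valid" checkpoint of the test above: a named app with
  // an artifact id and a container count. The tests pass a null FileSystem;
  // SliderClient.actionCreate (further down in this patch) passes a real one.
  static void validate(FileSystem fs) throws Exception {
    Application app = new Application();
    app.setName("finance_home");
    Artifact artifact = new Artifact();
    artifact.setId("app.io/hbase:facebook_0.2");
    app.setArtifact(artifact);
    app.setNumberOfContainers(5L);
    ServiceApiUtil.validateApplicationPayload(app, fs); // throws IllegalArgumentException on a bad payload
  }
}
```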
Assert.fail(EXCEPTION_PREFIX + "application with no memory"); } catch (IllegalArgumentException e) { Assert.assertEquals(ERROR_RESOURCE_MEMORY_INVALID, e.getMessage()); @@ -149,7 +149,7 @@ public void testValidateApplicationPostPayload() throws Exception { res.setMemory("100mb"); res.setCpus(-2); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail( EXCEPTION_PREFIX + "application with invalid no of cpups"); } catch (IllegalArgumentException e) { @@ -159,7 +159,7 @@ public void testValidateApplicationPostPayload() throws Exception { // number of containers not specified res.setCpus(2); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail( EXCEPTION_PREFIX + "application with no container count"); } catch (IllegalArgumentException e) { @@ -169,7 +169,7 @@ public void testValidateApplicationPostPayload() throws Exception { // specifying profile along with cpus/memory raises exception res.setProfile("hbase_finance_large"); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail(EXCEPTION_PREFIX + "application with resource profile along with cpus/memory"); } catch (IllegalArgumentException e) { @@ -182,7 +182,7 @@ public void testValidateApplicationPostPayload() throws Exception { res.setCpus(null); res.setMemory(null); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); Assert.fail(EXCEPTION_PREFIX + "application with resource profile only - NOT SUPPORTED"); } catch (IllegalArgumentException e) { @@ -198,7 +198,7 @@ public void testValidateApplicationPostPayload() throws Exception { // everything valid here app.setNumberOfContainers(5l); try { - ServiceApiUtil.validateApplicationPostPayload(app); + ServiceApiUtil.validateApplicationPayload(app, null); } catch (IllegalArgumentException e) { logger.error("application attributes specified should be valid here", e); Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java new file mode 100644 index 0000000..403fb4a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ServiceApiConstants.java @@ -0,0 +1,28 @@ +package org.apache.slider.api; + +import static org.apache.slider.util.ServiceApiUtil.$; + +/** + * This class defines constants that can be used for variable substitutions + */ +public interface ServiceApiConstants { + + // Constants for service + String SERVICE_NAME = $("SERVICE_NAME"); + + String SERVICE_NAME_LC = $("SERVICE_NAME.lc"); + + // Constants for component + String COMPONENT_NAME = $("COMPONENT_NAME"); + + String COMPONENT_NAME_LC = $("COMPONENT_NAME.lc"); + + String COMPONENT_INSTANCE_NAME = $("COMPONENT_INSTANCE_NAME"); + + String COMPONENT_ID = $("COMPONENT_ID"); + + // Constants for system + String DEFAULT_DATA_DIR = $("DEFAULT_DATA_DIR"); + + String CONTAINER_ID = $("CONTAINER_ID"); +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java index cdc96b8..92c20a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/ConfigFile.java @@ -17,20 +17,19 @@ package org.apache.slider.api.resource; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; import java.io.Serializable; +import java.util.HashMap; import java.util.Map; import java.util.Objects; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; - -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.fasterxml.jackson.annotation.JsonValue; - /** * A config file that needs to be created and made available as a volume in an * application component container. @@ -44,8 +43,9 @@ private static final long serialVersionUID = -7009402089417704612L; public enum TypeEnum { - XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"), TEMPLATE( - "TEMPLATE"), ENV("ENV"), HADOOP_XML("HADOOP_XML"); + XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML( + "YAML"), HADOOP_XML_TEMPLATE("HADOOP_XML_TEMPLATE"), ENV( + "ENV"), HADOOP_XML("HADOOP_XML"); private String value; @@ -63,7 +63,18 @@ public String toString() { private TypeEnum type = null; private String destFile = null; private String srcFile = null; - private Map props = null; + private Map props = new HashMap<>(); + + public ConfigFile copy() { + ConfigFile copy = new ConfigFile(); + copy.setType(this.getType()); + copy.setSrcFile(this.getSrcFile()); + copy.setDestFile(this.getDestFile()); + if (this.getProps() != null && !this.getProps().isEmpty()) { + copy.getProps().putAll(this.getProps()); + } + return copy; + } /** * Config file in the standard format like xml, properties, json, yaml, @@ -105,7 +116,6 @@ public void setDestFile(String destFile) { } /** - * TODO this probably is not required for non-template configs. It is now used as symlink for localization for non-template configs - we could infer the name from destFile instead * * Required for type template. This provides the source location of the * template which needs to be mounted as dest_file post property @@ -117,7 +127,7 @@ public ConfigFile srcFile(String srcFile) { return this; } - @ApiModelProperty(example = "null", value = "Required for type template. This provides the source location of the template which needs to be mounted as dest_file post property substitutions. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, etc.") + @ApiModelProperty(example = "null", value = "Required only for type template. 
This provides the source location of the template which needs to be mounted as dest_file post property substitutions. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, etc.") @JsonProperty("src_file") public String getSrcFile() { return srcFile; @@ -139,7 +149,7 @@ public ConfigFile props(Map props) { return this; } - @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file post property substitutions.") + @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If the type is template then the attribute src_file is mandatory and the src_file content is dumped to dest_file. If a property already exists in src_file, its value is replaced; if it does not, it is appended as a new property in dest_file.") @JsonProperty("props") public Map getProps() { return props; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java index c43bd64..4318572 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Configuration.java @@ -17,8 +17,11 @@ package org.apache.slider.api.resource; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; import io.swagger.annotations.ApiModel; import io.swagger.annotations.ApiModelProperty; +import org.apache.commons.lang.StringUtils; import java.io.Serializable; import java.util.ArrayList; @@ -27,10 +30,6 @@ import java.util.Map; import java.util.Objects; -import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.commons.lang.StringUtils; - /** * Set of configuration properties that can be injected into the application * components via envs, files and custom pluggable helper docker containers. @@ -134,6 +133,13 @@ public String getProperty(String name) { return properties.get(name.trim()); } + public String getEnv(String name) { + if (name == null) { + return null; + } + return env.get(name.trim()); + } + @Override public boolean equals(java.lang.Object o) { if (this == o) { @@ -176,4 +182,27 @@ private String toIndentedString(java.lang.Object o) { } return o.toString().replace("\n", "\n "); } + + /** + * Merge all properties and envs from that configuration to this configuration.
+ * For ConfigFiles, all properties and envs of the ConfigFile are merged into + * the same ConfigFile named by dest_file + */ + public synchronized void mergeFrom(Configuration that) { + this.properties.putAll(that.getProperties()); + this.env.putAll(that.getEnv()); + Map thatMap = new HashMap<>(); + for (ConfigFile file : that.getFiles()) { + thatMap.put(file.getDestFile(), file.copy()); + } + for (ConfigFile thisFile : files) { + if(thatMap.containsKey(thisFile.getDestFile())) { + ConfigFile thatFile = thatMap.get(thisFile.getDestFile()); + thisFile.getProps().putAll(thatFile.getProps()); + thatMap.remove(thisFile.getDestFile()); + } + } + // add remaining new files from that Configuration + files.addAll(thatMap.values()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java index 8bceddf..e5df687 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java @@ -67,7 +67,6 @@ import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Times; -import org.apache.slider.api.ClusterNode; import org.apache.slider.api.SliderClusterProtocol; import org.apache.slider.api.proto.Messages; import org.apache.slider.api.resource.Application; @@ -122,7 +121,6 @@ import org.apache.slider.core.exceptions.SliderException; import org.apache.slider.core.exceptions.UnknownApplicationInstanceException; import org.apache.slider.core.exceptions.UsageException; -import org.apache.slider.core.exceptions.WaitTimeoutException; import org.apache.slider.core.launch.ClasspathConstructor; import org.apache.slider.core.launch.CredentialUtils; import org.apache.slider.core.launch.JavaCommandLineBuilder; @@ -177,8 +175,6 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Locale; import java.util.Map; @@ -651,7 +647,8 @@ public AbstractClientProvider createClientProvider(String provider) public ApplicationId actionCreate(Application application) throws IOException, YarnException { - ServiceApiUtil.validateApplicationPostPayload(application); + ServiceApiUtil.validateApplicationPayload(application, + sliderFileSystem.getFileSystem()); String appName = application.getName(); validateClusterName(appName); verifyNoLiveApp(appName, "Create"); @@ -699,7 +696,7 @@ private ApplicationId submitApp(Application app) //TODO SliderAMClientProvider#copyEnvVars //TODO localResource putEnv - Map env = addAMEnv(conf, tempPath); + Map env = addAMEnv(conf); // create AM CLI String cmdStr = @@ -798,7 +795,7 @@ private String buildCommandLine(String appName, Configuration conf, return cmdStr; } - private Map addAMEnv(Configuration conf, Path tempPath) + private Map addAMEnv(Configuration conf) throws IOException { Map env = new HashMap(); ClasspathConstructor classpath = @@ -812,7 +809,15 @@ private String buildCommandLine(String appName, Configuration conf, if (jaas != null) { env.put(HADOOP_JAAS_DEBUG,
jaas); } + if (!UserGroupInformation.isSecurityEnabled()) { + String userName = UserGroupInformation.getCurrentUser().getUserName(); + log.info("Run as user " + userName); + // HADOOP_USER_NAME env is used by UserGroupInformation when log in + // This setting makes AM run as this user + env.put("HADOOP_USER_NAME", userName); + } env.putAll(getAmLaunchEnv(conf)); + log.info("AM env: \n{}", stringifyMap(env)); return env; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java index 9a4fa6c..968a90b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java @@ -240,7 +240,7 @@ String STDERR_AM = "slider-err.txt"; String DEFAULT_GC_OPTS = ""; - String HADOOP_USER_NAME = ApplicationConstants.Environment.USER.toString(); + String HADOOP_USER_NAME = "HADOOP_USER_NAME"; String HADOOP_PROXY_USER = "HADOOP_PROXY_USER"; String SLIDER_PASSPHRASE = "SLIDER_PASSPHRASE"; @@ -306,7 +306,7 @@ String CERT_FILE_LOCALIZATION_PATH = INFRA_RUN_SECURITY_DIR + "ca.crt"; String AM_CONFIG_GENERATION = "am.config.generation"; - String APP_CONF_DIR = "app/conf"; + String APP_CONF_DIR = "conf"; String APP_RESOURCES = "application.resources"; String APP_RESOURCES_DIR = "app/resources"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigurationResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigurationResolver.java deleted file mode 100644 index 88bac77..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigurationResolver.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.slider.core.registry.docstore; - -public class ConfigurationResolver { - - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java index e0299e7..6e6aabe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java @@ -20,6 +20,8 @@ import org.apache.slider.api.resource.Component; +import java.util.concurrent.atomic.AtomicLong; + /** * Provider role and key for use in app requests. * @@ -35,32 +37,7 @@ public final long placementTimeoutSeconds; public final String labelExpression; public final Component component; - - - /** - * Create a provider role - * @param name role/component name - * @param id ID. This becomes the YARN priority - * @param policy placement policy - * @param nodeFailureThreshold threshold for node failures (within a reset interval) - * after which a node failure is considered an app failure - * @param placementTimeoutSeconds for lax placement, timeout in seconds before - * @param labelExpression label expression for requests; may be null - */ - public ProviderRole(String name, - int id, - int policy, - int nodeFailureThreshold, - long placementTimeoutSeconds, - String labelExpression) { - this(name, - name, - id, - policy, - nodeFailureThreshold, - placementTimeoutSeconds, - labelExpression, null); - } + public AtomicLong componentIdCounter = null; /** * Create a provider role with a role group @@ -93,7 +70,9 @@ public ProviderRole(String name, this.placementTimeoutSeconds = placementTimeoutSeconds; this.labelExpression = labelExpression; this.component = component; - + if(component.getUniqueComponentSupport()) { + componentIdCounter = new AtomicLong(0); + } } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java index c31b2ac..7e92bfa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderService.java @@ -26,6 +26,7 @@ import org.apache.slider.common.tools.SliderFileSystem; import org.apache.slider.core.exceptions.SliderException; import org.apache.slider.core.launch.ContainerLauncher; +import org.apache.slider.server.appmaster.state.RoleInstance; import org.apache.slider.server.appmaster.state.StateAccessForProviders; import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders; @@ -38,7 +39,8 @@ */ void buildContainerLaunchContext(ContainerLauncher containerLauncher, Application application, Container container, ProviderRole providerRole, - SliderFileSystem sliderFileSystem) throws IOException, 
SliderException; + SliderFileSystem sliderFileSystem, RoleInstance roleInstance) + throws IOException, SliderException; void setAMState(StateAccessForProviders stateAccessForProviders); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java index f8ec976..4aa5e37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java @@ -18,8 +18,10 @@ package org.apache.slider.providers; +import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; @@ -29,6 +31,7 @@ import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies; import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.slider.api.ClusterNode; @@ -36,6 +39,7 @@ import org.apache.slider.api.OptionKeys; import org.apache.slider.api.ResourceKeys; import org.apache.slider.api.RoleKeys; +import org.apache.slider.api.ServiceApiConstants; import org.apache.slider.api.resource.Application; import org.apache.slider.api.resource.Component; import org.apache.slider.api.resource.ConfigFile; @@ -59,6 +63,7 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.OutputStream; import java.net.URI; import java.util.ArrayList; import java.util.Collection; @@ -68,6 +73,9 @@ import java.util.Map; import java.util.regex.Pattern; +import static org.apache.slider.api.ServiceApiConstants.*; +import static org.apache.slider.util.ServiceApiUtil.$; + /** * This is a factoring out of methods handy for providers. It's bonded to a log * at construction time. @@ -89,7 +97,7 @@ public ProviderUtils(Logger log) { * Add oneself to the classpath. This does not work * on minicluster test runs where the JAR is not built up. 
* @param providerResources map of provider resources to add these entries to - * @param provider provider to add + * @param providerClass provider to add * @param jarName name of the jar to use * @param sliderFileSystem target filesystem * @param tempPath path in the cluster FS for temp files @@ -157,14 +165,10 @@ public static void addAllDependencyJars( libDir, libLocalSrcDir); } - // Build key -> value map - // value will be substituted by corresponding data in tokenMap - public Map substituteConfigs(Map configs, + // configs will be substituted by corresponding env in tokenMap + public void substituteConfigs(Map configs, Map tokenMap) { - String format = "${%s}"; - Map filteredOptions = new HashMap<>(); for (Map.Entry entry : configs.entrySet()) { - String key = entry.getKey(); String value = entry.getValue(); if (tokenMap != null) { for (Map.Entry token : tokenMap.entrySet()) { @@ -172,10 +176,8 @@ public static void addAllDependencyJars( value.replaceAll(Pattern.quote(token.getKey()), token.getValue()); } } - filteredOptions.put(String.format(format, key), value); + entry.setValue(value); } - - return filteredOptions; } /** @@ -249,42 +251,69 @@ public void localizeServiceKeytabs(ContainerLauncher launcher, } } + public static void addEnvForSubstitution(Map env, + Map tokensForSubstitution) { + if (env == null || env.isEmpty() || tokensForSubstitution == null + || tokensForSubstitution.isEmpty()) { + return; + } + for (Map.Entry entry : env.entrySet()) { + tokensForSubstitution.put($(entry.getKey()), entry.getValue()); + } + } + // 1. Create all config files for a component on hdfs for localization // 2. Add the config file to localResource - //TODO handle Template format config file - public void createConfigFileAndAddLocalResource(ContainerLauncher launcher, - SliderFileSystem fs, Component component, - Map tokensForSubstitution, - StateAccessForProviders amState) throws IOException { + public synchronized void createConfigFileAndAddLocalResource( + ContainerLauncher launcher, SliderFileSystem fs, Component component, + Map tokensForSubstitution, RoleInstance roleInstance) + throws IOException { Path compDir = new Path(new Path(fs.getAppDir(), "components"), component.getName()); - if (!fs.getFileSystem().exists(compDir)) { - fs.getFileSystem().mkdirs(compDir, - new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)); - log.info("Creating component dir: " + compDir); + Path compInstanceDir = + new Path(compDir, roleInstance.getComponentInstanceName()); + if (!fs.getFileSystem().exists(compInstanceDir)) { + fs.getFileSystem().mkdirs(compInstanceDir, + new FsPermission("777")); + log.info("Creating component instance dir: " + compInstanceDir); } else { - log.info("Component conf dir already exists: " + compDir); - return; + log.info("Component instance conf dir already exists: " + compInstanceDir); } - for (ConfigFile configFile : component.getConfiguration().getFiles()) { - String fileName = configFile.getSrcFile(); + // add envs for substitution + tokensForSubstitution.putAll(component.getConfiguration().getEnv()); + + log.info("Tokens substitution for component: " + roleInstance + .getComponentInstanceName() + System.lineSeparator() + + tokensForSubstitution); + + for (ConfigFile originalFile : component.getConfiguration().getFiles()) { + ConfigFile configFile = originalFile.copy(); + String fileName = new Path(configFile.getDestFile()).getName(); + // substitute file name for (Map.Entry token : tokensForSubstitution.entrySet()) { 
configFile.setDestFile(configFile.getDestFile() .replaceAll(Pattern.quote(token.getKey()), token.getValue())); } - // substitute configs - substituteConfigs(configFile.getProps(), tokensForSubstitution); - // write configs onto hdfs - PublishedConfiguration publishedConfiguration = - new PublishedConfiguration(fileName, - configFile.getProps().entrySet()); - Path remoteFile = new Path(compDir, fileName); + Path remoteFile = new Path(compInstanceDir, fileName); if (!fs.getFileSystem().exists(remoteFile)) { - synchronized (this) { + log.info("Saving config file on hdfs for component " + + roleInstance.getComponentInstanceName() + ": " + remoteFile); + if (configFile.getType() + .equals(ConfigFile.TypeEnum.HADOOP_XML_TEMPLATE)) { + substituteHadoopXmlTemplateAndSaveOnHdfs(fs.getFileSystem(), + tokensForSubstitution, configFile, remoteFile); + } else { + // substitute non-template configs + substituteConfigs(configFile.getProps(), tokensForSubstitution); + + // write configs onto hdfs + PublishedConfiguration publishedConfiguration = + new PublishedConfiguration(fileName, + configFile.getProps().entrySet()); if (!fs.getFileSystem().exists(remoteFile)) { PublishedConfigurationOutputter configurationOutputter = PublishedConfigurationOutputter.createOutputter( @@ -293,34 +322,33 @@ public void createConfigFileAndAddLocalResource(ContainerLauncher launcher, FSDataOutputStream os = null; try { os = fs.getFileSystem().create(remoteFile); + fs.getFileSystem() + .setPermission(remoteFile, new FsPermission("777")); configurationOutputter.save(os); os.flush(); - log.info("Created config file on hdfs: " + remoteFile); } finally { IOUtils.closeStream(os); } + } else { + log.info("Component instance = " + roleInstance + .getComponentInstanceName() + ", config file already exists: " + + remoteFile); } } } - // Publish configs - amState.getPublishedSliderConfigurations() - .put(configFile.getSrcFile(), publishedConfiguration); - // Add resource for localization LocalResource configResource = fs.createAmResource(remoteFile, LocalResourceType.FILE); File destFile = new File(configFile.getDestFile()); - //TODO why to we need to differetiate RESOURCE_DIR vs APP_CONF_DIR + String symlink = APP_CONF_DIR + "/" + fileName; if (destFile.isAbsolute()) { - String symlink = RESOURCE_DIR + "/" + fileName; launcher.addLocalResource(symlink, configResource, configFile.getDestFile()); log.info("Add config file for localization: " + symlink + " -> " + configResource.getResource().getFile() + ", dest mount path: " + configFile.getDestFile()); } else { - String symlink = APP_CONF_DIR + "/" + fileName; launcher.addLocalResource(symlink, configResource); log.info("Add config file for localization: " + symlink + " -> " + configResource.getResource().getFile()); @@ -328,22 +356,61 @@ public void createConfigFileAndAddLocalResource(ContainerLauncher launcher, } } + // 1. substitute config template - only handle hadoop_xml format + // 2. 
save on hdfs + private void substituteHadoopXmlTemplateAndSaveOnHdfs(FileSystem fs, + Map tokensForSubstitution, ConfigFile configFile, + Path remoteFile) throws IOException { + FSDataInputStream input = null; + OutputStream output = null; + try { + org.apache.hadoop.conf.Configuration confRead = + new org.apache.hadoop.conf.Configuration(false); + input = fs.open(new Path(configFile.getSrcFile())); + confRead.addResource(input); + // substitute properties + for (Map.Entry entry : configFile.getProps().entrySet()) { + confRead.set(entry.getKey(), entry.getValue()); + } + + // substitute env variables + for (Map.Entry entry : confRead) { + String val = entry.getValue(); + if (val != null) { + for (Map.Entry token : tokensForSubstitution + .entrySet()) { + val = + val.replaceAll(Pattern.quote(token.getKey()), token.getValue()); + } + } + entry.setValue(val); + } + output = fs.create(remoteFile); + fs.setPermission(remoteFile, new FsPermission("777")); + confRead.writeXml(output); + } finally { + IOUtils.closeStream(input); + IOUtils.closeStream(output); + } + + } + /** * Get initial token map to be substituted into config values. * @param appConf app configurations * @param componentName component name - * @param componentGroup component group * @param containerId container ID * @param clusterName app name * @return tokens to replace */ - public Map getStandardTokenMap( - Configuration appConf, Configuration componentConf, String componentName, - String componentGroup, String containerId, String clusterName) { + public Map getStandardTokenMap(Configuration appConf, + Configuration componentConf, String componentName, + String componentInstanceName, String containerId, + String clusterName) { Map tokens = new HashMap<>(); if (containerId != null) { - tokens.put("${CONTAINER_ID}", containerId); + tokens.put(CONTAINER_ID, containerId); } String nnuri = appConf.getProperty("fs.defaultFS"); if (nnuri != null && !nnuri.isEmpty()) { @@ -363,23 +430,14 @@ public void createConfigFileAndAddLocalResource(ContainerLauncher launcher, appConf.getProperty(InternalKeys.INTERNAL_DATA_DIR_PATH) + dataDirSuffix); tokens.put("${JAVA_HOME}", appConf.getProperty(JAVA_HOME)); - tokens.put("${COMPONENT_NAME}", componentName); - tokens.put("${COMPONENT_NAME.lc}", componentName.toLowerCase()); tokens.put("${COMPONENT_PREFIX}", prefix); tokens.put("${COMPONENT_PREFIX.lc}", prefix.toLowerCase()); - if (!componentName.equals(componentGroup) && - componentName.startsWith(componentGroup)) { - tokens.put("${COMPONENT_ID}", - componentName.substring(componentGroup.length())); - } - if (clusterName != null) { - tokens.put("${CLUSTER_NAME}", clusterName); - tokens.put("${CLUSTER_NAME.lc}", clusterName.toLowerCase()); - tokens.put("${APP_NAME}", clusterName); - tokens.put("${APP_NAME.lc}", clusterName.toLowerCase()); - } - tokens.put("${APP_COMPONENT_NAME}", componentName); - tokens.put("${APP_COMPONENT_NAME.lc}", componentName.toLowerCase()); + tokens.put(DEFAULT_DATA_DIR, "data"); + tokens.put(SERVICE_NAME_LC, clusterName.toLowerCase()); + tokens.put(SERVICE_NAME, clusterName); + tokens.put(COMPONENT_NAME, componentName); + tokens.put(COMPONENT_NAME_LC, componentName.toLowerCase()); + tokens.put(COMPONENT_INSTANCE_NAME, componentInstanceName); return tokens; } @@ -443,7 +501,8 @@ public void updateServiceRecord(StateAccessForProviders amState, // create and publish updated service record (including hostname & ip) ServiceRecord record = new ServiceRecord(); record.set(YarnRegistryAttributes.YARN_ID, containerId); - 
record.description = roleName.replaceAll("_", "-"); + String componentInstanceName = role.getComponentInstanceName(); + record.description = componentInstanceName.replaceAll("_", "-"); record.set(YarnRegistryAttributes.YARN_PERSISTENCE, PersistencePolicies.CONTAINER); // TODO: use constants from YarnRegistryAttributes diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java index 511f7bc..c61dcb9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java @@ -46,6 +46,8 @@ import java.util.Map; import java.util.Map.Entry; +import static org.apache.slider.util.ServiceApiUtil.$; + public class DockerProviderService extends AbstractService implements ProviderService, DockerKeys, SliderKeys { @@ -70,14 +72,14 @@ public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) { this.yarnRegistry = yarnRegistry; } + public void buildContainerLaunchContext(ContainerLauncher launcher, Application application, Container container, ProviderRole providerRole, - SliderFileSystem fileSystem) + SliderFileSystem fileSystem, RoleInstance roleInstance) throws IOException, SliderException { String roleName = providerRole.name; String roleGroup = providerRole.group; - Component component = providerRole.component; launcher.setYarnDockerMode(true); launcher.setDockerImage(component.getArtifact().getId()); @@ -86,16 +88,14 @@ public void buildContainerLaunchContext(ContainerLauncher launcher, launcher.setRunPrivilegedContainer(component.getRunPrivilegedContainer()); // Generate tokens (key-value pair) for config substitution. - Map standardTokens = providerUtils + // Get pre-defined tokens + Map tokensForSubstitution = providerUtils .getStandardTokenMap(application.getConfiguration(), - component.getConfiguration(), roleName, roleGroup, + component.getConfiguration(), roleName, + roleInstance.getComponentInstanceName(), container.getId().toString(), application.getName()); - Map tokensForSubstitution = providerUtils.substituteConfigs( - component.getConfiguration().getProperties(), standardTokens); - - tokensForSubstitution.putAll(standardTokens); - // Set the environment variables + // Set the environment variables in launcher launcher.putEnv(SliderUtils .buildEnvMap(component.getConfiguration(), tokensForSubstitution)); launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$()); @@ -108,21 +108,13 @@ public void buildContainerLaunchContext(ContainerLauncher launcher, launcher.setEnv("LANGUAGE", "en_US.UTF-8"); for (Entry entry : launcher.getEnv().entrySet()) { - tokensForSubstitution.put("${" + entry.getKey() + "}", entry.getValue()); + tokensForSubstitution.put($(entry.getKey()), entry.getValue()); } - providerUtils.addRoleHostTokens(tokensForSubstitution, amState); - log.info("Token for substitution: " + tokensForSubstitution); - - if (SliderUtils.isHadoopClusterSecure(getConfig())) { - //TODO localize key tabs, WHY is this code needed ? WHY DOES CONTAINER REQUIRE AM KEYTAB?? 
- providerUtils.localizeServiceKeytabs(launcher, fileSystem, application); - } - // create config file on hdfs and add local resource providerUtils.createConfigFileAndAddLocalResource(launcher, fileSystem, - component, tokensForSubstitution, amState); + component, tokensForSubstitution, roleInstance); CommandLineBuilder operation = new CommandLineBuilder(); operation.add(component.getLaunchCommand()); @@ -132,7 +124,6 @@ public void buildContainerLaunchContext(ContainerLauncher launcher, launcher.addCommand(operation.build()); // publish exports - // TODO move this to app level, no need to do this for every container launch providerUtils .substituteConfigs(application.getQuicklinks(), tokensForSubstitution); PublishedConfiguration pubconf = new PublishedConfiguration(QUICK_LINKS, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java index 70eab71..6dc9996 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/RoleLaunchService.java @@ -150,24 +150,21 @@ public void run() { containerLauncher.setupUGI(); containerLauncher.putEnv(envVars); - log.info("Launching container {} into RoleName = {}, RoleGroup = {}", - container.getId(), role.name, role.group); - - provider.buildContainerLaunchContext(containerLauncher, application, - container, role, fs); - - RoleInstance instance = new RoleInstance(container); + RoleInstance instance = new RoleInstance(container, role); String[] envDescription = containerLauncher.dumpEnvToString(); - String commandsAsString = containerLauncher.getCommandsAsString(); + log.info("Launching container {} into component = {}, component instance = {}", + container.getId(), role.name, instance.getComponentInstanceName()); log.info("Starting container with command: {}", commandsAsString); - - instance.providerRole = role; instance.command = commandsAsString; instance.role = role.name; instance.group = role.group; instance.roleId = role.id; instance.environment = envDescription; + + provider.buildContainerLaunchContext(containerLauncher, application, + container, role, fs, instance); + long delay = role.component.getConfiguration() .getPropertyLong(AgentKeys.KEY_CONTAINER_LAUNCH_DELAY, 0); long maxDelay = getConfig() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java index eca07e6..71dc149 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java @@ -85,6 +85,7 @@ import 
org.apache.slider.api.proto.Messages; import org.apache.slider.api.proto.SliderClusterAPI; import org.apache.slider.api.resource.Application; +import org.apache.slider.api.resource.Component; import org.apache.slider.common.SliderExitCodes; import org.apache.slider.common.SliderKeys; import org.apache.slider.common.params.AbstractActionArgs; @@ -109,7 +110,6 @@ import org.apache.slider.core.persist.JsonSerDeser; import org.apache.slider.core.registry.info.CustomRegistryConstants; import org.apache.slider.providers.ProviderCompleted; -import org.apache.slider.providers.ProviderRole; import org.apache.slider.providers.ProviderService; import org.apache.slider.providers.SliderProviderFactory; import org.apache.slider.server.appmaster.actions.ActionHalt; @@ -136,7 +136,6 @@ import org.apache.slider.server.appmaster.rpc.RpcBinder; import org.apache.slider.server.appmaster.rpc.SliderClusterProtocolPBImpl; import org.apache.slider.server.appmaster.rpc.SliderIPCService; -import org.apache.slider.server.appmaster.security.SecurityConfiguration; import org.apache.slider.server.appmaster.state.AppState; import org.apache.slider.server.appmaster.state.AppStateBindingInfo; import org.apache.slider.server.appmaster.state.ContainerAssignment; @@ -170,7 +169,6 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -701,10 +699,7 @@ private int createAndRunCluster(String appName) throws Throwable { registryOperations = startRegistryOperationsService(); log.info(registryOperations.toString()); - //build the role map - List providerRoles = Collections.EMPTY_LIST; // Start up the WebApp and track the URL for it - // Web service endpoints: initialize WebAppApiImpl webAppApi = new WebAppApiImpl( @@ -815,7 +810,6 @@ private int createAndRunCluster(String appName) throws Throwable { //build the instance AppStateBindingInfo binding = new AppStateBindingInfo(); binding.serviceConfig = serviceConf; - binding.roles = providerRoles; binding.fs = fs.getFileSystem(); binding.historyPath = historyDir; binding.liveContainers = liveContainers; @@ -873,6 +867,11 @@ private int createAndRunCluster(String appName) throws Throwable { scheduleFailureWindowResets(application.getConfiguration()); scheduleEscalation(application.getConfiguration()); + // Merge app-level configuration into component level configuration + for (Component component : application.getComponents()) { + component.getConfiguration().mergeFrom(application.getConfiguration()); + } + try { // schedule YARN Registry registration queue(new ActionRegisterServiceInstance(appName, appid, application)); @@ -1170,22 +1169,22 @@ protected void setupInitialRegistryPaths() throws IOException { * Handler for {@link RegisterComponentInstance action} * Register/re-register an ephemeral container that is already in the application state * @param id the component - * @param description component description - * @param type component type * @return true if the component is registered */ - public boolean registerComponent(ContainerId id, String description, - String type) throws IOException { + public boolean registerComponent(ContainerId id, RoleInstance roleInstance) + throws IOException { RoleInstance instance = appState.getOwnedContainer(id); if (instance == null) { return false; } // this is where component registrations go - log.info("Registering component {}", id); String cid = 
RegistryPathUtils.encodeYarnID(id.toString()); ServiceRecord record = new ServiceRecord(); record.set(YarnRegistryAttributes.YARN_ID, cid); - record.description = description; + + record.description = roleInstance.getComponentInstanceName(); + log.info("Registering component " + roleInstance.getComponentInstanceName() + + ", containerId = " + id); record.set(YarnRegistryAttributes.YARN_PERSISTENCE, PersistencePolicies.CONTAINER); setUserProvidedServiceRecordAttributes( @@ -1194,7 +1193,7 @@ public boolean registerComponent(ContainerId id, String description, yarnRegistryOperations.putComponent(cid, record); } catch (IOException e) { log.warn("Failed to register container {}/{}: {}", - id, description, e, e); + id, roleInstance.role, e, e); return false; } org.apache.slider.api.resource.Container container = @@ -1948,7 +1947,7 @@ public void onContainerStarted(ContainerId containerId, nmClientAsync.getContainerStatusAsync(containerId, cinfo.container.getNodeId()); // push out a registration - queue(new RegisterComponentInstance(containerId, cinfo.role, cinfo.group, + queue(new RegisterComponentInstance(containerId, cinfo, 0, TimeUnit.MILLISECONDS)); } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java index 4cf4981..3c1bed8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/RegisterComponentInstance.java @@ -22,6 +22,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.slider.server.appmaster.SliderAppMaster; import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.RoleInstance; import java.util.concurrent.TimeUnit; @@ -33,18 +34,15 @@ public class RegisterComponentInstance extends AsyncAction { public final ContainerId containerId; - public final String description; - public final String type; + public final RoleInstance roleInstance; public RegisterComponentInstance(ContainerId containerId, - String description, - String type, + RoleInstance roleInstance, long delay, TimeUnit timeUnit) { super("RegisterComponentInstance :" + containerId, delay, timeUnit); - this.description = description; - this.type = type; + this.roleInstance = roleInstance; Preconditions.checkArgument(containerId != null); this.containerId = containerId; } @@ -54,6 +52,6 @@ public void execute(SliderAppMaster appMaster, QueueAccess queueService, AppState appState) throws Exception { - appMaster.registerComponent(containerId, description, type); + appMaster.registerComponent(containerId, roleInstance); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java index 3d73f3b..d612e50 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java @@ -51,6 +51,7 @@ import org.apache.slider.core.exceptions.BadConfigException; import org.apache.slider.core.exceptions.ErrorStrings; import org.apache.slider.core.exceptions.NoSuchNodeException; +import org.apache.slider.core.exceptions.SliderException; import org.apache.slider.core.exceptions.SliderInternalStateException; import org.apache.slider.core.exceptions.TriggerClusterTeardownException; import org.apache.slider.providers.PlacementPolicy; @@ -107,9 +108,11 @@ private Application app; + // priority_id -> RoleStatus private final Map roleStatusMap = new ConcurrentSkipListMap<>(); + // component_name -> ProviderRole private final Map roles = new ConcurrentHashMap<>(); @@ -299,37 +302,23 @@ public synchronized void buildInstance(AppStateBindingInfo binding) // set the cluster specification (once its dependency the client properties // is out the way this.app = binding.application; - appMetrics = SliderMetrics.register(app.getName(), - "Metrics for service"); - appMetrics - .tag("type", "Metrics type [component or service]", "service"); - appMetrics - .tag("appId", "Application id for service", app.getId()); - - //build the initial role list - List roleList = new ArrayList<>(binding.roles); - for (ProviderRole providerRole : roleList) { - buildRole(providerRole); - } + appMetrics = SliderMetrics.register(app.getName(), "Metrics for service"); + appMetrics.tag("type", "Metrics type [component or service]", "service"); + appMetrics.tag("appId", "Application id for service", app.getId()); + //build the initial component list int priority = 1; for (Component component : app.getComponents()) { String name = component.getName(); if (roles.containsKey(name)) { continue; } - if (component.getUniqueComponentSupport()) { - log.info("Skipping group " + name + ", as it's unique component"); - continue; - } log.info("Adding component: " + name); - ProviderRole dynamicRole = - createComponent(name, name, component, priority++); - buildRole(dynamicRole); - roleList.add(dynamicRole); + createComponent(name, name, component, priority++); } + //then pick up the requirements - buildRoleRequirementsFromResources(); +// buildRoleRequirementsFromResources(); org.apache.slider.api.resource.Configuration conf = app.getConfiguration(); startTimeThreshold = @@ -356,10 +345,8 @@ public synchronized void buildInstance(AppStateBindingInfo binding) app.setState(STARTED); } - //TODO WHY do we need to create the component for AM ? 
public ProviderRole createComponent(String name, String group, Component component, int priority) throws BadConfigException { - org.apache.slider.api.resource.Configuration conf = component.getConfiguration(); long placementTimeout = conf.getPropertyLong(PLACEMENT_ESCALATE_DELAY, @@ -371,130 +358,143 @@ public ProviderRole createComponent(String name, String group, ProviderRole newRole = new ProviderRole(name, group, priority, (int)placementPolicy, threshold, placementTimeout, "", component); - + buildRole(newRole, component); log.info("Created a new role " + newRole); return newRole; } + // TODO support adding new component dynamically public synchronized void updateComponents( - Messages.FlexComponentRequestProto requestProto) - throws BadConfigException { + Messages.FlexComponentRequestProto requestProto) throws SliderException { + boolean found = false; for (Component component : app.getComponents()) { if (component.getName().equals(requestProto.getName())) { component .setNumberOfContainers((long) requestProto.getNumberOfContainers()); + if (roles.containsKey(component.getName())) { + setDesiredContainers( + roleStatusMap.get(roles.get(component.getName()).id), + component.getNumberOfContainers().intValue()); + found = true; + } } } - //TODO update cluster description - buildRoleRequirementsFromResources(); + if (!found) { + throw new SliderException( + "Component " + requestProto.getName() + " doesn't exist. "); + } +// buildRoleRequirementsFromResources(); } /** * build the role requirements from the cluster specification * @return a list of any dynamically added provider roles */ - private List buildRoleRequirementsFromResources() - throws BadConfigException { - - List newRoles = new ArrayList<>(0); - - // now update every role's desired count. - // if there are no instance values, that role count goes to zero - // Add all the existing roles - // component name -> number of containers - Map groupCounts = new HashMap<>(); - - for (RoleStatus roleStatus : getRoleStatusMap().values()) { - if (roleStatus.isExcludeFromFlexing()) { - // skip inflexible roles, e.g AM itself - continue; - } - long currentDesired = roleStatus.getDesired(); - String role = roleStatus.getName(); - String roleGroup = roleStatus.getGroup(); - Component component = roleStatus.getProviderRole().component; - int desiredInstanceCount = component.getNumberOfContainers().intValue(); - - int newDesired = desiredInstanceCount; - if (component.getUniqueComponentSupport()) { - Integer groupCount = 0; - if (groupCounts.containsKey(roleGroup)) { - groupCount = groupCounts.get(roleGroup); - } - - newDesired = desiredInstanceCount - groupCount; - - if (newDesired > 0) { - newDesired = 1; - groupCounts.put(roleGroup, groupCount + newDesired); - } else { - newDesired = 0; - } - } - - if (newDesired == 0) { - log.info("Role {} has 0 instances specified", role); - } - if (currentDesired != newDesired) { - log.info("Role {} flexed from {} to {}", role, currentDesired, - newDesired); - setDesiredContainers(roleStatus, newDesired); - } - } - - // now the dynamic ones. 
Iterate through the the cluster spec and - // add any role status entries not in the role status - - List list = new ArrayList<>(getRoleStatusMap().values()); - for (RoleStatus roleStatus : list) { - String name = roleStatus.getName(); - Component component = roleStatus.getProviderRole().component; - if (roles.containsKey(name)) { - continue; - } - if (component.getUniqueComponentSupport()) { - // THIS NAME IS A GROUP - int desiredInstanceCount = component.getNumberOfContainers().intValue(); - Integer groupCount = 0; - if (groupCounts.containsKey(name)) { - groupCount = groupCounts.get(name); - } - for (int i = groupCount + 1; i <= desiredInstanceCount; i++) { - int priority = roleStatus.getPriority(); - // this is a new instance of an existing group - String newName = String.format("%s%d", name, i); - int newPriority = getNewPriority(priority + i - 1); - log.info("Adding new role {}", newName); - ProviderRole dynamicRole = - createComponent(newName, name, component, newPriority); - RoleStatus newRole = buildRole(dynamicRole); - incDesiredContainers(newRole); - log.info("New role {}", newRole); - if (roleHistory != null) { - roleHistory.addNewRole(newRole); - } - newRoles.add(dynamicRole); - } - } else { - // this is a new value - log.info("Adding new role {}", name); - ProviderRole dynamicRole = - createComponent(name, name, component, roleStatus.getPriority()); - RoleStatus newRole = buildRole(dynamicRole); - incDesiredContainers(roleStatus, - component.getNumberOfContainers().intValue()); - log.info("New role {}", newRole); - if (roleHistory != null) { - roleHistory.addNewRole(newRole); - } - newRoles.add(dynamicRole); - } - } - // and fill in all those roles with their requirements - buildRoleResourceRequirements(); - - return newRoles; - } +// private List buildRoleRequirementsFromResources() +// throws BadConfigException { +// +// List newRoles = new ArrayList<>(0); +// +// // now update every role's desired count. +// // if there are no instance values, that role count goes to zero +// // Add all the existing roles +// // component name -> number of containers +// Map groupCounts = new HashMap<>(); +// +// for (RoleStatus roleStatus : getRoleStatusMap().values()) { +// if (roleStatus.isExcludeFromFlexing()) { +// // skip inflexible roles, e.g AM itself +// continue; +// } +// long currentDesired = roleStatus.getDesired(); +// String role = roleStatus.getName(); +// String roleGroup = roleStatus.getGroup(); +// Component component = roleStatus.getProviderRole().component; +// int desiredInstanceCount = component.getNumberOfContainers().intValue(); +// +// int newDesired = desiredInstanceCount; +// if (component.getUniqueComponentSupport()) { +// Integer groupCount = 0; +// if (groupCounts.containsKey(roleGroup)) { +// groupCount = groupCounts.get(roleGroup); +// } +// +// newDesired = desiredInstanceCount - groupCount; +// +// if (newDesired > 0) { +// newDesired = 1; +// groupCounts.put(roleGroup, groupCount + newDesired); +// } else { +// newDesired = 0; +// } +// } +// +// if (newDesired == 0) { +// log.info("Role {} has 0 instances specified", role); +// } +// if (currentDesired != newDesired) { +// log.info("Role {} flexed from {} to {}", role, currentDesired, +// newDesired); +// setDesiredContainers(roleStatus, newDesired); +// } +// } +// +// log.info("Counts per component: " + groupCounts); +// // now the dynamic ones. 
Iterate through the the cluster spec and +// // add any role status entries not in the role status +// +// List list = new ArrayList<>(getRoleStatusMap().values()); +// for (RoleStatus roleStatus : list) { +// String name = roleStatus.getName(); +// Component component = roleStatus.getProviderRole().component; +// if (roles.containsKey(name)) { +// continue; +// } +// if (component.getUniqueComponentSupport()) { +// // THIS NAME IS A GROUP +// int desiredInstanceCount = component.getNumberOfContainers().intValue(); +// Integer groupCount = 0; +// if (groupCounts.containsKey(name)) { +// groupCount = groupCounts.get(name); +// } +// log.info("Component " + component.getName() + ", current count = " +// + groupCount + ", desired count = " + desiredInstanceCount); +// for (int i = groupCount + 1; i <= desiredInstanceCount; i++) { +// int priority = roleStatus.getPriority(); +// // this is a new instance of an existing group +// String newName = String.format("%s%d", name, i); +// int newPriority = getNewPriority(priority + i - 1); +// log.info("Adding new role {}", newName); +// ProviderRole dynamicRole = +// createComponent(newName, name, component, newPriority); +// RoleStatus newRole = buildRole(dynamicRole); +// incDesiredContainers(newRole); +// log.info("New role {}", newRole); +// if (roleHistory != null) { +// roleHistory.addNewRole(newRole); +// } +// newRoles.add(dynamicRole); +// } +// } else { +// // this is a new value +// log.info("Adding new role {}", name); +// ProviderRole dynamicRole = +// createComponent(name, name, component, roleStatus.getPriority()); +// RoleStatus newRole = buildRole(dynamicRole); +// incDesiredContainers(roleStatus, +// component.getNumberOfContainers().intValue()); +// log.info("New role {}", newRole); +// if (roleHistory != null) { +// roleHistory.addNewRole(newRole); +// } +// newRoles.add(dynamicRole); +// } +// } +// // and fill in all those roles with their requirements +// buildRoleResourceRequirements(); +// +// return newRoles; +// } private int getNewPriority(int start) { if (!rolePriorityMap.containsKey(start)) { @@ -512,7 +512,8 @@ private int getNewPriority(int start) { * @return the role status built up * @throws BadConfigException if a role of that priority already exists */ - public RoleStatus buildRole(ProviderRole providerRole) throws BadConfigException { + public RoleStatus buildRole(ProviderRole providerRole, Component component) + throws BadConfigException { // build role status map int priority = providerRole.id; if (roleStatusMap.containsKey(priority)) { @@ -521,6 +522,11 @@ public RoleStatus buildRole(ProviderRole providerRole) throws BadConfigException roleStatusMap.get(priority)); } RoleStatus roleStatus = new RoleStatus(providerRole); + roleStatus.setResourceRequirements(buildResourceRequirements(roleStatus)); + long prev = roleStatus.getDesired(); + setDesiredContainers(roleStatus, component.getNumberOfContainers().intValue()); + log.info("Set desired containers for component " + component.getName() + + " from " + prev + " to " + roleStatus.getDesired()); roleStatusMap.put(priority, roleStatus); String name = providerRole.name; roles.put(name, providerRole); @@ -755,7 +761,7 @@ private RoleInstance findNodeInCollection(String containerId, } /** - * Build a map of role->nodename->node-info + * Build a map of Component_name -> ContainerId -> ClusterNode * * @return the map of Role name to list of Cluster Nodes */ @@ -929,11 +935,6 @@ public Resource buildResourceRequirements(RoleStatus role) { // Set up resource requirements 
from role values String name = role.getName(); Component component = role.getProviderRole().component; - if (component == null) { - // this is for AM container - // TODO why do we need to create the component for AM ? - return Resource.newInstance(1, 512); - } int cores = Math.min(containerMaxCores, component.getResource().getCpus()); if (cores <= 0) { cores = DEF_YARN_CORES; @@ -1852,6 +1853,7 @@ private void addRestartedContainer(Container container) //update app state internal structures and maps + //TODO recover the component instance name from zk registry RoleInstance instance = new RoleInstance(container); instance.command = roleName; instance.role = roleName; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java index de52f4e..ea55518 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/RoleInstance.java @@ -42,6 +42,7 @@ public Container container; public ProviderRole providerRole; + public long componentId = -1; /** * Container ID */ @@ -114,10 +115,14 @@ private List endpoints = new ArrayList<>(2); - public RoleInstance(ContainerAssignment assignment) { - this(assignment.container); - placement = assignment.placement; + public RoleInstance(Container container, ProviderRole role) { + this(container); + if (role.componentIdCounter != null) { + componentId = role.componentIdCounter.getAndIncrement(); + } + this.providerRole = role; } + /** * Create an instance to track an allocated container * @param container a container which must be non null, and have a non-null Id field. 
@@ -322,4 +327,12 @@ public ContainerInformation serialize() { } return info; } + + public String getComponentInstanceName() { + if (componentId != -1 ) { + return role + componentId; + } else { + return role; + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java index 776ce00..e314cb3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java @@ -20,17 +20,29 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.slider.api.resource.Application; import org.apache.slider.api.resource.Artifact; import org.apache.slider.api.resource.Component; +import org.apache.slider.api.resource.ConfigFile; import org.apache.slider.api.resource.Configuration; import org.apache.slider.api.resource.Resource; import org.apache.slider.common.tools.SliderUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -public class ServiceApiUtil { +import java.io.IOException; +import java.net.URI; +import java.nio.file.Paths; +import java.util.List; +public class ServiceApiUtil { + private static final Logger log = + LoggerFactory.getLogger(ServiceApiUtil.class); @VisibleForTesting - public static void validateApplicationPostPayload(Application application) { + public static void validateApplicationPayload(Application application, + FileSystem fs) throws IOException { if (StringUtils.isEmpty(application.getName())) { throw new IllegalArgumentException( RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID); @@ -68,7 +80,7 @@ public static void validateApplicationPostPayload(Application application) { throw new IllegalArgumentException( RestApiErrorMessages.ERROR_CONTAINERS_COUNT_INVALID); } - + validateConfigFile(application.getConfiguration().getFiles(), fs); // Since it is a simple app with no components, create a default component application.getComponents().add(createDefaultComponent(application)); } else { @@ -119,6 +131,7 @@ public static void validateApplicationPostPayload(Application application) { RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID, comp.getName())); } + validateConfigFile(comp.getConfiguration().getFiles(), fs); } } @@ -128,6 +141,37 @@ public static void validateApplicationPostPayload(Application application) { } } + // 1) Verify the src_file exists for Hadoop_Xml template config + // 2) dest_file is absolute path + private static void validateConfigFile(List list, FileSystem fs) + throws IOException { + for (ConfigFile file : list) { + // validate Hadoop_xml_template + if (file.getType().equals(ConfigFile.TypeEnum.HADOOP_XML_TEMPLATE)) { + if (StringUtils.isEmpty(file.getSrcFile())) { + throw new IllegalArgumentException("Src_file is empty for " + + ConfigFile.TypeEnum.HADOOP_XML_TEMPLATE); + } + Path p = new Path(file.getSrcFile()); + if (!fs.exists(p)) { + throw new IllegalArgumentException( + "Src_file does not exist for config template file: " + file + .getSrcFile()); + } + } + + 
if (StringUtils.isEmpty(file.getDestFile())) { + throw new IllegalArgumentException("Dest_file is empty."); + } + // validate dest_file is absolute + if (!Paths.get(file.getDestFile()).isAbsolute()) { + throw new IllegalArgumentException( + "Dest_file must be absolute path: " + file.getDestFile()); + } + } + } + + private static void validateApplicationResource(Resource resource, Component comp, Artifact.TypeEnum artifactType) { // Only apps/components of type APPLICATION can skip resource requirement @@ -200,4 +244,8 @@ public static Component createDefaultComponent(Application app) { comp.setLaunchCommand(app.getLaunchCommand()); return comp; } + + public static String $(String s) { + return "${" + s +"}"; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java index b9cde24..299eedf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; @@ -179,4 +180,9 @@ public void testContainerStatus() { status.setIPs(null); Assert.assertNull(status.getIPs()); } + + @Test + public void testPath() { + System.out.print(new Path("hdfs://localhost/tmp/destFile2").toUri().getScheme()); + } }
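
On the AppState changes above: updateComponents() now flexes only components that already exist in the application spec and reports unknown names as an error instead of rebuilding role requirements. A simplified, standalone sketch of that control flow, using a plain map in place of the Slider role bookkeeping (all names here are illustrative, not the actual Slider types):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Simplified stand-in for the flex path in AppState.updateComponents():
    // update the desired container count for a known component, fail otherwise.
    public class FlexSketch {
      private final Map<String, Long> desiredContainers = new ConcurrentHashMap<>();

      public void addComponent(String name, long initialCount) {
        desiredContainers.put(name, initialCount);
      }

      public void flex(String componentName, long newCount) {
        if (!desiredContainers.containsKey(componentName)) {
          // mirrors the new SliderException thrown for unknown component names
          throw new IllegalArgumentException(
              "Component " + componentName + " doesn't exist.");
        }
        desiredContainers.put(componentName, newCount);
      }
    }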
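
On the RoleInstance changes: each instance now records a numeric componentId drawn from a per-ProviderRole counter, and getComponentInstanceName() appends it to the role name (falling back to the bare role name when no id was assigned). A minimal sketch of the naming behaviour, assuming the counter is an AtomicLong starting at 0 (its starting value is not shown in this patch); these are simplified stand-ins, not the actual ProviderRole/RoleInstance classes:

    import java.util.concurrent.atomic.AtomicLong;

    // Standalone illustration of the component-instance naming scheme.
    public class InstanceNamingSketch {
      // one counter per ProviderRole, shared by all instances of that role
      private static final AtomicLong componentIdCounter = new AtomicLong(0);

      static String nextInstanceName(String roleName) {
        long componentId = componentIdCounter.getAndIncrement();
        // mirrors getComponentInstanceName(): role name + monotonically increasing id
        return roleName + componentId;
      }

      public static void main(String[] args) {
        System.out.println(nextInstanceName("hbase-master")); // hbase-master0 (if the counter starts at 0)
        System.out.println(nextInstanceName("hbase-master")); // hbase-master1
      }
    }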
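
On the new ServiceApiUtil.validateConfigFile(): it enforces two rules per config file entry, namely that a HADOOP_XML_TEMPLATE entry must carry a non-empty src_file that exists on the supplied FileSystem, and that every entry must carry a non-empty, absolute dest_file. A standalone sketch of the dest_file check only, assuming POSIX-style paths (the src_file existence check needs a live FileSystem and is omitted here):

    import java.nio.file.Paths;

    // Illustrates the dest_file validation added in ServiceApiUtil.validateConfigFile().
    public class DestFileCheckSketch {
      static void checkDestFile(String destFile) {
        if (destFile == null || destFile.isEmpty()) {
          throw new IllegalArgumentException("Dest_file is empty.");
        }
        if (!Paths.get(destFile).isAbsolute()) {
          throw new IllegalArgumentException(
              "Dest_file must be absolute path: " + destFile);
        }
      }

      public static void main(String[] args) {
        checkDestFile("/etc/hbase/conf/hbase-site.xml"); // accepted: absolute path
        checkDestFile("conf/hbase-site.xml");            // rejected: relative path
      }
    }

The testPath() addition in TestProtocolRecords exercises related path handling: the scheme of "hdfs://localhost/tmp/destFile2" should resolve to "hdfs".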
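
Finally, the $(String) helper appended to ServiceApiUtil wraps a name in ${...} placeholder syntax, apparently for composing substitution tokens when templates are rendered. A usage sketch (the token name is only an example):

    import org.apache.slider.util.ServiceApiUtil;

    public class PlaceholderSketch {
      public static void main(String[] args) {
        // prints "${COMPONENT_ID}"
        System.out.println(ServiceApiUtil.$("COMPONENT_ID"));
      }
    }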