diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 088b50c..979883c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -244,6 +244,10 @@ definitions:
     queue:
       type: string
       description: The YARN queue that this service should be submitted to.
+    kerberos_principal:
+      description: The Kerberos Principal of the service
+      $ref: '#/definitions/KerberosPrincipal'
+
   Resource:
     description: Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overridden at the component level. Only one of profile OR cpu & memory are expected. It raises a validation exception otherwise.
@@ -469,3 +473,15 @@ definitions:
         type: integer
         format: int32
         description: An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information.
+  KerberosPrincipal:
+    description: The kerberos principal info of the user who launches the service.
+    properties:
+      principal_name:
+        type: string
+        description: The principal name of the user who launches the service.
+      keytab:
+        type: string
+        description: |
+          The URI of the kerberos keytab. It supports two modes:
+          URI starts with "hdfs://": A path on hdfs where the keytab is stored. The keytab will be localized by YARN to each host.
+          URI starts with "file://": A path on the local host where the keytab is stored. It is assumed that the keytabs are pre-installed by admins before AM launches.
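For illustration, here is how a client might populate the new field through the Java API records this patch generates from the definition above (`Service` and `KerberosPrincipal` appear later in the patch). A minimal sketch; the service name, principal, and keytab values are hypothetical:

```java
import org.apache.hadoop.yarn.service.api.records.KerberosPrincipal;
import org.apache.hadoop.yarn.service.api.records.Service;

public class KerberosSpecExample {
  public static void main(String[] args) {
    Service service = new Service();
    service.setName("hbase-app-1");
    // An hdfs:// keytab is localized by YARN to each host; a file://
    // keytab is assumed to be pre-installed by admins on the hosts.
    service.setKerberosPrincipal(new KerberosPrincipal()
        .principalName("hbase/_HOST@EXAMPLE.COM")
        .keytab("hdfs:///user/hbase/hbase.keytab"));
    System.out.println(service);
  }
}
```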
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java
index 94dbc6e..cd41ab7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java
@@ -25,6 +25,8 @@
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 
+import java.nio.ByteBuffer;
+
 public class ServiceContext {
   public Service service = null;
   public SliderFileSystem fs;
@@ -34,6 +36,12 @@
   public ServiceScheduler scheduler;
   public ClientToAMTokenSecretManager secretManager;
   public ClientAMService clientAMService;
+  // tokens used for container launch
+  public ByteBuffer tokens;
+  // AM keytab principal
+  public String principal;
+  // AM keytab location
+  public String keytab;
 
   public ServiceContext() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
index b0b4f06..ef346ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
@@ -20,33 +20,47 @@
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Options;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
+import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
 import org.apache.hadoop.yarn.service.monitor.ServiceMonitor;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
-import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.service.utils.ServiceUtils;
-import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
+import java.util.Iterator;
 import java.util.Map;
 
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConstants.KEYTAB_LOCATION;
+
 public class ServiceMaster extends CompositeService {
 
   private static final Logger LOG =
@@ -63,13 +77,7 @@ public ServiceMaster(String name) {
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-    //TODO Deprecate slider conf, make sure works with yarn conf
     printSystemEnv();
-    if (UserGroupInformation.isSecurityEnabled()) {
-      UserGroupInformation.setConfiguration(conf);
-    }
-    LOG.info("Login user is {}", UserGroupInformation.getLoginUser());
-
     context = new ServiceContext();
     Path appDir = getAppDir();
     context.serviceHdfsDir = appDir.toString();
@@ -78,6 +86,10 @@ protected void serviceInit(Configuration conf) throws Exception {
     fs.setAppDir(appDir);
     loadApplicationJson(context, fs);
 
+    if (UserGroupInformation.isSecurityEnabled()) {
+      context.tokens = recordTokensForContainers();
+      doSecureLogin();
+    }
     // Take yarn config from YarnFile and merge them into YarnConfiguration
     for (Map.Entry<String, String> entry : context.service
         .getConfiguration().getProperties().entrySet()) {
@@ -111,6 +123,98 @@ protected void serviceInit(Configuration conf) throws Exception {
     super.serviceInit(conf);
   }
 
+  // Record the tokens and use them for launching containers.
+  // e.g. localization requires the hdfs delegation tokens
+  private ByteBuffer recordTokensForContainers() throws IOException {
+    Credentials copy = new Credentials(UserGroupInformation.getCurrentUser()
+        .getCredentials());
+    DataOutputBuffer dob = new DataOutputBuffer();
+    try {
+      copy.writeTokenStorageToStream(dob);
+    } finally {
+      dob.close();
+    }
+    // Now remove the AM->RM token so that task containers cannot access it.
+    Iterator<Token<? extends TokenIdentifier>> iter =
+        copy.getAllTokens().iterator();
+    while (iter.hasNext()) {
+      Token<? extends TokenIdentifier> token = iter.next();
+      LOG.info(token.toString());
+      if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
+        iter.remove();
+      }
+    }
+    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+  }
+
+  // 1. First try to use the user-specified keytab
+  // 2. If not specified, then try to use a pre-installed keytab on localhost
+  // 3. Strip off HDFS delegation tokens, to ensure the keytab is used to
+  //    talk to HDFS
+  private void doSecureLogin()
+      throws IOException, URISyntaxException {
+    // read the localized keytab specified by user
+    File keytab = new File(String.format(KEYTAB_LOCATION,
+        context.service.getName()));
+    if (!keytab.exists()) {
+      LOG.info("No keytab localized at " + keytab);
+      // Check if there exists a pre-installed keytab at host
+      String preInstalledKeytab = context.service.getKerberosPrincipal()
+          .getKeytab();
+      if (!StringUtils.isEmpty(preInstalledKeytab)) {
+        URI uri = new URI(preInstalledKeytab);
+        if (uri.getScheme().equals("file")) {
+          keytab = new File(uri);
+          LOG.info("Using pre-installed keytab from localhost: "
+              + preInstalledKeytab);
+        }
+      }
+    }
+    if (!keytab.exists()) {
+      LOG.info("No keytab exists: " + keytab);
+      return;
+    }
+    String principal = context.service.getKerberosPrincipal()
+        .getPrincipalName();
+    if (StringUtils.isEmpty(principal)) {
+      principal = UserGroupInformation.getLoginUser().getShortUserName();
+      LOG.info("No principal name specified. Will use AM "
+          + "login identity {} to attempt keytab-based login", principal);
+    }
+
+    Credentials credentials = UserGroupInformation.getCurrentUser()
+        .getCredentials();
+    LOG.info("User before login is: " + UserGroupInformation
+        .getCurrentUser());
+    UserGroupInformation.loginUserFromKeytab(principal,
+        keytab.getAbsolutePath());
+    // add back the credentials
+    UserGroupInformation.getCurrentUser().addCredentials(credentials);
+    LOG.info("User after login is: " + UserGroupInformation
+        .getCurrentUser());
+    context.principal = principal;
+    context.keytab = keytab.getAbsolutePath();
+    removeHdfsDelegationToken(UserGroupInformation.getLoginUser());
+  }
+
+  // Remove the HDFS delegation token from the login user, to ensure the AM
+  // uses its keytab to talk to HDFS
+  private void removeHdfsDelegationToken(UserGroupInformation user) {
+    if (!user.isFromKeytab()) {
+      LOG.error("AM is not holding a keytab in a secure deployment:"
+          + " service will fail when tokens expire");
+    }
+    Credentials credentials = user.getCredentials();
+    Iterator<Token<? extends TokenIdentifier>> iter =
+        credentials.getAllTokens().iterator();
+    while (iter.hasNext()) {
+      Token<? extends TokenIdentifier> token = iter.next();
+      if (token.getKind().equals(
+          DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) {
+        LOG.info("Remove HDFS delegation token {}.", token);
+        iter.remove();
+      }
+    }
+  }
+
   protected ContainerId getAMContainerId() throws BadClusterStateException {
     return ContainerId.fromString(ServiceUtils.mandatoryEnvVariable(
         ApplicationConstants.Environment.CONTAINER_ID.name()));
@@ -146,7 +250,8 @@ private void printSystemEnv() {
 
   public static void main(String[] args) throws Exception {
     Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
-    StringUtils.startupShutdownMessage(ServiceMaster.class, args, LOG);
+    org.apache.hadoop.util.StringUtils
+        .startupShutdownMessage(ServiceMaster.class, args, LOG);
    try {
      ServiceMaster serviceMaster = new ServiceMaster("Service Master");
      ShutdownHookManager.get()
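A note on the token handling in `recordTokensForContainers` above: this is the standard pattern for handing delegation tokens to launched containers while withholding the AM-RM token. One subtlety is ordering — removing the AMRM token only affects the serialized bytes if it happens before `writeTokenStorageToStream` is called. A condensed, standalone sketch of the pattern (class name is illustrative), with the removal done first:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;

public final class ContainerTokens {

  private ContainerTokens() {
  }

  /**
   * Serialize the AM's current credentials for container launch,
   * minus the AM->RM token, which containers must never see.
   */
  public static ByteBuffer forContainers() throws IOException {
    Credentials copy = new Credentials(
        UserGroupInformation.getCurrentUser().getCredentials());
    // Remove the AMRM token first: filtering only affects the
    // serialized buffer if it happens before the write below.
    Iterator<Token<? extends TokenIdentifier>> iter =
        copy.getAllTokens().iterator();
    while (iter.hasNext()) {
      if (iter.next().getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
        iter.remove();
      }
    }
    DataOutputBuffer dob = new DataOutputBuffer();
    try {
      copy.writeTokenStorageToStream(dob);
    } finally {
      dob.close();
    }
    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  }
}
```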
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 6bc5673..3687d2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -22,6 +22,7 @@
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -145,8 +146,19 @@ public void buildInstance(ServiceContext context, Configuration configuration)
       throws YarnException {
     app = context.service;
     executorService = Executors.newScheduledThreadPool(10);
-    RegistryOperations registryClient = RegistryOperationsFactory
-        .createInstance("ServiceScheduler", configuration);
+    RegistryOperations registryClient = null;
+    if (UserGroupInformation.isSecurityEnabled() &&
+        !StringUtils.isEmpty(context.principal)
+        && !StringUtils.isEmpty(context.keytab)) {
+      Configuration conf = getConfig();
+      conf.set(KEY_REGISTRY_USER_ACCOUNTS, "sasl:" + context.principal);
+      registryClient = RegistryOperationsFactory
+          .createKerberosInstance(conf,
+              "Client", context.principal, context.keytab);
+    } else {
+      registryClient = RegistryOperationsFactory
+          .createInstance("ServiceScheduler", configuration);
+    }
     addIfService(registryClient);
     yarnRegistryOperations =
         createYarnRegistryOperations(context, registryClient);
@@ -171,7 +183,7 @@ public void buildInstance(ServiceContext context, Configuration configuration)
     dispatcher.setDrainEventsOnStop();
     addIfService(dispatcher);
 
-    containerLaunchService = new ContainerLaunchService(context.fs);
+    containerLaunchService = new ContainerLaunchService(context);
     addService(containerLaunchService);
 
     if (YarnConfiguration.timelineServiceV2Enabled(configuration)) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java
new file mode 100644
index 0000000..a7eaf11
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/KerberosPrincipal.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.api.records;
+
+import com.google.gson.annotations.SerializedName;
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+/**
+ * The kerberos principal of the service.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+@ApiModel(description = "The kerberos principal of the service.")
+@javax.annotation.Generated(value = "io.swagger.codegen.languages.JavaClientCodegen", date = "2017-11-20T11:29:11.785-08:00")
+public class KerberosPrincipal implements Serializable {
+  private static final long serialVersionUID = -6431667195287650037L;
+
+  @SerializedName("principal_name")
+  private String principalName = null;
+
+  @SerializedName("keytab")
+  private String keytab = null;
+
+  public KerberosPrincipal principalName(String principalName) {
+    this.principalName = principalName;
+    return this;
+  }
+
+  /**
+   * The principal name of the service.
+   * @return principalName
+   **/
+  @ApiModelProperty(value = "The principal name of the service.")
+  public String getPrincipalName() {
+    return principalName;
+  }
+
+  public void setPrincipalName(String principalName) {
+    this.principalName = principalName;
+  }
+
+  public KerberosPrincipal keytab(String keytab) {
+    this.keytab = keytab;
+    return this;
+  }
+
+  /**
+   * The URI of the kerberos keytab. It supports two schemes "hdfs" and "file". If the URI starts with "hdfs://" scheme, it indicates the path on hdfs where the keytab is stored. The keytab will be localized by YARN and made available to AM in its local directory. If the URI starts with "file://" scheme, it indicates a path on the local host presumably installed by admins upfront.
+   * @return keytab
+   **/
+  @ApiModelProperty(value = "The URI of the kerberos keytab. It supports two schemes \"hdfs\" and \"file\". If the URI starts with \"hdfs://\" scheme, it indicates the path on hdfs where the keytab is stored. The keytab will be localized by YARN and made available to AM in its local directory. If the URI starts with \"file://\" scheme, it indicates a path on the local host where the keytab is presumably installed by admins upfront. ")
+  public String getKeytab() {
+    return keytab;
+  }
+
+  public void setKeytab(String keytab) {
+    this.keytab = keytab;
+  }
+
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    KerberosPrincipal kerberosPrincipal = (KerberosPrincipal) o;
+    return Objects.equals(this.principalName, kerberosPrincipal.principalName)
+        && Objects.equals(this.keytab, kerberosPrincipal.keytab);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(principalName, keytab);
+  }
+
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("class KerberosPrincipal {\n");
+
+    sb.append("    principalName: ").append(toIndentedString(principalName)).append("\n");
+    sb.append("    keytab: ").append(toIndentedString(keytab)).append("\n");
+    sb.append("}");
+    return sb.toString();
+  }
+
+  /**
+   * Convert the given object to string with each line indented by 4 spaces
+   * (except the first line).
+   */
+  private String toIndentedString(java.lang.Object o) {
+    if (o == null) {
+      return "null";
+    }
+    return o.toString().replace("\n", "\n    ");
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java
index 77a2610..1f032b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java
@@ -62,6 +62,7 @@
   private ServiceState state = null;
   private Map<String, String> quicklinks = new HashMap<>();
   private String queue = null;
+  private KerberosPrincipal kerberosPrincipal = new KerberosPrincipal();
 
   /**
    * A unique service name.
@@ -332,6 +333,24 @@ public void setQueue(String queue) {
     this.queue = queue;
   }
 
+  public Service kerberosPrincipal(KerberosPrincipal kerberosPrincipal) {
+    this.kerberosPrincipal = kerberosPrincipal;
+    return this;
+  }
+
+  /**
+   * The Kerberos Principal of the service
+   * @return kerberosPrincipal
+   **/
+  @ApiModelProperty(value = "The Kerberos Principal of the service")
+  public KerberosPrincipal getKerberosPrincipal() {
+    return kerberosPrincipal;
+  }
+
+  public void setKerberosPrincipal(KerberosPrincipal kerberosPrincipal) {
+    this.kerberosPrincipal = kerberosPrincipal;
+  }
+
   @Override
   public boolean equals(java.lang.Object o) {
     if (this == o) {
@@ -373,6 +392,8 @@ public String toString() {
     sb.append("    quicklinks: ").append(toIndentedString(quicklinks))
         .append("\n");
     sb.append("    queue: ").append(toIndentedString(queue)).append("\n");
+    sb.append("    kerberosPrincipal: ")
+        .append(toIndentedString(kerberosPrincipal)).append("\n");
     sb.append("}");
     return sb.toString();
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index d1b6026..0725010 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -28,12 +28,16 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;
 import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.HadoopKerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
@@ -43,6 +47,7 @@
 import org.apache.hadoop.yarn.client.api.AppAdminClient;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.client.api.YarnClientApplication;
+import org.apache.hadoop.yarn.client.util.YarnClientUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -79,6 +84,9 @@
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
 import java.text.MessageFormat;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
@@ -552,7 +560,7 @@ private ApplicationId submitApp(Service app)
     // copy jars to hdfs and add to localResources
     addJarResource(serviceName, localResources);
     // add keytab if in secure env
-    addKeytabResourceIfSecure(fs, localResources, conf, serviceName);
+    addKeytabResourceIfSecure(fs, localResources, app);
     if (LOG.isDebugEnabled()) {
       printLocalResources(localResources);
     }
@@ -581,6 +589,7 @@ private ApplicationId submitApp(Service app)
     amLaunchContext.setCommands(Collections.singletonList(cmdStr));
     amLaunchContext.setEnvironment(env);
     amLaunchContext.setLocalResources(localResources);
+    addHdfsDelegationTokenIfSecure(amLaunchContext);
     submissionContext.setAMContainerSpec(amLaunchContext);
     yarnClient.submitApplication(submissionContext);
     return submissionContext.getApplicationId();
@@ -771,38 +780,85 @@ private Path persistAppDef(Path appDir, Service service) throws IOException {
     return appJson;
   }
 
+  private void addHdfsDelegationTokenIfSecure(ContainerLaunchContext amContext)
+      throws IOException {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      return;
+    }
+    Credentials credentials = new Credentials();
+    String tokenRenewer = YarnClientUtils.getRmPrincipal(getConfig());
+    if (StringUtils.isEmpty(tokenRenewer)) {
+      throw new IOException(
+          "Can't get Master Kerberos principal for the RM to use as renewer");
+    }
+    // Get hdfs dt
+    final org.apache.hadoop.security.token.Token<?> tokens[] =
+        fs.getFileSystem().addDelegationTokens(tokenRenewer, credentials);
+    if (tokens != null && tokens.length != 0) {
+      for (Token<?> token : tokens) {
+        LOG.info("Got DT: " + token);
+      }
+      DataOutputBuffer dob = new DataOutputBuffer();
+      credentials.writeTokenStorageToStream(dob);
+      ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+      amContext.setTokens(fsTokens);
+    }
+  }
+
   private void addKeytabResourceIfSecure(SliderFileSystem fileSystem,
-      Map<String, LocalResource> localResource, Configuration conf,
-      String serviceName) throws IOException, BadConfigException {
+      Map<String, LocalResource> localResource, Service service)
+      throws IOException, YarnException {
     if (!UserGroupInformation.isSecurityEnabled()) {
       return;
     }
-    String keytabPreInstalledOnHost =
-        conf.get(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH);
-    if (StringUtils.isEmpty(keytabPreInstalledOnHost)) {
-      String amKeytabName =
-          conf.get(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME);
-      String keytabDir = conf.get(YarnServiceConf.KEY_HDFS_KEYTAB_DIR);
-      Path keytabPath =
-          fileSystem.buildKeytabPath(keytabDir, amKeytabName, serviceName);
-      if (fileSystem.getFileSystem().exists(keytabPath)) {
-        LocalResource keytabRes =
-            fileSystem.createAmResource(keytabPath, LocalResourceType.FILE);
-        localResource
-            .put(YarnServiceConstants.KEYTAB_DIR + "/" + amKeytabName, keytabRes);
-        LOG.info("Adding AM keytab on hdfs: " + keytabPath);
-      } else {
-        LOG.warn("No keytab file was found at {}.", keytabPath);
-        if (conf.getBoolean(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_REQUIRED, false)) {
-          throw new BadConfigException("No keytab file was found at %s.",
-              keytabPath);
-        } else {
-          LOG.warn("The AM will be "
-              + "started without a kerberos authenticated identity. "
-              + "The service is therefore not guaranteed to remain "
-              + "operational beyond 24 hours.");
-        }
+    String principalName = service.getKerberosPrincipal().getPrincipalName();
+    if (StringUtils.isEmpty(principalName)) {
+      LOG.warn("No Kerberos principal name specified for " + service.getName());
+      return;
+    }
+    if (StringUtils.isEmpty(service.getKerberosPrincipal().getKeytab())) {
+      LOG.warn("No Kerberos keytab specified for " + service.getName());
+      return;
+    }
+
+    String incomingUser = UserGroupInformation.getCurrentUser()
+        .getShortUserName();
+    String principalUser = new HadoopKerberosName(principalName).getShortName();
+    if (!incomingUser.equals(principalUser)) {
+      throw new YarnException(MessageFormat.format("Specified principal short"
+          + " user name ({}) doesn't match current user short name ({}), "
+          + "specified principal name = {}, current user = {}",
+          principalUser, incomingUser, principalName,
+          UserGroupInformation.getCurrentUser()));
+    }
+    URI keytabURI;
+    try {
+      keytabURI = new URI(service.getKerberosPrincipal().getKeytab());
+    } catch (URISyntaxException e) {
+      throw new YarnException(e);
+    }
+
+    switch (keytabURI.getScheme()) {
+    case "hdfs":
+      Path keytabOnhdfs = new Path(keytabURI);
+      if (!fileSystem.getFileSystem().exists(keytabOnhdfs)) {
+        LOG.warn(service.getName() + "'s keytab (principalName = "
+            + principalName + ") doesn't exist at: " + keytabOnhdfs);
+        return;
       }
+      LocalResource keytabRes =
+          fileSystem.createAmResource(keytabOnhdfs, LocalResourceType.FILE);
+      localResource.put(String.format(YarnServiceConstants.KEYTAB_LOCATION,
+          service.getName()), keytabRes);
+      LOG.debug("Adding " + service.getName() + "'s keytab for "
+          + "localization, uri = " + keytabOnhdfs);
+      break;
+    case "file":
+      LOG.debug("Using a keytab from localhost: " + keytabURI);
+      break;
+    default:
+      LOG.warn("Unsupported URI scheme " + keytabURI);
+      break;
    }
  }
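The delegation-token plumbing above is the usual client-side recipe: resolve the RM principal to act as token renewer, ask the FileSystem for delegation tokens, and marshal them into the AM launch context. A minimal standalone sketch under the same assumptions (a secure cluster with `yarn.resourcemanager.principal` configured; class name is illustrative):

```java
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.client.util.YarnClientUtils;

public final class AmTokens {

  private AmTokens() {
  }

  /** Attach HDFS delegation tokens to the AM's launch context. */
  public static void attach(Configuration conf, FileSystem fs,
      ContainerLaunchContext amContext) throws IOException {
    // The RM renews tokens on the application's behalf, so it must
    // be named as the renewer.
    String renewer = YarnClientUtils.getRmPrincipal(conf);
    if (renewer == null || renewer.isEmpty()) {
      throw new IOException("RM principal (token renewer) is not configured");
    }
    Credentials credentials = new Credentials();
    fs.addDelegationTokens(renewer, credentials);
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    amContext.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
  }
}
```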
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
index 684d980..ea8904a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
@@ -53,13 +53,6 @@
    */
   public static final String YARN_SERVICE_BASE_PATH = "yarn.service.base.path";
 
-  //TODO rename
-  /** Declare that a keytab must be provided */
-  public static final String KEY_AM_LOGIN_KEYTAB_REQUIRED = "slider.am.login.keytab.required";
-  public static final String KEY_AM_LOGIN_KEYTAB_NAME = "slider.am.login.keytab.name";
-  public static final String KEY_HDFS_KEYTAB_DIR = "slider.hdfs.keytab.dir";
-  public static final String KEY_AM_KEYTAB_LOCAL_PATH = "slider.am.keytab.local.path";
-
   /**
    * maximum number of failed containers (in a single component)
    * before the app exits
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
index 3973759..e18ac8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java
@@ -40,6 +40,8 @@
   String APP_TYPE = "yarn-service";
 
   String KEYTAB_DIR = "keytabs";
+  String KEYTAB_LOCATION = KEYTAB_DIR + "/%s" + ".keytab";
+
   String RESOURCE_DIR = "resources";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
index 2d7c3bb..e1e88cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java
@@ -19,16 +19,15 @@
 package org.apache.hadoop.yarn.service.containerlaunch;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerRetryContext;
 import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy;
 import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.service.ServiceContext;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
-import org.apache.hadoop.yarn.service.utils.CoreFileSystem;
 import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+import org.apache.hadoop.yarn.util.Records;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,10 +49,6 @@
       LoggerFactory.getLogger(AbstractLauncher.class);
   public static final String CLASSPATH = "CLASSPATH";
   /**
-   * Filesystem to use for the launch
-   */
-  protected final CoreFileSystem coreFileSystem;
-  /**
    * Env vars; set up at final launch stage
    */
   protected final Map<String, String> envVars = new HashMap<>();
@@ -63,25 +58,15 @@
   protected final Map<String, LocalResource> localResources = new HashMap<>();
   protected final Map<String, String> mountPaths = new HashMap<>();
   private final Map<String, ByteBuffer> serviceData = new HashMap<>();
-  // security
-  protected final Credentials credentials;
   protected boolean yarnDockerMode = false;
   protected String dockerImage;
   protected String dockerNetwork = DEFAULT_DOCKER_NETWORK;
   protected String dockerHostname;
   protected String runPrivilegedContainer;
+  private ServiceContext context;
 
-
-  /**
-   * Create instance.
-   * @param coreFileSystem filesystem
-   * @param credentials initial set of credentials -null is permitted
-   */
-  public AbstractLauncher(
-      CoreFileSystem coreFileSystem,
-      Credentials credentials) {
-    this.coreFileSystem = coreFileSystem;
-    this.credentials = credentials != null ? credentials : new Credentials();
+  public AbstractLauncher(ServiceContext context) {
+    this.context = context;
   }
 
   public void setYarnDockerMode(boolean yarnDockerMode){
@@ -113,14 +98,6 @@ public void addLocalResource(String subPath, LocalResource resource, String moun
     mountPaths.put(subPath, mountPath);
   }
 
-  /**
-   * Accessor to the credentials
-   * @return the credentials associated with this launcher
-   */
-  public Credentials getCredentials() {
-    return credentials;
-  }
-
   public void addCommand(String cmd) {
     commands.add(cmd);
@@ -160,9 +137,9 @@ public ContainerLaunchContext completeContainerLaunch() throws IOException {
     containerLaunchContext.setLocalResources(localResources);
 
     //tokens
-    log.debug("{} tokens", credentials.numberOfTokens());
-    containerLaunchContext.setTokens(CredentialUtils.marshallCredentials(
-        credentials));
+    if (context.tokens != null) {
+      containerLaunchContext.setTokens(context.tokens.duplicate());
+    }
 
     if(yarnDockerMode){
       Map<String, String> env = containerLaunchContext.getEnvironment();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java
index b9f3a24..e07661b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.service.ServiceContext;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.provider.ProviderService;
@@ -40,10 +41,11 @@
 
   private ExecutorService executorService;
   private SliderFileSystem fs;
-
-  public ContainerLaunchService(SliderFileSystem fs) {
+  private ServiceContext context;
+  public ContainerLaunchService(ServiceContext context) {
     super(ContainerLaunchService.class.getName());
-    this.fs = fs;
+    this.fs = context.fs;
+    this.context = context;
   }
 
   @Override
@@ -84,7 +86,7 @@ public ContainerLauncher(
       Component compSpec = instance.getCompSpec();
       ProviderService provider = ProviderFactory.getProviderService(
           compSpec.getArtifact());
-      AbstractLauncher launcher = new AbstractLauncher(fs, null);
+      AbstractLauncher launcher = new AbstractLauncher(context);
       try {
         provider.buildContainerLaunchContext(launcher, service, instance, fs,
             getConfig(), container);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CredentialUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CredentialUtils.java
deleted file mode 100644
index fce58e5..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CredentialUtils.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.service.containerlaunch;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import org.apache.hadoop.yarn.client.ClientRMProxy;
-import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.client.api.YarnClient;
-import org.apache.hadoop.yarn.conf.HAUtil;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
-import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.Serializable;
-import java.nio.ByteBuffer;
-import java.text.DateFormat;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-
-import static org.apache.hadoop.yarn.conf.YarnConfiguration.*;
-
-/**
- * Utils to work with credentials and tokens.
- *
- * Designed to be movable to Hadoop core
- */
-public final class CredentialUtils {
-
-  private CredentialUtils() {
-  }
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(CredentialUtils.class);
-
-  /**
-   * Save credentials to a byte buffer. Returns null if there were no
-   * credentials to save
-   * @param credentials credential set
-   * @return a byte buffer of serialized tokens
-   * @throws IOException if the credentials could not be written to the stream
-   */
-  public static ByteBuffer marshallCredentials(Credentials credentials) throws IOException {
-    ByteBuffer buffer = null;
-    if (!credentials.getAllTokens().isEmpty()) {
-      DataOutputBuffer dob = new DataOutputBuffer();
-      try {
-        credentials.writeTokenStorageToStream(dob);
-      } finally {
-        dob.close();
-      }
-      buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
-    }
-    return buffer;
-  }
-
-  /**
-   * Save credentials to a file
-   * @param file file to save to (will be overwritten)
-   * @param credentials credentials to write
-   * @throws IOException
-   */
-  public static void saveTokens(File file,
-      Credentials credentials) throws IOException {
-    try(DataOutputStream daos = new DataOutputStream(
-        new FileOutputStream(file))) {
-      credentials.writeTokenStorageToStream(daos);
-    }
-  }
-
-  /**
-   * Look up and return the resource manager's principal. This method
-   * automatically does the _HOST replacement in the principal and
-   * correctly handles HA resource manager configurations.
-   *
-   * From: YARN-4629
-   * @param conf the {@link Configuration} file from which to read the
-   * principal
-   * @return the resource manager's principal string
-   * @throws IOException thrown if there's an error replacing the host name
-   */
-  public static String getRMPrincipal(Configuration conf) throws IOException {
-    String principal = conf.get(RM_PRINCIPAL, "");
-    String hostname;
-    Preconditions.checkState(!principal.isEmpty(), "Not set: " + RM_PRINCIPAL);
-
-    if (HAUtil.isHAEnabled(conf)) {
-      YarnConfiguration yarnConf = new YarnConfiguration(conf);
-      if (yarnConf.get(RM_HA_ID) == null) {
-        // If RM_HA_ID is not configured, use the first of RM_HA_IDS.
-        // Any valid RM HA ID should work.
-        String[] rmIds = yarnConf.getStrings(RM_HA_IDS);
-        Preconditions.checkState((rmIds != null) && (rmIds.length > 0),
-            "Not set " + RM_HA_IDS);
-        yarnConf.set(RM_HA_ID, rmIds[0]);
-      }
-
-      hostname = yarnConf.getSocketAddr(
-          RM_ADDRESS,
-          DEFAULT_RM_ADDRESS,
-          DEFAULT_RM_PORT).getHostName();
-    } else {
-      hostname = conf.getSocketAddr(
-          RM_ADDRESS,
-          DEFAULT_RM_ADDRESS,
-          DEFAULT_RM_PORT).getHostName();
-    }
-    return SecurityUtil.getServerPrincipal(principal, hostname);
-  }
-
-  /**
-   * Create and add any filesystem delegation tokens with
-   * the RM(s) configured to be able to renew them. Returns null
-   * on an insecure cluster (i.e. harmless)
-   * @param conf configuration
-   * @param fs filesystem
-   * @param credentials credentials to update
-   * @return a list of all added tokens.
-   * @throws IOException
-   */
-  public static Token<?>[] addRMRenewableFSDelegationTokens(Configuration conf,
-      FileSystem fs,
-      Credentials credentials) throws IOException {
-    Preconditions.checkArgument(conf != null);
-    Preconditions.checkArgument(credentials != null);
-    if (UserGroupInformation.isSecurityEnabled()) {
-      return fs.addDelegationTokens(CredentialUtils.getRMPrincipal(conf),
-          credentials);
-    }
-    return null;
-  }
-
-  /**
-   * Add an FS delegation token which can be renewed by the current user
-   * @param fs filesystem
-   * @param credentials credentials to update
-   * @throws IOException problems.
-   */
-  public static void addSelfRenewableFSDelegationTokens(
-      FileSystem fs,
-      Credentials credentials) throws IOException {
-    Preconditions.checkArgument(fs != null);
-    Preconditions.checkArgument(credentials != null);
-    fs.addDelegationTokens(
-        getSelfRenewer(),
-        credentials);
-  }
-
-  public static String getSelfRenewer() throws IOException {
-    return UserGroupInformation.getLoginUser().getShortUserName();
-  }
-
-  /**
-   * Create and add an RM delegation token to the credentials
-   * @param yarnClient Yarn Client
-   * @param credentials to add token to
-   * @return the token which was added
-   * @throws IOException
-   * @throws YarnException
-   */
-  public static Token<TokenIdentifier> addRMDelegationToken(YarnClient yarnClient,
-      Credentials credentials)
-      throws IOException, YarnException {
-    Configuration conf = yarnClient.getConfig();
-    Text rmPrincipal = new Text(CredentialUtils.getRMPrincipal(conf));
-    Text rmDTService = ClientRMProxy.getRMDelegationTokenService(conf);
-    Token<TokenIdentifier> rmDelegationToken =
-        ConverterUtils.convertFromYarn(
-            yarnClient.getRMDelegationToken(rmPrincipal),
-            rmDTService);
-    credentials.addToken(rmDelegationToken.getService(), rmDelegationToken);
-    return rmDelegationToken;
-  }
-
-  public static Token<TimelineDelegationTokenIdentifier> maybeAddTimelineToken(
-      Configuration conf,
-      Credentials credentials)
-      throws IOException, YarnException {
-    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false)) {
-      LOG.debug("Timeline service enabled -fetching token");
-
-      try(TimelineClient timelineClient = TimelineClient.createTimelineClient()) {
-        timelineClient.init(conf);
-        timelineClient.start();
-        Token<TimelineDelegationTokenIdentifier> token =
-            timelineClient.getDelegationToken(
-                CredentialUtils.getRMPrincipal(conf));
-        credentials.addToken(token.getService(), token);
-        return token;
-      }
-    } else {
-      LOG.debug("Timeline service is disabled");
-      return null;
-    }
-  }
-
-  /**
-   * Filter a list of tokens from a set of credentials
-   * @param credentials credential source (a new credential set is returned)
-   * @param filter List of tokens to strip out
-   * @return a new, filtered, set of credentials
-   */
-  public static Credentials filterTokens(Credentials credentials,
-      List<Text> filter) {
-    Credentials result = new Credentials(credentials);
-    Iterator<Token<? extends TokenIdentifier>> iter =
-        result.getAllTokens().iterator();
-    while (iter.hasNext()) {
-      Token<? extends TokenIdentifier> token = iter.next();
-      LOG.debug("Token {}", token.getKind());
-      if (filter.contains(token.getKind())) {
-        LOG.debug("Filtering token {}", token.getKind());
-        iter.remove();
-      }
-    }
-    return result;
-  }
-
-  public static String dumpTokens(Credentials credentials, String separator) {
-    ArrayList<Token<? extends TokenIdentifier>> sorted =
-        new ArrayList<>(credentials.getAllTokens());
-    Collections.sort(sorted, new TokenComparator());
-    StringBuilder buffer = new StringBuilder(sorted.size() * 128);
-    for (Token<? extends TokenIdentifier> token : sorted) {
-      buffer.append(tokenToString(token)).append(separator);
-    }
-    return buffer.toString();
-  }
-
-  /**
-   * Create a string for people to look at
-   * @param token token to convert to a string form
-   * @return a printable view of the token
-   */
-  public static String tokenToString(Token<? extends TokenIdentifier> token) {
-    DateFormat df = DateFormat.getDateTimeInstance(
-        DateFormat.SHORT, DateFormat.SHORT);
-    StringBuilder buffer = new StringBuilder(128);
-    buffer.append(token.toString());
-    try {
-      TokenIdentifier ti = token.decodeIdentifier();
-      buffer.append("; ").append(ti);
-      if (ti instanceof AbstractDelegationTokenIdentifier) {
-        // details in human readable form, and compensate for information HDFS DT omits
-        AbstractDelegationTokenIdentifier dt = (AbstractDelegationTokenIdentifier) ti;
-        buffer.append("; Renewer: ").append(dt.getRenewer());
-        buffer.append("; Issued: ")
-            .append(df.format(new Date(dt.getIssueDate())));
-        buffer.append("; Max Date: ")
-            .append(df.format(new Date(dt.getMaxDate())));
-      }
-    } catch (IOException e) {
-      //marshall problem; not ours
-      LOG.debug("Failed to decode {}: {}", token, e, e);
-    }
-    return buffer.toString();
-  }
-
-  /**
-   * Get the expiry time of a token.
-   * @param token token to examine
-   * @return the time in milliseconds after which the token is invalid.
-   * @throws IOException
-   */
-  public static long getTokenExpiryTime(Token<?> token) throws IOException {
-    TokenIdentifier identifier = token.decodeIdentifier();
-    Preconditions.checkState(identifier instanceof AbstractDelegationTokenIdentifier,
-        "Token %s of type: %s has an identifier which cannot be examined: %s",
-        token, token.getClass(), identifier);
-    AbstractDelegationTokenIdentifier id =
-        (AbstractDelegationTokenIdentifier) identifier;
-    return id.getMaxDate();
-  }
-
-  private static class TokenComparator
-      implements Comparator<Token<? extends TokenIdentifier>>, Serializable {
-    @Override
-    public int compare(Token<? extends TokenIdentifier> left,
-        Token<? extends TokenIdentifier> right) {
-      return left.getKind().toString().compareTo(right.getKind().toString());
-    }
-  }
-}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
index c0c44c3..d65a196 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.service.provider;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -28,21 +27,18 @@
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.service.ServiceContext;
-import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
 import org.apache.hadoop.yarn.service.api.records.ConfigFormat;
-import org.apache.hadoop.yarn.service.api.records.Configuration;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
-import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
 import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
 import org.apache.hadoop.yarn.service.exceptions.BadCommandArgumentsException;
 import org.apache.hadoop.yarn.service.exceptions.SliderException;
 import org.apache.hadoop.yarn.service.utils.PublishedConfiguration;
 import org.apache.hadoop.yarn.service.utils.PublishedConfigurationOutputter;
-import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -163,53 +159,6 @@ public static void substituteMapWithTokens(Map<String, String> configs,
     }
   }
 
-  /**
-   * Localize the service keytabs for the service.
-   * @param launcher container launcher
-   * @param fileSystem file system
-   * @throws IOException trouble uploading to HDFS
-   */
-  public void localizeServiceKeytabs(AbstractLauncher launcher,
-      SliderFileSystem fileSystem, Service service) throws IOException {
-
-    Configuration conf = service.getConfiguration();
-    String keytabPathOnHost =
-        conf.getProperty(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH);
-    if (ServiceUtils.isUnset(keytabPathOnHost)) {
-      String amKeytabName =
-          conf.getProperty(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME);
-      String keytabDir =
-          conf.getProperty(YarnServiceConf.KEY_HDFS_KEYTAB_DIR);
-      // we need to localize the keytab files in the directory
-      Path keytabDirPath = fileSystem.buildKeytabPath(keytabDir, null,
-          service.getName());
-      boolean serviceKeytabsDeployed = false;
-      if (fileSystem.getFileSystem().exists(keytabDirPath)) {
-        FileStatus[] keytabs = fileSystem.getFileSystem().listStatus(
-            keytabDirPath);
-        LocalResource keytabRes;
-        for (FileStatus keytab : keytabs) {
-          if (!amKeytabName.equals(keytab.getPath().getName())
-              && keytab.getPath().getName().endsWith(".keytab")) {
-            serviceKeytabsDeployed = true;
-            log.info("Localizing keytab {}", keytab.getPath().getName());
-            keytabRes = fileSystem.createAmResource(keytab.getPath(),
-                LocalResourceType.FILE);
-            launcher.addLocalResource(KEYTAB_DIR + "/" +
-                keytab.getPath().getName(),
-                keytabRes);
-          }
-        }
-      }
-      if (!serviceKeytabsDeployed) {
-        log.warn("No service keytabs for the service have been localized. "
-            + "If the service requires keytabs for secure operation, "
-            + "please ensure that the required keytabs have been uploaded "
-            + "to the folder {}", keytabDirPath);
-      }
-    }
-  }
-
   public static Path initCompInstanceDir(SliderFileSystem fs,
       ComponentInstance instance) {
     Path compDir = new Path(new Path(fs.getAppDir(), "components"),
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
index 704b097..e74ca81 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java
@@ -111,6 +111,27 @@ public static RegistryOperations createKerberosInstance(Configuration conf,
   }
 
   /**
+   * Create a kerberos registry service client
+   * @param conf configuration
+   * @param jaasClientEntry the name of the login config entry
+   * @param principal principal of the client.
+   * @param keytab location of the keytab file
+   * @return a registry service client instance
+   */
+  public static RegistryOperations createKerberosInstance(Configuration conf,
+      String jaasClientEntry, String principal, String keytab) {
+    Preconditions.checkArgument(conf != null, "Null configuration");
+    conf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_KERBEROS);
+    conf.set(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, jaasClientEntry);
+    RegistryOperationsClient operations =
+        new RegistryOperationsClient("KerberosRegistryOperations");
+    operations.setKerberosPrincipalAndKeytab(principal, keytab);
+    operations.init(conf);
+    return operations;
+  }
+
+
+  /**
    * Create and initialize an operations instance authenticated with write
    * access via an id:password pair.
    *
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
index 8713920..c81a0ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
@@ -127,6 +127,7 @@ public CuratorService(String name, RegistryBindingSource bindingSource) {
     } else {
       this.bindingSource = this;
     }
+    registrySecurity = new RegistrySecurity("registry security");
   }
 
   /**
@@ -152,8 +153,7 @@ protected void serviceInit(Configuration conf) throws Exception {
     registryRoot = conf.getTrimmed(KEY_REGISTRY_ZK_ROOT,
         DEFAULT_ZK_REGISTRY_ROOT);
 
-    // create and add the registry service
-    registrySecurity = new RegistrySecurity("registry security");
+    // add the registry service
     addService(registrySecurity);
 
     if (LOG.isDebugEnabled()) {
@@ -163,6 +163,10 @@ protected void serviceInit(Configuration conf) throws Exception {
     super.serviceInit(conf);
   }
 
+  public void setKerberosPrincipalAndKeytab(String principal, String keytab) {
+    registrySecurity.setKerberosPrincipalAndKeytab(principal, keytab);
+  }
+
   /**
    * Start the service.
    * This is where the curator instance is started.
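Taken together, the factory and `CuratorService` changes above let a secured AM obtain a SASL-authenticated registry client in a couple of lines. A minimal usage sketch (the principal and keytab path are placeholders; "Client" is the JAAS login-context entry name used elsewhere in this patch):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.registry.client.api.RegistryOperations;
import org.apache.hadoop.registry.client.api.RegistryOperationsFactory;

public class SecureRegistryClientExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The factory sets the registry auth to Kerberos, records the JAAS
    // entry name, and passes principal/keytab down to RegistrySecurity.
    RegistryOperations registry = RegistryOperationsFactory
        .createKerberosInstance(conf, "Client",
            "user/host.example.com@EXAMPLE.COM", "/etc/security/user.keytab");
    registry.start();
    try {
      System.out.println(registry.list("/"));
    } finally {
      registry.stop();
    }
  }
}
```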
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index 23fadb5..b1714b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -23,6 +23,7 @@
 import com.google.common.collect.Lists;
 import org.apache.commons.lang.StringUtils;
 import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
@@ -31,6 +32,7 @@
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.zookeeper.Environment;
 import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
 import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.data.Id;
 import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
@@ -44,9 +46,11 @@
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Locale;
+import java.util.Map;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 import static org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions.*;
@@ -170,13 +174,17 @@
   /**
    * Client context
    */
-  private String jaasClientContext;
+  private String jaasClientEntry;
 
   /**
    * Client identity
    */
   private String jaasClientIdentity;
 
+  private String principal;
+
+  private String keytab;
+
   /**
    * Create an instance
    * @param name service name
@@ -262,13 +270,12 @@ private void initSecurity() throws IOException {
       }
 
       UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
-      jaasClientContext = getOrFail(KEY_REGISTRY_CLIENT_JAAS_CONTEXT,
+      jaasClientEntry = getOrFail(KEY_REGISTRY_CLIENT_JAAS_CONTEXT,
           DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT);
       jaasClientIdentity = currentUser.getShortUserName();
       if (LOG.isDebugEnabled()) {
        LOG.debug("Auth is SASL user=\"{}\" JAAS context=\"{}\"",
-            jaasClientIdentity,
-            jaasClientContext);
+            jaasClientIdentity, jaasClientEntry);
       }
       break;
@@ -738,9 +745,81 @@ public void applySecurityEnvironment(CuratorFrameworkFactory.Builder builder) {
       break;
 
     case sasl:
-      // bind to the current identity and context within the JAAS file
-      setZKSaslClientProperties(jaasClientIdentity, jaasClientContext);
+      JaasConfiguration jconf =
+          new JaasConfiguration(jaasClientEntry, principal, keytab);
+      javax.security.auth.login.Configuration.setConfiguration(jconf);
+      setSystemPropertyIfUnset(ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY,
+          "true");
+      setSystemPropertyIfUnset(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
+          jaasClientEntry);
+      LOG.info(
+          "Enabling ZK sasl client: jaasClientEntry = " + jaasClientEntry
+              + ", principal = " + principal + ", keytab = " + keytab);
+    }
+  }
+
+  public void setKerberosPrincipalAndKeytab(String principal, String keytab) {
+    this.principal = principal;
+    this.keytab = keytab;
+  }
+
+  /**
+   * Creates a programmatic version of a jaas.conf file. This can be used
+   * instead of writing a jaas.conf file and setting the system property,
+   * "java.security.auth.login.config", to point to that file. It is meant
+   * to be used for connecting to ZooKeeper.
+   */
+  @InterfaceAudience.Private
+  public static class JaasConfiguration extends
+      javax.security.auth.login.Configuration {
+
+    private final javax.security.auth.login.Configuration baseConfig =
+        javax.security.auth.login.Configuration.getConfiguration();
+    private static AppConfigurationEntry[] entry;
+    private String entryName;
+
+    /**
+     * Add an entry to the jaas configuration with the passed in name,
+     * principal, and keytab. The other necessary options will be set for you.
+     *
+     * @param entryName The name of the entry (e.g. "Client")
+     * @param principal The principal of the user
+     * @param keytab The location of the keytab
+     */
+    public JaasConfiguration(String entryName, String principal, String keytab) {
+      this.entryName = entryName;
+      Map<String, String> options = new HashMap<String, String>();
+      options.put("keyTab", keytab);
+      options.put("principal", principal);
+      options.put("useKeyTab", "true");
+      options.put("storeKey", "true");
+      options.put("useTicketCache", "false");
+      options.put("refreshKrb5Config", "true");
+      String jaasEnvVar = System.getenv("HADOOP_JAAS_DEBUG");
+      if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) {
+        options.put("debug", "true");
+      }
+      entry = new AppConfigurationEntry[]{
+          new AppConfigurationEntry(getKrb5LoginModuleName(),
+              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+              options)};
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+      return (entryName.equals(name)) ? entry : ((baseConfig != null)
+          ? baseConfig.getAppConfigurationEntry(name) : null);
+    }
+
+    private String getKrb5LoginModuleName() {
+      String krb5LoginModuleName;
+      if (System.getProperty("java.vendor").contains("IBM")) {
+        krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
+      } else {
+        krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
       }
+      return krb5LoginModuleName;
     }
   }
@@ -899,7 +978,7 @@ public String buildSecurityDiagnostics() {
         .append("; ");
     builder.append(KEY_REGISTRY_CLIENT_JAAS_CONTEXT)
         .append("=")
-        .append(jaasClientContext)
+        .append(jaasClientEntry)
         .append("; ");
     builder.append(describeProperty(PROP_ZK_SASL_CLIENT_USERNAME));
     builder.append(describeProperty(PROP_ZK_SASL_CLIENT_CONTEXT));
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.registry.server.integration; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.curator.framework.api.BackgroundCallback; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.registry.client.impl.zk.RegistryBindingSource; -import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies; -import org.apache.hadoop.registry.server.services.DeleteCompletionCallback; -import org.apache.hadoop.registry.server.services.RegistryAdminService; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.Future; - -/** - * Handle RM events by updating the registry - *

- * These actions are all implemented as event handlers to operations
- * which come from the RM.
- * <p>
- * This service is expected to be executed by a user with the permissions - * to manipulate the entire registry, - */ -@InterfaceAudience.LimitedPrivate("YARN") -@InterfaceStability.Evolving -public class RMRegistryOperationsService extends RegistryAdminService { - private static final Logger LOG = - LoggerFactory.getLogger(RMRegistryOperationsService.class); - - private PurgePolicy purgeOnCompletionPolicy = PurgePolicy.PurgeAll; - - public RMRegistryOperationsService(String name) { - this(name, null); - } - - public RMRegistryOperationsService(String name, - RegistryBindingSource bindingSource) { - super(name, bindingSource); - } - - - /** - * Extend the parent service initialization by verifying that the - * service knows —in a secure cluster— the realm in which it is executing. - * It needs this to properly build up the user names and hence their - * access rights. - * - * @param conf configuration of the service - * @throws Exception - */ - @Override - protected void serviceInit(Configuration conf) throws Exception { - super.serviceInit(conf); - - verifyRealmValidity(); - } - - public PurgePolicy getPurgeOnCompletionPolicy() { - return purgeOnCompletionPolicy; - } - - public void setPurgeOnCompletionPolicy(PurgePolicy purgeOnCompletionPolicy) { - this.purgeOnCompletionPolicy = purgeOnCompletionPolicy; - } - - public void onApplicationAttemptRegistered(ApplicationAttemptId attemptId, - String host, int rpcport, String trackingurl) throws IOException { - - } - - public void onApplicationLaunched(ApplicationId id) throws IOException { - - } - - /** - * Actions to take as an AM registers itself with the RM. - * @param attemptId attempt ID - * @throws IOException problems - */ - public void onApplicationMasterRegistered(ApplicationAttemptId attemptId) throws - IOException { - } - - /** - * Actions to take when the AM container is completed - * @param containerId container ID - * @throws IOException problems - */ - public void onAMContainerFinished(ContainerId containerId) throws - IOException { - LOG.info("AM Container {} finished, purging application attempt records", - containerId); - - // remove all application attempt entries - purgeAppAttemptRecords(containerId.getApplicationAttemptId()); - - // also treat as a container finish to remove container - // level records for the AM container - onContainerFinished(containerId); - } - - /** - * remove all application attempt entries - * @param attemptId attempt ID - */ - protected void purgeAppAttemptRecords(ApplicationAttemptId attemptId) { - purgeRecordsAsync("/", - attemptId.toString(), - PersistencePolicies.APPLICATION_ATTEMPT); - } - - /** - * Actions to take when an application attempt is completed - * @param attemptId application ID - * @throws IOException problems - */ - public void onApplicationAttemptUnregistered(ApplicationAttemptId attemptId) - throws IOException { - LOG.info("Application attempt {} unregistered, purging app attempt records", - attemptId); - purgeAppAttemptRecords(attemptId); - } - - /** - * Actions to take when an application is completed - * @param id application ID - * @throws IOException problems - */ - public void onApplicationCompleted(ApplicationId id) - throws IOException { - LOG.info("Application {} completed, purging application-level records", - id); - purgeRecordsAsync("/", - id.toString(), - PersistencePolicies.APPLICATION); - } - - public void onApplicationAttemptAdded(ApplicationAttemptId appAttemptId) { - } - - /** - * This is the event where the user is known, so the user directory - * can be 
created - * @param applicationId application ID - * @param user username - * @throws IOException problems - */ - public void onStateStoreEvent(ApplicationId applicationId, String user) throws - IOException { - initUserRegistryAsync(user); - } - - /** - * Actions to take when the AM container is completed - * @param id container ID - * @throws IOException problems - */ - public void onContainerFinished(ContainerId id) throws IOException { - LOG.info("Container {} finished, purging container-level records", - id); - purgeRecordsAsync("/", - id.toString(), - PersistencePolicies.CONTAINER); - } - - /** - * Queue an async operation to purge all matching records under a base path. - *

- * <ol>
- *   <li>Uses a depth first search</li>
- *   <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
- *   <li>If a record matches then it is deleted without any child searches</li>
- *   <li>Deletions will be asynchronous if a callback is provided</li>
- * </ol>
- * @param path base path - * @param id ID for service record.id - * @param persistencePolicyMatch ID for the persistence policy to match: - * no match, no delete. - * @return a future that returns the #of records deleted - */ - @VisibleForTesting - public Future purgeRecordsAsync(String path, - String id, - String persistencePolicyMatch) { - - return purgeRecordsAsync(path, - id, persistencePolicyMatch, - purgeOnCompletionPolicy, - new DeleteCompletionCallback()); - } - - /** - * Queue an async operation to purge all matching records under a base path. - *
- * <ol>
- *   <li>Uses a depth first search</li>
- *   <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
- *   <li>If a record matches then it is deleted without any child searches</li>
- *   <li>Deletions will be asynchronous if a callback is provided</li>
- * </ol>
- * @param path base path - * @param id ID for service record.id - * @param persistencePolicyMatch ID for the persistence policy to match: - * no match, no delete. - * @param purgePolicy how to react to children under the entry - * @param callback an optional callback - * @return a future that returns the #of records deleted - */ - @VisibleForTesting - public Future purgeRecordsAsync(String path, - String id, - String persistencePolicyMatch, - PurgePolicy purgePolicy, - BackgroundCallback callback) { - LOG.info(" records under {} with ID {} and policy {}: {}", - path, id, persistencePolicyMatch); - return submit( - new AsyncPurge(path, - new SelectByYarnPersistence(id, persistencePolicyMatch), - purgePolicy, - callback)); - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java index e160d4a..829ef68 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java @@ -21,7 +21,6 @@ import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.api.BackgroundCallback; import org.apache.curator.framework.api.CuratorEvent; -import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,7 +33,7 @@ */ public class DeleteCompletionCallback implements BackgroundCallback { private static final Logger LOG = - LoggerFactory.getLogger(RMRegistryOperationsService.class); + LoggerFactory.getLogger(DeleteCompletionCallback.class); private AtomicInteger events = new AtomicInteger(0); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java index 5b34f60..0d4a467 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java @@ -23,7 +23,8 @@ import org.apache.hadoop.registry.client.binding.RegistryPathUtils; import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies; import org.apache.hadoop.registry.client.types.ServiceRecord; -import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService; + +import org.apache.hadoop.registry.server.services.RegistryAdminService; import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,22 +32,16 @@ import java.io.IOException; import java.net.URISyntaxException; -/** - * Abstract registry tests .. inits the field {@link #registry} - * before the test with an instance of {@link RMRegistryOperationsService}; - * and {@link #operations} with the same instance cast purely - * to the type {@link RegistryOperations}. 
- * - */ + public class AbstractRegistryTest extends AbstractZKRegistryTest { private static final Logger LOG = LoggerFactory.getLogger(AbstractRegistryTest.class); - protected RMRegistryOperationsService registry; + protected RegistryAdminService registry; protected RegistryOperations operations; @Before public void setupRegistry() throws IOException { - registry = new RMRegistryOperationsService("yarnRegistry"); + registry = new RegistryAdminService("yarnRegistry"); operations = registry; registry.init(createRegistryConfiguration()); registry.start(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java deleted file mode 100644 index 451a69b..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java +++ /dev/null @@ -1,369 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.registry.integration; - -import org.apache.curator.framework.api.BackgroundCallback; -import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; -import org.apache.hadoop.registry.AbstractRegistryTest; -import org.apache.hadoop.registry.client.api.BindFlags; -import org.apache.hadoop.registry.client.api.RegistryConstants; -import org.apache.hadoop.registry.client.binding.RegistryUtils; -import org.apache.hadoop.registry.client.binding.RegistryPathUtils; -import org.apache.hadoop.registry.client.impl.zk.ZKPathDumper; -import org.apache.hadoop.registry.client.impl.CuratorEventCatcher; -import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies; -import org.apache.hadoop.registry.client.types.RegistryPathStatus; -import org.apache.hadoop.registry.client.types.ServiceRecord; -import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; -import org.apache.hadoop.registry.server.services.DeleteCompletionCallback; -import org.apache.hadoop.registry.server.services.RegistryAdminService; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URI; -import java.util.Collection; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; - -import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint; -import static org.apache.hadoop.registry.client.binding.RegistryTypeUtils.restEndpoint; - -public class TestRegistryRMOperations extends AbstractRegistryTest { - protected static final Logger LOG = - LoggerFactory.getLogger(TestRegistryRMOperations.class); - - /** - * trigger a purge operation - * @param path path - * @param id yarn ID - * @param policyMatch policy to match ID on - * @param purgePolicy policy when there are children under a match - * @return the number purged - * @throws IOException - */ - public int purge(String path, - String id, - String policyMatch, - RegistryAdminService.PurgePolicy purgePolicy) throws - IOException, - ExecutionException, - InterruptedException { - return purge(path, id, policyMatch, purgePolicy, null); - } - - /** - * - * trigger a purge operation - * @param path pathn - * @param id yarn ID - * @param policyMatch policy to match ID on - * @param purgePolicy policy when there are children under a match - * @param callback optional callback - * @return the number purged - * @throws IOException - */ - public int purge(String path, - String id, - String policyMatch, - RegistryAdminService.PurgePolicy purgePolicy, - BackgroundCallback callback) throws - IOException, - ExecutionException, - InterruptedException { - - Future future = registry.purgeRecordsAsync(path, - id, policyMatch, purgePolicy, callback); - try { - return future.get(); - } catch (ExecutionException e) { - if (e.getCause() instanceof IOException) { - throw (IOException) e.getCause(); - } else { - throw e; - } - } - } - - @Test - public void testPurgeEntryCuratorCallback() throws Throwable { - - String path = "/users/example/hbase/hbase1/"; - ServiceRecord written = buildExampleServiceEntry( - PersistencePolicies.APPLICATION_ATTEMPT); - written.set(YarnRegistryAttributes.YARN_ID, - "testAsyncPurgeEntry_attempt_001"); - - operations.mknode(RegistryPathUtils.parentOf(path), true); - operations.bind(path, written, 0); - - ZKPathDumper dump = registry.dumpPath(false); - CuratorEventCatcher events = new CuratorEventCatcher(); - - LOG.info("Initial state {}", dump); - - // container query - 
String id = written.get(YarnRegistryAttributes.YARN_ID, ""); - int opcount = purge("/", - id, - PersistencePolicies.CONTAINER, - RegistryAdminService.PurgePolicy.PurgeAll, - events); - assertPathExists(path); - assertEquals(0, opcount); - assertEquals("Event counter", 0, events.getCount()); - - // now the application attempt - opcount = purge("/", - id, - PersistencePolicies.APPLICATION_ATTEMPT, - RegistryAdminService.PurgePolicy.PurgeAll, - events); - - LOG.info("Final state {}", dump); - - assertPathNotFound(path); - assertEquals("wrong no of delete operations in " + dump, 1, opcount); - // and validate the callback event - assertEquals("Event counter", 1, events.getCount()); - } - - @Test - public void testAsyncPurgeEntry() throws Throwable { - - String path = "/users/example/hbase/hbase1/"; - ServiceRecord written = buildExampleServiceEntry( - PersistencePolicies.APPLICATION_ATTEMPT); - written.set(YarnRegistryAttributes.YARN_ID, - "testAsyncPurgeEntry_attempt_001"); - - operations.mknode(RegistryPathUtils.parentOf(path), true); - operations.bind(path, written, 0); - - ZKPathDumper dump = registry.dumpPath(false); - - LOG.info("Initial state {}", dump); - - DeleteCompletionCallback deletions = new DeleteCompletionCallback(); - int opcount = purge("/", - written.get(YarnRegistryAttributes.YARN_ID, ""), - PersistencePolicies.CONTAINER, - RegistryAdminService.PurgePolicy.PurgeAll, - deletions); - assertPathExists(path); - - dump = registry.dumpPath(false); - - assertEquals("wrong no of delete operations in " + dump, 0, - deletions.getEventCount()); - assertEquals("wrong no of delete operations in " + dump, 0, opcount); - - - // now app attempt - deletions = new DeleteCompletionCallback(); - opcount = purge("/", - written.get(YarnRegistryAttributes.YARN_ID, ""), - PersistencePolicies.APPLICATION_ATTEMPT, - RegistryAdminService.PurgePolicy.PurgeAll, - deletions); - - dump = registry.dumpPath(false); - LOG.info("Final state {}", dump); - - assertPathNotFound(path); - assertEquals("wrong no of delete operations in " + dump, 1, - deletions.getEventCount()); - assertEquals("wrong no of delete operations in " + dump, 1, opcount); - // and validate the callback event - - } - - @Test - public void testPutGetContainerPersistenceServiceEntry() throws Throwable { - - String path = ENTRY_PATH; - ServiceRecord written = buildExampleServiceEntry( - PersistencePolicies.CONTAINER); - - operations.mknode(RegistryPathUtils.parentOf(path), true); - operations.bind(path, written, BindFlags.CREATE); - ServiceRecord resolved = operations.resolve(path); - validateEntry(resolved); - assertMatches(written, resolved); - } - - /** - * Create a complex example app - * @throws Throwable - */ - @Test - public void testCreateComplexApplication() throws Throwable { - String appId = "application_1408631738011_0001"; - String cid = "container_1408631738011_0001_01_"; - String cid1 = cid + "000001"; - String cid2 = cid + "000002"; - String appPath = USERPATH + "tomcat"; - - ServiceRecord webapp = createRecord(appId, - PersistencePolicies.APPLICATION, "tomcat-based web application", - null); - webapp.addExternalEndpoint(restEndpoint("www", - new URI("http", "//loadbalancer/", null))); - - ServiceRecord comp1 = createRecord(cid1, PersistencePolicies.CONTAINER, - null, - null); - comp1.addExternalEndpoint(restEndpoint("www", - new URI("http", "//rack4server3:43572", null))); - comp1.addInternalEndpoint( - inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573)); - - // Component 2 has a container lifespan - ServiceRecord comp2 = 
createRecord(cid2, PersistencePolicies.CONTAINER, - null, - null); - comp2.addExternalEndpoint(restEndpoint("www", - new URI("http", "//rack1server28:35881", null))); - comp2.addInternalEndpoint( - inetAddrEndpoint("jmx", "JMX", "rack1server28", 35882)); - - operations.mknode(USERPATH, false); - operations.bind(appPath, webapp, BindFlags.OVERWRITE); - String componentsPath = appPath + RegistryConstants.SUBPATH_COMPONENTS; - operations.mknode(componentsPath, false); - String dns1 = RegistryPathUtils.encodeYarnID(cid1); - String dns1path = componentsPath + dns1; - operations.bind(dns1path, comp1, BindFlags.CREATE); - String dns2 = RegistryPathUtils.encodeYarnID(cid2); - String dns2path = componentsPath + dns2; - operations.bind(dns2path, comp2, BindFlags.CREATE); - - ZKPathDumper pathDumper = registry.dumpPath(false); - LOG.info(pathDumper.toString()); - - logRecord("tomcat", webapp); - logRecord(dns1, comp1); - logRecord(dns2, comp2); - - ServiceRecord dns1resolved = operations.resolve(dns1path); - assertEquals("Persistence policies on resolved entry", - PersistencePolicies.CONTAINER, - dns1resolved.get(YarnRegistryAttributes.YARN_PERSISTENCE, "")); - - Map children = - RegistryUtils.statChildren(operations, componentsPath); - assertEquals(2, children.size()); - Collection - componentStats = children.values(); - Map records = - RegistryUtils.extractServiceRecords(operations, - componentsPath, componentStats); - assertEquals(2, records.size()); - ServiceRecord retrieved1 = records.get(dns1path); - logRecord(retrieved1.get(YarnRegistryAttributes.YARN_ID, ""), retrieved1); - assertMatches(dns1resolved, retrieved1); - assertEquals(PersistencePolicies.CONTAINER, - retrieved1.get(YarnRegistryAttributes.YARN_PERSISTENCE, "")); - - // create a listing under components/ - operations.mknode(componentsPath + "subdir", false); - - // this shows up in the listing of child entries - Map childrenUpdated = - RegistryUtils.statChildren(operations, componentsPath); - assertEquals(3, childrenUpdated.size()); - - // the non-record child this is not picked up in the record listing - Map recordsUpdated = - - RegistryUtils.extractServiceRecords(operations, - componentsPath, - childrenUpdated); - assertEquals(2, recordsUpdated.size()); - - // now do some deletions. 
- - // synchronous delete container ID 2 - - // fail if the app policy is chosen - assertEquals(0, purge("/", cid2, PersistencePolicies.APPLICATION, - RegistryAdminService.PurgePolicy.FailOnChildren)); - // succeed for container - assertEquals(1, purge("/", cid2, PersistencePolicies.CONTAINER, - RegistryAdminService.PurgePolicy.FailOnChildren)); - assertPathNotFound(dns2path); - assertPathExists(dns1path); - - // expect a skip on children to skip - assertEquals(0, - purge("/", appId, PersistencePolicies.APPLICATION, - RegistryAdminService.PurgePolicy.SkipOnChildren)); - assertPathExists(appPath); - assertPathExists(dns1path); - - // attempt to delete app with policy of fail on children - try { - int p = purge("/", - appId, - PersistencePolicies.APPLICATION, - RegistryAdminService.PurgePolicy.FailOnChildren); - fail("expected a failure, got a purge count of " + p); - } catch (PathIsNotEmptyDirectoryException expected) { - // expected - } - assertPathExists(appPath); - assertPathExists(dns1path); - - - // now trigger recursive delete - assertEquals(1, - purge("/", appId, PersistencePolicies.APPLICATION, - RegistryAdminService.PurgePolicy.PurgeAll)); - assertPathNotFound(appPath); - assertPathNotFound(dns1path); - - } - - @Test - public void testChildDeletion() throws Throwable { - ServiceRecord app = createRecord("app1", - PersistencePolicies.APPLICATION, "app", - null); - ServiceRecord container = createRecord("container1", - PersistencePolicies.CONTAINER, "container", - null); - - operations.bind("/app", app, BindFlags.OVERWRITE); - operations.bind("/app/container", container, BindFlags.OVERWRITE); - - try { - int p = purge("/", - "app1", - PersistencePolicies.APPLICATION, - RegistryAdminService.PurgePolicy.FailOnChildren); - fail("expected a failure, got a purge count of " + p); - } catch (PathIsNotEmptyDirectoryException expected) { - // expected - } - - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRMRegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRMRegistryOperations.java deleted file mode 100644 index 41760d6..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRMRegistryOperations.java +++ /dev/null @@ -1,348 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.registry.secure; - - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.PathPermissionException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.service.ServiceStateException; -import org.apache.hadoop.registry.client.api.RegistryConstants; -import org.apache.hadoop.registry.client.api.RegistryOperations; -import org.apache.hadoop.registry.client.api.RegistryOperationsFactory; -import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException; -import org.apache.hadoop.registry.client.impl.zk.ZKPathDumper; -import org.apache.hadoop.registry.client.impl.RegistryOperationsClient; -import org.apache.hadoop.registry.client.impl.zk.RegistrySecurity; -import org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions; -import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService; -import org.apache.hadoop.registry.server.services.RegistryAdminService; -import org.apache.zookeeper.client.ZooKeeperSaslClient; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Id; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.security.auth.login.LoginException; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; -import java.util.List; - -import static org.apache.hadoop.registry.client.api.RegistryConstants.*; - -/** - * Verify that the {@link RMRegistryOperationsService} works securely - */ -public class TestSecureRMRegistryOperations extends AbstractSecureRegistryTest { - private static final Logger LOG = - LoggerFactory.getLogger(TestSecureRMRegistryOperations.class); - private Configuration secureConf; - private Configuration zkClientConf; - private UserGroupInformation zookeeperUGI; - - @Before - public void setupTestSecureRMRegistryOperations() throws Exception { - startSecureZK(); - secureConf = new Configuration(); - secureConf.setBoolean(KEY_REGISTRY_SECURE, true); - - // create client conf containing the ZK quorum - zkClientConf = new Configuration(secureZK.getConfig()); - zkClientConf.setBoolean(KEY_REGISTRY_SECURE, true); - assertNotEmpty(zkClientConf.get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM)); - - // ZK is in charge - secureConf.set(KEY_REGISTRY_SYSTEM_ACCOUNTS, "sasl:zookeeper@"); - zookeeperUGI = loginUGI(ZOOKEEPER, keytab_zk); - } - - @After - public void teardownTestSecureRMRegistryOperations() { - } - - /** - * Create the RM registry operations as the current user - * @return the service - * @throws LoginException - * @throws FileNotFoundException - */ - public RMRegistryOperationsService startRMRegistryOperations() throws - LoginException, IOException, InterruptedException { - // kerberos - secureConf.set(KEY_REGISTRY_CLIENT_AUTH, - REGISTRY_CLIENT_AUTH_KERBEROS); - secureConf.set(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, ZOOKEEPER_CLIENT_CONTEXT); - - RMRegistryOperationsService registryOperations = zookeeperUGI.doAs( - new PrivilegedExceptionAction() { - @Override - public RMRegistryOperationsService run() throws Exception { - RMRegistryOperationsService operations - = new RMRegistryOperationsService("rmregistry", secureZK); - addToTeardown(operations); - operations.init(secureConf); - LOG.info(operations.bindingDiagnosticDetails()); - operations.start(); - return operations; - } - }); - - return registryOperations; - } - - /** - * test that ZK can write as itself - * @throws 
Throwable - */ - @Test - public void testZookeeperCanWriteUnderSystem() throws Throwable { - - RMRegistryOperationsService rmRegistryOperations = - startRMRegistryOperations(); - RegistryOperations operations = rmRegistryOperations; - operations.mknode(PATH_SYSTEM_SERVICES + "hdfs", - false); - ZKPathDumper pathDumper = rmRegistryOperations.dumpPath(true); - LOG.info(pathDumper.toString()); - } - - @Test - public void testAnonReadAccess() throws Throwable { - RMRegistryOperationsService rmRegistryOperations = - startRMRegistryOperations(); - describe(LOG, "testAnonReadAccess"); - RegistryOperations operations = - RegistryOperationsFactory.createAnonymousInstance(zkClientConf); - addToTeardown(operations); - operations.start(); - - assertFalse("RegistrySecurity.isClientSASLEnabled()==true", - RegistrySecurity.isClientSASLEnabled()); - operations.list(PATH_SYSTEM_SERVICES); - } - - @Test - public void testAnonNoWriteAccess() throws Throwable { - RMRegistryOperationsService rmRegistryOperations = - startRMRegistryOperations(); - describe(LOG, "testAnonNoWriteAccess"); - RegistryOperations operations = - RegistryOperationsFactory.createAnonymousInstance(zkClientConf); - addToTeardown(operations); - operations.start(); - - String servicePath = PATH_SYSTEM_SERVICES + "hdfs"; - expectMkNodeFailure(operations, servicePath); - } - - @Test - public void testAnonNoWriteAccessOffRoot() throws Throwable { - RMRegistryOperationsService rmRegistryOperations = - startRMRegistryOperations(); - describe(LOG, "testAnonNoWriteAccessOffRoot"); - RegistryOperations operations = - RegistryOperationsFactory.createAnonymousInstance(zkClientConf); - addToTeardown(operations); - operations.start(); - assertFalse("mknode(/)", operations.mknode("/", false)); - expectMkNodeFailure(operations, "/sub"); - expectDeleteFailure(operations, PATH_SYSTEM_SERVICES, true); - } - - /** - * Expect a mknode operation to fail - * @param operations operations instance - * @param path path - * @throws IOException An IO failure other than those permitted - */ - public void expectMkNodeFailure(RegistryOperations operations, - String path) throws IOException { - try { - operations.mknode(path, false); - fail("should have failed to create a node under " + path); - } catch (PathPermissionException expected) { - // expected - } catch (NoPathPermissionsException expected) { - // expected - } - } - - /** - * Expect a delete operation to fail - * @param operations operations instance - * @param path path - * @param recursive - * @throws IOException An IO failure other than those permitted - */ - public void expectDeleteFailure(RegistryOperations operations, - String path, boolean recursive) throws IOException { - try { - operations.delete(path, recursive); - fail("should have failed to delete the node " + path); - } catch (PathPermissionException expected) { - // expected - } catch (NoPathPermissionsException expected) { - // expected - } - } - - @Test - public void testAlicePathRestrictedAnonAccess() throws Throwable { - RMRegistryOperationsService rmRegistryOperations = - startRMRegistryOperations(); - String aliceHome = rmRegistryOperations.initUserRegistry(ALICE); - describe(LOG, "Creating anonymous accessor"); - RegistryOperations anonOperations = - RegistryOperationsFactory.createAnonymousInstance(zkClientConf); - addToTeardown(anonOperations); - anonOperations.start(); - anonOperations.list(aliceHome); - expectMkNodeFailure(anonOperations, aliceHome + "/anon"); - expectDeleteFailure(anonOperations, aliceHome, true); - } - - @Test - 
public void testUserZookeeperHomePathAccess() throws Throwable { - RMRegistryOperationsService rmRegistryOperations = - startRMRegistryOperations(); - final String home = rmRegistryOperations.initUserRegistry(ZOOKEEPER); - describe(LOG, "Creating ZK client"); - - RegistryOperations operations = zookeeperUGI.doAs( - new PrivilegedExceptionAction() { - @Override - public RegistryOperations run() throws Exception { - RegistryOperations operations = - RegistryOperationsFactory.createKerberosInstance(zkClientConf, - ZOOKEEPER_CLIENT_CONTEXT); - addToTeardown(operations); - operations.start(); - - return operations; - } - }); - operations.list(home); - String path = home + "/subpath"; - operations.mknode(path, false); - operations.delete(path, true); - } - - @Test - public void testUserHomedirsPermissionsRestricted() throws Throwable { - // test that the /users/$user permissions are restricted - RMRegistryOperationsService rmRegistryOperations = - startRMRegistryOperations(); - // create Alice's dir, so it should have an ACL for Alice - final String home = rmRegistryOperations.initUserRegistry(ALICE); - List acls = rmRegistryOperations.zkGetACLS(home); - ACL aliceACL = null; - for (ACL acl : acls) { - LOG.info(RegistrySecurity.aclToString(acl)); - Id id = acl.getId(); - if (id.getScheme().equals(ZookeeperConfigOptions.SCHEME_SASL) - && id.getId().startsWith(ALICE)) { - - aliceACL = acl; - break; - } - } - assertNotNull(aliceACL); - assertEquals(RegistryAdminService.USER_HOMEDIR_ACL_PERMISSIONS, - aliceACL.getPerms()); - } - - @Test - public void testDigestAccess() throws Throwable { - RMRegistryOperationsService registryAdmin = - startRMRegistryOperations(); - String id = "username"; - String pass = "password"; - registryAdmin.addWriteAccessor(id, pass); - List clientAcls = registryAdmin.getClientAcls(); - LOG.info("Client ACLS=\n{}", RegistrySecurity.aclsToString(clientAcls)); - - String base = "/digested"; - registryAdmin.mknode(base, false); - List baseACLs = registryAdmin.zkGetACLS(base); - String aclset = RegistrySecurity.aclsToString(baseACLs); - LOG.info("Base ACLs=\n{}", aclset); - ACL found = null; - for (ACL acl : baseACLs) { - if (ZookeeperConfigOptions.SCHEME_DIGEST.equals(acl.getId().getScheme())) { - found = acl; - break; - } - } - assertNotNull("Did not find digest entry in ACLs " + aclset, found); - zkClientConf.set(KEY_REGISTRY_USER_ACCOUNTS, - "sasl:somebody@EXAMPLE.COM, sasl:other"); - RegistryOperations operations = - RegistryOperationsFactory.createAuthenticatedInstance(zkClientConf, - id, - pass); - addToTeardown(operations); - operations.start(); - RegistryOperationsClient operationsClient = - (RegistryOperationsClient) operations; - List digestClientACLs = operationsClient.getClientAcls(); - LOG.info("digest client ACLs=\n{}", - RegistrySecurity.aclsToString(digestClientACLs)); - operations.stat(base); - operations.mknode(base + "/subdir", false); - ZKPathDumper pathDumper = registryAdmin.dumpPath(true); - LOG.info(pathDumper.toString()); - } - - @Test(expected = IllegalArgumentException.class) - public void testNoDigestAuthMissingId() throws Throwable { - RegistryOperationsFactory.createAuthenticatedInstance(zkClientConf, - "", - "pass"); - } - - @Test(expected = ServiceStateException.class) - public void testNoDigestAuthMissingId2() throws Throwable { - zkClientConf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST); - zkClientConf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, ""); - zkClientConf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, "pass"); - 
RegistryOperationsFactory.createInstance("DigestRegistryOperations", - zkClientConf); - } - - @Test(expected = IllegalArgumentException.class) - public void testNoDigestAuthMissingPass() throws Throwable { - RegistryOperationsFactory.createAuthenticatedInstance(zkClientConf, - "id", - ""); - } - - @Test(expected = ServiceStateException.class) - public void testNoDigestAuthMissingPass2() throws Throwable { - zkClientConf.set(KEY_REGISTRY_CLIENT_AUTH, REGISTRY_CLIENT_AUTH_DIGEST); - zkClientConf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, "id"); - zkClientConf.set(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, ""); - RegistryOperationsFactory.createInstance("DigestRegistryOperations", - zkClientConf); - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md index 512c011..b7b64ca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md @@ -208,6 +208,24 @@ If you are building from source code, make sure you use `-Pyarn-ui` in the `mvn` ``` +# Run with security +YARN service framework supports running in a secure(kerberized) environment. User needs to specify the kerberos principal name and keytab when they launch the service. +E.g. A typical configuration looks like below: +``` +{ + "name": "sample-service", + ... + ... + "kerberos_principal" : { + "principal_name" : "hdfs-demo@EXAMPLE.COM", + "keytab" : "hdfs:///etc/security/keytabs/hdfs.headless.keytab" + } +} +``` +* principal_name : the principal name of the user who launches the service +* keytab : URI of the keytab. It supports two modes: + * URI starts with `hdfs://`: The URI where the keytab is stored on hdfs. The keytab will be localized to each node by YARN. + * URI starts with `file://`: The URI where the keytab is stored on local host. It is assumed that admin pre-installs the keytabs on the local host before AM launches. # Try with Docker The above example is only for a non-docker container based service. YARN Service Framework also provides first-class support for managing docker based services. Most of the steps for managing docker based services are the same except that in docker the `Artifact` type for a component is `DOCKER` and the Artifact `id` is the name of the docker image. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md index c0e12c7..e224e5d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md @@ -291,6 +291,15 @@ The current state of the container of a service. |----|----|----|----|----| |state|enum of the state of the container|false|enum (INIT, STARTED, READY)|| +### KerberosPrincipal + +The kerberos principal info of the user who launches the service. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|principal_name|The principal name of the user who launches the service.|false|string|| +|keytab|The URI of the kerberos keytab. It supports two modes, URI starts with "hdfs://": A path on hdfs where the keytab is stored. 
The keytab will be localized by YARN to each host; URI starts with "file://": A path on the local host where the keytab is stored. It is assumed that the keytabs are pre-installed by admins before AM launches.|false|string|| + ### PlacementPolicy @@ -342,7 +351,7 @@ a service resource has the following attributes. |state|State of the service. Specifying a value for this attribute for the PUT payload means update the service to this desired state.|false|ServiceState|| |quicklinks|A blob of key-value pairs of quicklinks to be exported for a service.|false|object|| |queue|The YARN queue that this service should be submitted to.|false|string|| - +|kerberos_principal|The Kerberos principal info of the user who launches the service.|false|KerberosPrincipal|| ### ServiceState
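To tie the `KerberosPrincipal` definition above to runtime behavior, here is a hedged sketch of the scheme-based branching an AM could perform for the two documented keytab modes; the localized-path convention in the `hdfs://` branch is an assumption for illustration, not this patch's actual code:

```java
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public final class KeytabLoginSketch {

  /**
   * Logs the current process in from a keytab spec of the form
   * hdfs://... or file://..., mirroring the two modes described above.
   */
  public static void login(String principal, String keytabUri)
      throws IOException {
    URI uri = URI.create(keytabUri);
    if ("file".equals(uri.getScheme())) {
      // Keytab pre-installed by admins on the local host: use it in place.
      UserGroupInformation.loginUserFromKeytab(principal, uri.getPath());
    } else if ("hdfs".equals(uri.getScheme())) {
      // Keytab stored on HDFS: YARN localizes it into the container's
      // working directory, so log in from the localized copy. The file
      // name being preserved on localization is an assumption here.
      UserGroupInformation.loginUserFromKeytab(principal,
          new Path(uri).getName());
    } else {
      throw new IOException("Unsupported keytab URI scheme: " + keytabUri);
    }
  }
}
```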