diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java index d3ea2e7..558ebcb 100644 --- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java +++ b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java @@ -70,6 +70,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Properties; import java.util.Set; @@ -109,6 +110,11 @@ */ public class MiniKdc { + public static final String JAVA_SECURITY_KRB5_CONF = + "java.security.krb5.conf"; + public static final String SUN_SECURITY_KRB5_DEBUG = + "sun.security.krb5.debug"; + public static void main(String[] args) throws Exception { if (args.length < 4) { System.out.println("Arguments: " + @@ -266,7 +272,8 @@ public MiniKdc(Properties conf, File workDir) throws Exception { } String orgName= conf.getProperty(ORG_NAME); String orgDomain = conf.getProperty(ORG_DOMAIN); - realm = orgName.toUpperCase() + "." + orgDomain.toUpperCase(); + realm = orgName.toUpperCase(Locale.ENGLISH) + "." + + orgDomain.toUpperCase(Locale.ENGLISH); } /** @@ -355,8 +362,8 @@ private void initDirectoryService() throws Exception { ds.addLast(new KeyDerivationInterceptor()); // create one partition - String orgName= conf.getProperty(ORG_NAME).toLowerCase(); - String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase(); + String orgName= conf.getProperty(ORG_NAME).toLowerCase(Locale.ENGLISH); + String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase(Locale.ENGLISH); JdbmPartition partition = new JdbmPartition(ds.getSchemaManager()); partition.setId(orgName); @@ -387,10 +394,10 @@ private void initKDCServer() throws Exception { String orgDomain = conf.getProperty(ORG_DOMAIN); String bindAddress = conf.getProperty(KDC_BIND_ADDRESS); final Map map = new HashMap(); - map.put("0", orgName.toLowerCase()); - map.put("1", orgDomain.toLowerCase()); - map.put("2", orgName.toUpperCase()); - map.put("3", orgDomain.toUpperCase()); + map.put("0", orgName.toLowerCase(Locale.ENGLISH)); + map.put("1", orgDomain.toLowerCase(Locale.ENGLISH)); + map.put("2", orgName.toUpperCase(Locale.ENGLISH)); + map.put("3", orgDomain.toUpperCase(Locale.ENGLISH)); map.put("4", bindAddress); ClassLoader cl = Thread.currentThread().getContextClassLoader(); @@ -455,9 +462,9 @@ private void initKDCServer() throws Exception { FileUtils.writeStringToFile(krb5conf, MessageFormat.format(sb.toString(), getRealm(), getHost(), Integer.toString(getPort()), System.getProperty("line.separator"))); - System.setProperty("java.security.krb5.conf", krb5conf.getAbsolutePath()); + System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5conf.getAbsolutePath()); - System.setProperty("sun.security.krb5.debug", conf.getProperty(DEBUG, + System.setProperty(SUN_SECURITY_KRB5_DEBUG, conf.getProperty(DEBUG, "false")); // refresh the config @@ -481,8 +488,8 @@ private void initKDCServer() throws Exception { */ public synchronized void stop() { if (kdc != null) { - System.getProperties().remove("java.security.krb5.conf"); - System.getProperties().remove("sun.security.krb5.debug"); + System.getProperties().remove(JAVA_SECURITY_KRB5_CONF); + System.getProperties().remove(SUN_SECURITY_KRB5_DEBUG); kdc.stop(); try { ds.shutdown(); @@ -520,8 +527,8 @@ public synchronized void createPrincipal(String principal, String password) throws Exception { String 
orgName= conf.getProperty(ORG_NAME); String orgDomain = conf.getProperty(ORG_DOMAIN); - String baseDn = "ou=users,dc=" + orgName.toLowerCase() + ",dc=" + - orgDomain.toLowerCase(); + String baseDn = "ou=users,dc=" + orgName.toLowerCase(Locale.ENGLISH) + + ",dc=" + orgDomain.toLowerCase(Locale.ENGLISH); String content = "dn: uid=" + principal + "," + baseDn + "\n" + "objectClass: top\n" + "objectClass: person\n" + diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index a121faf..c181703 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -222,6 +222,11 @@ org.apache.hadoop + hadoop-yarn-registry + ${project.version} + + + org.apache.hadoop hadoop-yarn-server-nodemanager ${project.version} diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index b1dfb1e..4fc8afb 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -351,4 +351,14 @@ + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml index d91c67b..a9b6952 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml @@ -97,6 +97,11 @@ org.apache.hadoop hadoop-yarn-client + + + org.apache.hadoop + hadoop-yarn-registry + org.apache.hadoop diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index 2451030..70c24ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -57,6 +57,7 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.ApplicationConstants; @@ -93,6 +94,13 @@ import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.registry.client.binding.BindingUtils; +import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.yarn.registry.client.services.RegistryOperationsService; +import org.apache.hadoop.yarn.registry.client.api.CreateFlags; +import org.apache.hadoop.yarn.registry.client.types.PersistencePolicies; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; 
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.log4j.LogManager; @@ -270,6 +278,9 @@ private final String linux_bash_command = "bash"; private final String windows_command = "cmd /c"; + @VisibleForTesting + RegistryOperationsService registryOperations; + /** * @param args Command line args */ @@ -570,6 +581,52 @@ public void run() throws YarnException, IOException { RegisterApplicationMasterResponse response = amRMClient .registerApplicationMaster(appMasterHostname, appMasterRpcPort, appMasterTrackingUrl); + + // Register with the YARN registry if it is enabled + boolean registryEnabled = + conf.getBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, + RegistryConstants.DEFAULT_REGISTRY_ENABLED); + if (registryEnabled) { + LOG.info("Registering Service"); + registryOperations = new RegistryOperationsService(); + registryOperations.init(conf); + registryOperations.start(); + ServiceRecord serviceRecord = new ServiceRecord(); + String attemptID = this.appAttemptID.toString(); + String appId = this.appAttemptID.getApplicationId().toString(); + + serviceRecord.id = attemptID; + serviceRecord.persistence = PersistencePolicies.APPLICATION_ATTEMPT; + serviceRecord.description = "Distributed Shell"; + // if this service offered external RPC/Web access, they + // can be added to the service record + + String username = BindingUtils.currentUser(); + String serviceClass = DSConstants.SERVICE_CLASS_DISTRIBUTED_SHELL; + String serviceName = RegistryPathUtils.encodeYarnID(appId); + String path = + BindingUtils.servicePath(username, serviceClass, serviceName); + registryOperations.mknode(RegistryPathUtils.parentOf(path), true); + // app attempt entry + registryOperations.create(path + "-attempt", serviceRecord, + CreateFlags.OVERWRITE); + LOG.info("Registered " + serviceRecord + " at " + path ); + + serviceRecord.id = appId; + serviceRecord.persistence = PersistencePolicies.APPLICATION; + registryOperations.create(path + "-app", serviceRecord, + CreateFlags.OVERWRITE); + + // register one that is not deleted +// serviceRecord.id = appId; + serviceRecord.id = "persisting"; + serviceRecord.persistence = PersistencePolicies.PERMANENT; + registryOperations.create(path + "-permanent", serviceRecord, + CreateFlags.OVERWRITE); + } + + + // Dump out information about cluster capability as seen by the // resource manager int maxMem = response.getMaximumResourceCapability().getMemory(); @@ -679,6 +736,8 @@ protected boolean finish() { } amRMClient.stop(); + ServiceOperations.stop(registryOperations); + ServiceOperations.stop(timelineClient); return success; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index f3ce64c..6b94b4b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -213,8 +213,8 @@ public static void main(String[] args) { */ public Client(Configuration conf) throws Exception { this( - 
"org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster", - conf); + "org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster", + conf); } Client(String appMasterMainClass, Configuration conf) { @@ -670,7 +670,7 @@ public boolean run() throws IOException, YarnException { * @throws YarnException * @throws IOException */ - private boolean monitorApplication(ApplicationId appId) + protected boolean monitorApplication(ApplicationId appId) throws YarnException, IOException { while (true) { @@ -720,7 +720,7 @@ else if (YarnApplicationState.KILLED == state return false; } - if (System.currentTimeMillis() > (clientStartTime + clientTimeout)) { + if (timedOut()) { LOG.info("Reached client specified timeout for application. Killing application"); forceKillApplication(appId); return false; @@ -730,12 +730,46 @@ else if (YarnApplicationState.KILLED == state } /** + * Get the YARN client + * @return the client + */ + public YarnClient getYarnClient() { + return yarnClient; + } + + /** + * Get the client's configuration + * @return the configuration + */ + public Configuration getConf() { + return conf; + } + + public long getClientTimeout() { + return clientTimeout; + } + + public void setClientTimeout(long clientTimeout) { + this.clientTimeout = clientTimeout; + } + + /** + * Query the clock and timeout settings to decide + * whether or not the current run has timed ut + * @return true if the client's monitorApplication() operation + * has taken too long. + */ + protected boolean timedOut() { + return System.currentTimeMillis() > (clientStartTime + clientTimeout); + } + + /** * Kill a submitted application by sending a call to the ASM * @param appId Application Id to be killed. * @throws YarnException * @throws IOException */ - private void forceKillApplication(ApplicationId appId) + protected void forceKillApplication(ApplicationId appId) throws YarnException, IOException { // TODO clarify whether multiple jobs with the same app id can be submitted and be running at // the same time. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/DSConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/DSConstants.java index 5912f14..67f9a77 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/DSConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/DSConstants.java @@ -44,4 +44,10 @@ * Used to validate the local resource. 
*/ public static final String DISTRIBUTEDSHELLSCRIPTLEN = "DISTRIBUTEDSHELLSCRIPTLEN"; + + /** + * Service class used when registering the service + */ + public static final String SERVICE_CLASS_DISTRIBUTED_SHELL = + "org-apache-hadoop-distributedshell"; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index 6dff94c..4db27a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -32,6 +32,15 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.registry.client.binding.BindingUtils; +import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.yarn.registry.client.services.RegistryOperationsService; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; import org.junit.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -69,14 +78,16 @@ @Before public void setup() throws Exception { LOG.info("Starting up YARN cluster"); + conf.setBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, true); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128); conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class); conf.set("yarn.log.dir", "target"); conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); if (yarnCluster == null) { + // create a minicluster with the registry enabled yarnCluster = new MiniYARNCluster( - TestDistributedShell.class.getSimpleName(), 1, 1, 1, 1, true); + TestDistributedShell.class.getSimpleName(), 1, 1, 1, 1, true, true); yarnCluster.init(conf); yarnCluster.start(); NodeManager nm = yarnCluster.getNodeManager(0); @@ -305,7 +316,7 @@ public void testDSRestartWithPreviousRunningContainers() throws Exception { LOG.info("Client run completed. 
Result=" + result); // application should succeed - Assert.assertTrue(result); + Assert.assertTrue("client failed", result); } /* @@ -846,5 +857,259 @@ private int verifyContainerLog(int containerNum, } return numOfWords; } + + @Test(timeout = 90000) + public void testRegistryOperations() throws Exception { + + // create a client config with an aggressive timeout policy + Configuration clientConf = new Configuration(yarnCluster.getConfig()); + clientConf.setInt(RegistryConstants.KEY_REGISTRY_ZK_CONNECTION_TIMEOUT, 1000); + clientConf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_TIMES, 1); + clientConf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_CEILING, 1); + clientConf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_INTERVAL, 500); + clientConf.setInt(RegistryConstants.KEY_REGISTRY_ZK_SESSION_TIMEOUT, 2000); + + // create a registry operations instance + RegistryOperationsService regOps = new RegistryOperationsService(); + regOps.init(clientConf); + regOps.start(); + LOG.info("Registry Binding: " + regOps); + + // do a simple registry operation to verify that it is live + regOps.list("/"); + // check the system dir is present + regOps.list(RegistryConstants.PATH_SYSTEM_SERVICES); + // check the users dir is present + regOps.list(RegistryConstants.PATH_USERS); + + try { + String[] args = { + "--jar", + APPMASTER_JAR, + "--num_containers", + "1", + "--shell_command", + "sleep 15", + "--master_memory", + "512", + "--container_memory", + "128", + }; + + LOG.info("Initializing DS Client"); + RegistryMonitoringClient client = + new RegistryMonitoringClient(clientConf); + + client.init(args); + LOG.info("Running DS Client"); + boolean result; + try { + result = client.run(); + } finally { + client.stop(); + } + + LOG.info("Client run completed. Result=" + result); + + // application should have found service records + ServiceRecord serviceRecord = client.appAttemptRecord; + LOG.info("Service record = " + serviceRecord); + IOException lookupException = + client.lookupException; + if (serviceRecord == null && lookupException != null) { + LOG.error("Lookup of " + client.servicePath + + " failed with " + lookupException, lookupException); + throw lookupException; + } + + // the app should have succeeded or returned a failure message + if (!result) { + Assert.fail("run returned false: " + client.failureText); + } + + // the app-level record must have been retrieved + Assert.assertNotNull("No application record at " + client.appRecordPath, + client.appRecord); + + // sleep to let some async operations in the RM continue + Thread.sleep(10000); + // after the app finishes its records should have been purged + assertDeleted(regOps, client.appRecordPath); + assertDeleted(regOps, client.servicePath); + } finally { + ServiceOperations.stop(regOps); + } + } + + protected void assertDeleted(RegistryOperationsService regOps, + String path) throws IOException { + try { + ServiceRecord record = regOps.resolve(path); + Assert.fail("Expected the record at " + path + " to have been purged," + + " but found " + record); + } catch (PathNotFoundException expected) { + // expected + } + } + + + /** + * This is a subclass of the distributed shell client which + * monitors the registry as well as the YARN app status + */ + private class RegistryMonitoringClient extends Client { + private String servicePath; + private ServiceRecord permanentRecord; + private String permanentPath; + private IOException lookupException; + private ServiceRecord appAttemptRecord; + private String appAttemptPath; + + private ServiceRecord 
ephemeralRecord; + private String ephemeralPath; + + private ServiceRecord appRecord; + private String appRecordPath; + + + private String failureText; + private ApplicationReport report; + private final RegistryOperationsService regOps; + + private RegistryMonitoringClient(Configuration conf) throws Exception { + super(conf); + // client timeout of 30s for the test runs + setClientTimeout(30000); + regOps = new RegistryOperationsService(); + regOps.init(getConf()); + regOps.start(); + } + + public void stop() { + ServiceOperations.stopQuietly(regOps); + } + + + @Override + protected boolean monitorApplication(ApplicationId appId) + throws YarnException, IOException { + + String username = BindingUtils.currentUser(); + String serviceClass = DSConstants.SERVICE_CLASS_DISTRIBUTED_SHELL; + String serviceName = RegistryPathUtils.encodeYarnID(appId.toString()); + servicePath = + BindingUtils.servicePath(username, serviceClass, serviceName); + appAttemptPath = servicePath + "-attempt"; + ephemeralPath = servicePath + "-ephemeral"; + appRecordPath = servicePath + "-app"; + permanentPath = servicePath + "-permanent"; + + YarnClient yarnClient = getYarnClient(); + + while (!timedOut()) { + + // Poll the app status every 500 milliseconds. + try { + Thread.sleep(500); + } catch (InterruptedException e) { + LOG.debug("Thread sleep in monitoring loop interrupted"); + } + + // Get application report for the appId we are interested in + report = yarnClient.getApplicationReport(appId); + + YarnApplicationState state = + report.getYarnApplicationState(); + switch (state) { + + case NEW: + case NEW_SAVING: + case SUBMITTED: + case ACCEPTED: + continue; + + // running, extract service records if not already done + case RUNNING: + try { + permanentRecord = maybeResolve(permanentRecord, permanentPath); + // successful lookup, so discard any failure + lookupException = null; + } catch (PathNotFoundException e) { + lookupException = e; + } + appRecord = maybeResolveQuietly(appRecord, appRecordPath); + appAttemptRecord = maybeResolveQuietly(appAttemptRecord, + appAttemptPath); + ephemeralRecord = maybeResolveQuietly(ephemeralRecord, + ephemeralPath); + continue; + + case FINISHED: + // completed + boolean read = permanentRecord != null; + if (!read) { + failureText = "Permanent record was not resolved"; + } + return read; + + case KILLED: + failureText = "Application Killed: " + report.getDiagnostics(); + return false; + + case FAILED: + failureText = "Application Failed: " + report.getDiagnostics(); + return false; + + default: + break; + } + + } + + if (timedOut()) { + failureText = "Timed out: Killing application"; + forceKillApplication(appId); + } + return false; + } + + /** + * Resolve a record if it has not been resolved already + * @param r record + * @param path path + * @return r if it was non-null, else the resolved record + * @throws IOException on any failure + */ + ServiceRecord maybeResolve(ServiceRecord r, String path) throws IOException { + if (r == null) { + ServiceRecord record = regOps.resolve(path); + LOG.info("Resolved at " + path + ": " + record); + return record; + } + return r; + } + + /** + * Resolve a record if it has not been resolved already, ignoring + * any PathNotFoundException exceptions.
+ * @param r record + * @param path path + * @return r if it was non-null, a resolved record if it was found, + * or null if the resolution failed with a PathNotFoundException + * @throws IOException on any failure + */ + ServiceRecord maybeResolveQuietly(ServiceRecord r, String path) throws + IOException { + try { + return maybeResolve(r, path); + } catch (PathNotFoundException ignored) { + // ignored + } + return r; + } + + } // end of class RegistryMonitoringClient + + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index a2c3fd0..5e0cc49 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -1368,4 +1368,130 @@ yarn.nodemanager.container-monitor.procfs-tree.smaps-based-rss.enabled false + + + + + + Is the registry enabled: does the RM start it up, + create the user and system paths, and purge + service records when containers, application attempts + and applications complete + + hadoop.registry.rm.enabled + false + + + + + Root path in the ZK tree for the registry + + hadoop.registry.zk.root + /registry + + + + + Zookeeper session timeout in milliseconds + + hadoop.registry.zk.session.timeout.ms + 60000 + + + + + Zookeeper connection timeout in milliseconds + + hadoop.registry.zk.connection.timeout.ms + 15000 + + + + + Zookeeper connection retry count before failing + + hadoop.registry.zk.retry.times + 5 + + + + + Interval in milliseconds between zookeeper connection retries + + hadoop.registry.zk.retry.interval.ms + 1000 + + + + + Zookeeper retry limit in milliseconds, during + exponential backoff. + + This places a limit even + if the retry times and interval limit, combined + with the backoff policy, result in a long retry + period. + + hadoop.registry.zk.retry.ceiling.ms + 60000 + + + + + List of hostname:port pairs defining the + zookeeper quorum binding for the registry + + hadoop.registry.zk.quorum + localhost:2181 + + + + + Key to set if the registry is secure. Turning it on + changes the permissions policy from "open access" + to restrictions on kerberos with the option of + a user adding one or more auth key pairs down their + own tree. + + hadoop.registry.secure + false + + + + + A comma separated list of Zookeeper ACL identifiers with + system access to the registry in a secure cluster. + + These are given full access to all entries. + + If there is an "@" at the end of a SASL entry it + instructs the registry client to append the default kerberos domain. + + hadoop.registry.system.acls + sasl:yarn@, sasl:mapred@, sasl:hdfs@ + + + + + The kerberos realm: used to set the realm of + system principals which do not declare their realm, + and any other accounts that need the value. + + If empty, the default realm of the running process + is used. + + If neither are known and the realm is needed, then the registry + service/client will fail. + + hadoop.registry.kerberos.realm + + + + + + Key to define the JAAS context.
Used in secure + mode + + hadoop.registry.jaas.context + Client + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml new file mode 100644 index 0000000..3fd8590 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml @@ -0,0 +1,217 @@ + + + + + hadoop-yarn + org.apache.hadoop + 3.0.0-SNAPSHOT + + 4.0.0 + hadoop-yarn-registry + 3.0.0-SNAPSHOT + hadoop-yarn-registry + + + + ${project.parent.basedir} + + + + + + org.apache.hadoop + hadoop-yarn-api + + + + org.apache.hadoop + hadoop-yarn-common + + + + org.apache.hadoop + hadoop-common + + + + + org.apache.hadoop + hadoop-common + test-jar + test + + + + + org.apache.hadoop + hadoop-minikdc + test + + + + org.codehaus.jackson + jackson-core-asl + compile + + + + org.codehaus.jackson + jackson-mapper-asl + compile + + + + commons-httpclient + commons-httpclient + test + + + + junit + junit + test + + + + org.apache.curator + curator-framework + + + + org.apache.curator + curator-test + test + + + + + + + + + ${basedir}/src/main/resources + + yarn-version-info.properties + + false + + + ${basedir}/src/main/resources + + yarn-version-info.properties + + true + + + + + org.apache.rat + apache-rat-plugin + + + + + + + org.apache.hadoop + hadoop-maven-plugins + + + version-info + generate-resources + + version-info + + + + ${basedir}/src/main + + java/**/*.java + + + + + + + + + maven-jar-plugin + + + + test-jar + + test-compile + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/CreateFlags.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/CreateFlags.java new file mode 100644 index 0000000..0ef7743 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/CreateFlags.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.api; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Combinable Flags to use when creating a service entry. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface CreateFlags { + + /** + * Create the entry.. This is just "0" and can be "or"ed with anything + */ + int CREATE = 0; + + /** + * The entry should be created even if an existing entry is there. + */ + int OVERWRITE = 1; + + + /** + * The entry is ephemeral, when this session is closed the entry + * will be deleted. + */ + // this is not currently in use, so commented out. 
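+ // If it were restored, it would combine with the other flags in the + // usual way, for example create(path, record, OVERWRITE | EPHEMERAL); + // that call is illustrative only.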
+ //int EPHEMERAL = 2; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/RegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/RegistryConstants.java new file mode 100644 index 0000000..3ea37c4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/RegistryConstants.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.api; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.zookeeper.ZooDefs; + +import java.io.IOException; + +/** + * Constants for the registry, including configuration keys and default + * values. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface RegistryConstants { + + /** + * prefix for registry configuration options: {@value}. + * Why hadoop. and not YARN? It can + * live outside YARN + */ + String REGISTRY_PREFIX = "hadoop.registry."; + + + /** + * Prefix for zookeeper-specific options. For clients + * using other protocols, these options are not used. + */ + String ZK_PREFIX = REGISTRY_PREFIX + "zk."; + + /** + * flag to indicate whether or not the registry should + * be enabled: {@value} + */ + String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled"; + + boolean DEFAULT_REGISTRY_ENABLED = false; + + + /** + * Key to set if the registry is secure. Turning it on + * changes the permissions policy from "open access" + * to restrictions on kerberos with the option of + * a user adding one or more auth key pairs down their + * own tree. 
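+ * For example (illustrative), a user could add an entry such as + * {@code sasl:alice@EXAMPLE.COM} to grant that principal access to + * their own subtree.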
+ */ + String KEY_REGISTRY_SECURE = REGISTRY_PREFIX + "secure"; + + /** + * Default registry security policy: {@value} + */ + boolean DEFAULT_REGISTRY_SECURE = false; + + /** + * Root path in the ZK tree for the registry: {@value} + */ + String KEY_REGISTRY_ZK_ROOT = ZK_PREFIX + "root"; + + /** + * Default root of the yarn registry: {@value} + */ + String DEFAULT_ZK_REGISTRY_ROOT = "/registry"; + + + /** + * List of hostname:port pairs defining the + * zookeeper quorum binding for the registry: {@value} + */ + String KEY_REGISTRY_ZK_QUORUM = ZK_PREFIX + "quorum"; + + /** + * The default zookeeper quorum binding for the registry: {@value} + */ + String DEFAULT_REGISTRY_ZK_QUORUM = "localhost:2181"; + + /** + * Zookeeper session timeout in milliseconds: {@value} + */ + String KEY_REGISTRY_ZK_SESSION_TIMEOUT = + ZK_PREFIX + "session.timeout.ms"; + + /** + * The default ZK session timeout: {@value} + */ + int DEFAULT_ZK_SESSION_TIMEOUT = 60000; + + /** + * Zookeeper connection timeout in milliseconds: {@value} + */ + String KEY_REGISTRY_ZK_CONNECTION_TIMEOUT = + ZK_PREFIX + "connection.timeout.ms"; + + /** + * The default ZK connection timeout: {@value} + */ + int DEFAULT_ZK_CONNECTION_TIMEOUT = 15000; + + /** + * Zookeeper connection retry count before failing: {@value} + */ + String KEY_REGISTRY_ZK_RETRY_TIMES = ZK_PREFIX + "retry.times"; + + /** + * The default # of times to retry a ZK connection: {@value} + */ + int DEFAULT_ZK_RETRY_TIMES = 5; + + + /** + * Zookeeper connection retry interval in milliseconds: {@value} + */ + String KEY_REGISTRY_ZK_RETRY_INTERVAL = + ZK_PREFIX + "retry.interval.ms"; + + + /** + * The default interval between connection retries: {@value} + */ + int DEFAULT_ZK_RETRY_INTERVAL = 1000; + + /** + * Zookeeper retry limit in milliseconds, during + * exponential backoff: {@value} + * + * This places a limit even + * if the retry times and interval limit, combined + * with the backoff policy, result in a long retry + * period + * + */ + String KEY_REGISTRY_ZK_RETRY_CEILING = + ZK_PREFIX + "retry.ceiling.ms"; + + /** + * Default limit on retries: {@value} + */ + int DEFAULT_ZK_RETRY_CEILING = 60000; + + + /** + * A comma separated list of Zookeeper ACL identifiers with + * system access to the registry in a secure cluster: {@value}. + * + * These are given full access to all entries. + * + * If there is an "@" at the end of an entry it + * instructs the registry client to append the default kerberos domain. + */ + String KEY_REGISTRY_SYSTEM_ACLS = REGISTRY_PREFIX + "system.acls"; + + /** + * Default system ACLs: {@value} + */ + String DEFAULT_REGISTRY_SYSTEM_ACLS = "sasl:yarn@, sasl:mapred@, sasl:hdfs@"; + + /** + * The kerberos realm: used to set the realm of + * system principals which do not declare their realm, + * and any other accounts that need the value. + * + * If empty, the default realm of the running process + * is used. + * + * If neither are known and the realm is needed, then the registry + * service/client will fail. + */ + String KEY_REGISTRY_KERBEROS_REALM = REGISTRY_PREFIX + "kerberos.realm"; + + + /** + * Key to define the JAAS context. Used in secure registries.
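+ * The value names a JAAS login configuration entry; the default, + * {@code Client}, matches the entry name ZooKeeper clients conventionally + * use (a deployment assumption, not a registry requirement).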
+ */ + String KEY_REGISTRY_CLIENT_JAAS_CONTEXT = REGISTRY_PREFIX + "jaas.context"; + + /** + * default registry JAAS context: {@value} + */ + String DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT = "Client"; + + + /** + * path to users off the root: {@value} + */ + String PATH_USERS = "/users/"; + /** + * path to system services off the root : {@value} + */ + String PATH_SYSTEM_SERVICES = "/services/"; + /** + * path under a service record to point to components of that service: + * {@value} + */ + String SUBPATH_COMPONENTS = "/components"; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/RegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/RegistryOperations.java new file mode 100644 index 0000000..526e36d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/api/RegistryOperations.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.api; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.service.Service; +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidPathnameException; +import org.apache.hadoop.yarn.registry.client.exceptions.NoChildrenForEphemeralsException; +import org.apache.hadoop.yarn.registry.client.types.RegistryPathStatus; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; + +import java.io.IOException; + +/** + * Registry Operations + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface RegistryOperations extends Service { + + /** + * Create a path. + * + * It is not an error if the path exists already, be it empty or not. + * + * The createParents flag also requests creating the parents. + * As entries in the registry can hold data while still having + * child entries, it is not an error if any of the parent path + * elements have service records. + * + * @param path path to create + * @param createParents also create the parents. + * @throws PathNotFoundException parent path is not in the registry. + * @throws AccessControlException access permission failure. + * @throws InvalidPathnameException path name is invalid. + * @throws IOException Any other IO Exception. + * @return true if the path was created, false if it existed. 
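+ * <p> + * Illustrative use: {@code mknode("/users/alice/org-apache-hadoop-distributedshell", true)} + * creates the service class directory and any missing parents; the + * username in this path is an example, not a reserved name.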
+ */ + boolean mknode(String path, boolean createParents) + throws PathNotFoundException, + AccessControlException, + InvalidPathnameException, + IOException; + + /** + * Set a service record at an entry + * @param path path to service record + * @param record service record to create/update + * @param createFlags creation flags + * @throws PathNotFoundException the parent path does not exist + * @throws FileAlreadyExistsException path exists but create flags + * do not include "overwrite" + * @throws AccessControlException access permission failure. + * @throws InvalidPathnameException path name is invalid. + * @throws IOException Any other IO Exception. + */ + void create(String path, ServiceRecord record, int createFlags) + throws PathNotFoundException, + FileAlreadyExistsException, + AccessControlException, + InvalidPathnameException, + IOException; + + + /** + * Resolve the record at a path + * @param path path to service record + * @return the record + * @throws PathNotFoundException path is not in the registry. + * @throws AccessControlException security restriction. + * @throws InvalidPathnameException the path is invalid. + * @throws IOException Any other IO Exception + */ + ServiceRecord resolve(String path) throws PathNotFoundException, + AccessControlException, + InvalidPathnameException, + IOException; + + /** + * Get the status of a path + * @param path path to query + * @return the status of the path + * @throws PathNotFoundException path is not in the registry. + * @throws AccessControlException security restriction. + * @throws InvalidPathnameException the path is invalid. + * @throws IOException Any other IO Exception + */ + RegistryPathStatus stat(String path) + throws PathNotFoundException, + AccessControlException, + InvalidPathnameException, + IOException; + + /** + * List children of a directory + * @param path path + * @return a possibly empty array of child entries + * @throws PathNotFoundException path is not in the registry. + * @throws AccessControlException security restriction. + * @throws InvalidPathnameException the path is invalid. + * @throws IOException Any other IO Exception + */ + RegistryPathStatus[] list(String path) + throws PathNotFoundException, + AccessControlException, + InvalidPathnameException, + IOException; + + /** + * Delete a path. + * + * If the operation returns without an error then the entry has been + * deleted. + * @param path path to delete + * @param recursive recursive flag + * @throws PathNotFoundException path is not in the registry. + * @throws AccessControlException security restriction. + * @throws InvalidPathnameException the path is invalid. + * @throws PathIsNotEmptyDirectoryException path has child entries, but + * recursive is false.
+ * @throws IOException Any other IO Exception + * + */ + void delete(String path, boolean recursive) + throws PathNotFoundException, + PathIsNotEmptyDirectoryException, + AccessControlException, + InvalidPathnameException, + IOException; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/BindingUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/BindingUtils.java new file mode 100644 index 0000000..f83e3c9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/BindingUtils.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.binding; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; + +import java.io.IOException; + +/** + * Methods for binding paths according to the recommended layout, and for + * extracting some of the content + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class BindingUtils { + + /** + * Build the user path; switches to the system path if the user is "".
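+ * (Illustratively, {@code userPath("alice")} resolves under {@code /users/}, + * while {@code userPath("")} returns the system services path.)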
+ * It also cross-converts the username to ASCII via punycode + * @param user username or "" + * @return the path to the user + */ + public static String userPath(String user) { + Preconditions.checkArgument(user != null, "null user"); + if (user.isEmpty()) { + return RegistryConstants.PATH_SYSTEM_SERVICES; + } + + return RegistryPathUtils.join(RegistryConstants.PATH_USERS, + RegistryPathUtils.encodeForRegistry(user)); + } + + /** + * Build the path to a service class + * @param user username or "" + * @param serviceClass service class name + * @return a full path + */ + public static String serviceclassPath(String user, + String serviceClass) { + + return RegistryPathUtils.join(userPath(user), + serviceClass); + } + + + /** + * Get the current user path formatted for the registry + * @return the encoded shortname of the current user + * @throws IOException on a failure to determine the current user + */ + public static String currentUser() throws IOException { + return RegistryPathUtils.encodeForRegistry( + UserGroupInformation.getCurrentUser().getShortUserName()); + } + + /** + * Create a path to a service under a user & service class + * @param user username or "" + * @param serviceClass service class name + * @param serviceName service name unique for that user & service class + * @return a full path + */ + public static String servicePath(String user, + String serviceClass, + String serviceName) { + + return RegistryPathUtils.join( + serviceclassPath(user, serviceClass), + serviceName); + } + + /** + * Create a path for listing components under a service + * @param user username or "" + * @param serviceClass service class name + * @param serviceName service name unique for that user & service class + * @return a full path + */ + public static String componentListPath(String user, + String serviceClass, String serviceName) { + + return RegistryPathUtils.join(servicePath(user, serviceClass, serviceName), + RegistryConstants.SUBPATH_COMPONENTS); + } + + /** + * Create the path to a service record for a component + * @param user username or "" + * @param serviceClass service class name + * @param serviceName service name unique for that user & service class + * @param componentName unique name/ID of the component + * @return a full path + */ + public static String componentPath(String user, + String serviceClass, String serviceName, String componentName) { + + return RegistryPathUtils.join( + componentListPath(user, serviceClass, serviceName), + componentName); + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/JsonSerDeser.java new file mode 100644 index 0000000..2e4db1c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/JsonSerDeser.java @@ -0,0 +1,323 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.binding; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidRecordException; +import org.codehaus.jackson.JsonGenerationException; +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.JsonProcessingException; +import org.codehaus.jackson.map.DeserializationConfig; +import org.codehaus.jackson.map.JsonMappingException; +import org.codehaus.jackson.map.ObjectMapper; +import org.codehaus.jackson.map.SerializationConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.EOFException; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.Arrays; + +/** + * Support for marshalling objects to and from JSON. + * It constructs an object mapper as an instance field and synchronizes + * access to the methods which use that mapper. + * @param <T> the type to marshal + */ +@InterfaceAudience.Private() +@InterfaceStability.Evolving +public class JsonSerDeser<T> { + + private static final Logger LOG = LoggerFactory.getLogger(JsonSerDeser.class); + private static final String UTF_8 = "UTF-8"; + + private final Class<T> classType; + private final ObjectMapper mapper; + private final byte[] header; + + /** + * Create an instance bound to a specific type + * @param classType class to marshall + * @param header byte array to use as header + */ + public JsonSerDeser(Class<T> classType, byte[] header) { + Preconditions.checkArgument(classType != null, "null classType"); + Preconditions.checkArgument(header != null, "null header"); + this.classType = classType; + this.mapper = new ObjectMapper(); + mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, + false); + // make an immutable copy to keep findbugs happy.
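+ // (a defensive copy: without it the caller could mutate the shared array + // after construction, corrupting the header this class prepends and checks)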
+ byte[] h = new byte[header.length]; + System.arraycopy(header, 0, h, 0, header.length); + this.header = h; + } + + public String getName() { + return classType.getSimpleName(); + } + + /** + * Convert from JSON + * + * @param json input + * @return the parsed JSON + * @throws IOException IO + * @throws JsonMappingException failure to map from the JSON to this class + */ + @SuppressWarnings("unchecked") + public synchronized T fromJson(String json) + throws IOException, JsonParseException, JsonMappingException { + try { + return mapper.readValue(json, classType); + } catch (IOException e) { + LOG.error("Exception while parsing json : " + e + "\n" + json, e); + throw e; + } + } + + /** + * Convert from a JSON file + * @param jsonFile input file + * @return the parsed JSON + * @throws IOException IO problems + * @throws JsonMappingException failure to map from the JSON to this class + */ + @SuppressWarnings("unchecked") + public synchronized T fromFile(File jsonFile) + throws IOException, JsonParseException, JsonMappingException { + try { + return mapper.readValue(jsonFile, classType); + } catch (IOException e) { + LOG.error("Exception while parsing json file {}: {}", jsonFile, e); + throw e; + } + } + + /** + * Convert from a JSON file + * @param resource input file + * @return the parsed JSON + * @throws IOException IO problems + * @throws JsonMappingException failure to map from the JSON to this class + */ + @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed"}) + public synchronized T fromResource(String resource) + throws IOException, JsonParseException, JsonMappingException { + InputStream resStream = null; + try { + resStream = this.getClass().getResourceAsStream(resource); + if (resStream == null) { + throw new FileNotFoundException(resource); + } + return mapper.readValue(resStream, classType); + } catch (IOException e) { + LOG.error("Exception while parsing json resource {}: {}", resource, e); + throw e; + } finally { + IOUtils.closeStream(resStream); + } + } + + /** + * clone by converting to JSON and back again. + * This is much less efficient than any Java clone process. + * @param instance instance to duplicate + * @return a new instance + * @throws IOException problems. 
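+ * <p> + * Equivalent, illustrative round trip: {@code T copy = fromJson(toJson(instance));}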
+ */ + public T fromInstance(T instance) throws IOException { + return fromJson(toJson(instance)); + } + + /** + * Load from a Hadoop filesystem + * @param fs filesystem + * @param path path + * @return the loaded instance + * @throws IOException IO problems + * @throws EOFException if not enough bytes were read in + * @throws JsonParseException parse problems + * @throws JsonMappingException O/J mapping problems + */ + public T load(FileSystem fs, Path path) + throws IOException, JsonParseException, JsonMappingException { + FileStatus status = fs.getFileStatus(path); + long len = status.getLen(); + byte[] b = new byte[(int) len]; + FSDataInputStream dataInputStream = fs.open(path); + int count = dataInputStream.read(b); + if (count != len) { + throw new EOFException(path.toString() + ": read finished prematurely"); + } + return fromBytes(path.toString(), b, 0); + } + + + /** + * Save an instance to a Hadoop filesystem + * @param fs filesystem + * @param path path + * @param instance instance to save + * @param overwrite should any existing file be overwritten + * @throws IOException IO exception + */ + public void save(FileSystem fs, Path path, T instance, + boolean overwrite) throws + IOException { + FSDataOutputStream dataOutputStream = fs.create(path, overwrite); + writeJsonAsBytes(instance, dataOutputStream); + } + + /** + * Write the JSON as bytes, then close the stream + * @param dataOutputStream an output stream that will always be closed + * @throws IOException on any failure + */ + private void writeJsonAsBytes(T instance, + DataOutputStream dataOutputStream) throws + IOException { + try { + byte[] b = toBytes(instance); + dataOutputStream.write(b); + } finally { + dataOutputStream.close(); + } + } + + /** + * Convert an instance to bytes + * @param instance instance to convert + * @return a byte array + * @throws IOException IO problems + */ + public byte[] toBytes(T instance) throws IOException { + String json = toJson(instance); + return json.getBytes(UTF_8); + } + + /** + * Convert an instance to bytes, inserting the header + * @param instance instance to convert + * @return a byte array + * @throws IOException IO problems + */ + public byte[] toByteswithHeader(T instance) throws IOException { + byte[] body = toBytes(instance); + + ByteBuffer buffer = ByteBuffer.allocate(body.length + header.length); + buffer.put(header); + buffer.put(body); + return buffer.array(); + } + + + /** + * Deserialize from a byte array + * @param path path the data came from + * @param bytes byte array + * @param offset offset in the array to read from + * @return the deserialized instance + * @throws IOException all problems + * @throws EOFException not enough data + * @throws InvalidRecordException if the parsing failed; the record is invalid + */ + public T fromBytes(String path, byte[] bytes, int offset) throws IOException, + InvalidRecordException { + int data = bytes.length - offset; + if (data <= 0) { + throw new EOFException("No data at " + path); + } + String json = new String(bytes, offset, data, UTF_8); + try { + return fromJson(json); + } catch (JsonProcessingException e) { + throw new InvalidRecordException(path, e.toString(), e); + } + } + + /** + * Read from a byte array to a type, checking the header first + * @param path source of data + * @param buffer buffer + * @return the parsed structure + * @throws IOException on a failure + */ + @SuppressWarnings("unchecked") + public T fromBytesWithHeader(String path, byte[] buffer) throws IOException { + int hlen = header.length; + int blen = buffer.length; + if (hlen > 0) { + if (blen < hlen) { + throw new InvalidRecordException(path, + "Record too short
for header of " + getName()); + } + byte[] magic = Arrays.copyOfRange(buffer, 0, hlen); + if (!Arrays.equals(header, magic)) { + throw new InvalidRecordException(path, + "Entry header does not match header of " + getName()); + } + } + return fromBytes(path, buffer, hlen); + } + + /** + * Check if a buffer has a header which matches this record type + * @param buffer buffer + * @return true if there is a match + * @throws IOException + */ + public boolean headerMatches(byte[] buffer) throws IOException { + int hlen = header.length; + int blen = buffer.length; + boolean matches = false; + if (blen > hlen) { + byte[] magic = Arrays.copyOfRange(buffer, 0, hlen); + matches = Arrays.equals(header, magic); + } + return matches; + } + + /** + * Convert an object to a JSON string + * @param instance instance to convert + * @return a JSON string description + * @throws JsonParseException parse problems + * @throws JsonMappingException O/J mapping problems + */ + public synchronized String toJson(T instance) throws IOException, + JsonGenerationException, + JsonMappingException { + mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true); + return mapper.writeValueAsString(instance); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RecordOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RecordOperations.java new file mode 100644 index 0000000..4159073 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RecordOperations.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RecordOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RecordOperations.java new file mode 100644 index 0000000..4159073 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RecordOperations.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.binding; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.registry.client.api.RegistryOperations; +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidRecordException; +import org.apache.hadoop.yarn.registry.client.types.RegistryPathStatus; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecordHeader; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.EOFException; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Support for operations on records + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RecordOperations { + private static final Logger LOG = LoggerFactory.getLogger(RecordOperations.class); + + public static class ServiceRecordMarshal extends JsonSerDeser<ServiceRecord> { + public ServiceRecordMarshal() { + super(ServiceRecord.class, ServiceRecordHeader.getData()); + } + } + + /** + * Extract all service records under a list of stat operations...this + * skips entries that are too short or simply not matching + * @param operations operation support for fetches + * @param stats list of stat results + * @return a possibly empty map of paths to service records + * @throws IOException for any IO operation that wasn't ignored. + */ + public static Map<String, ServiceRecord> extractServiceRecords(RegistryOperations operations, + RegistryPathStatus[] stats) throws IOException { + Map<String, ServiceRecord> results = new HashMap<String, ServiceRecord>(stats.length); + for (RegistryPathStatus stat : stats) { + if (stat.size > ServiceRecordHeader.getLength()) { + // maybe has data + try { + ServiceRecord serviceRecord = operations.resolve(stat.path); + results.put(stat.path, serviceRecord); + } catch (EOFException ignored) { + LOG.debug("data too short for {}", stat.path); + } catch (InvalidRecordException record) { + LOG.debug("Invalid record at {}", stat.path); + } + + } + } + return results; + + } + + +}
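As a usage sketch of extractServiceRecords (the path is illustrative and operations is assumed to be a live RegistryOperations instance):

    // List a directory, then pull out every entry that parses as a ServiceRecord
    RegistryPathStatus[] stats = operations.list("/users/alice/services");
    Map<String, ServiceRecord> records =
        RecordOperations.extractServiceRecords(operations, stats);
    for (Map.Entry<String, ServiceRecord> entry : records.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue().id);
    }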
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RegistryPathUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RegistryPathUtils.java new file mode 100644 index 0000000..72d0874 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RegistryPathUtils.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.binding; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidPathnameException; +import org.apache.hadoop.yarn.registry.client.services.RegistryInternalConstants; +import org.apache.zookeeper.common.PathUtils; + +import java.io.IOException; +import java.net.IDN; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Pattern; + +/** + * Low level zookeeper-related operations on paths + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class RegistryPathUtils { + + + private static final Pattern HOSTNAME = + Pattern.compile(RegistryInternalConstants.HOSTNAME_PATTERN); + + /** + * Validate ZK path with the path itself included in + * the exception text + * @param path path to validate + */ + public static String validateZKPath(String path) throws + InvalidPathnameException { + try { + PathUtils.validatePath(path); + + } catch (IllegalArgumentException e) { + throw new InvalidPathnameException(path, + "Invalid Path \"" + path + "\" : " + e, e); + } + return path; + } + + /** + * Validate that all elements of a ZK path are DNS-compatible hostnames, + * with the path itself included in the exception text + * @param path path to validate + */ + public static String validateElementsAsDNS(String path) throws + InvalidPathnameException { + List<String> splitpath = split(path); + for (String fragment : splitpath) { + if (!HOSTNAME.matcher(fragment).matches()) { + throw new InvalidPathnameException(path, + "Invalid Path element \"" + fragment + "\""); + } + } + return path; + } + + /** + * Create a full path from the registry root and the supplied subdir + * @param base base path + * @param path path of operation + * @return an absolute path + * @throws IllegalArgumentException if the path is invalid + */ + public static String createFullPath(String base, String path) throws + IOException { + Preconditions.checkArgument(path != null, "null path"); + Preconditions.checkArgument(base != null, "null base"); + return validateZKPath(join(base, path)); + } + + /** + * Join two paths, guaranteeing that there will be exactly + * one separator between the two, and exactly one at the front + * of the path. There will be no trailing "/" except for the special + * case that this is the root path + * @param base base path + * @param path second path to add + * @return a combined path.
+ */ + public static String join(String base, String path) { + Preconditions.checkArgument(path != null, "null path"); + Preconditions.checkArgument(base != null, "null base"); + StringBuilder fullpath = new StringBuilder(); + + if (!base.startsWith("/")) { + fullpath.append('/'); + } + fullpath.append(base); + + // guarantee a trailing / + if (!fullpath.toString().endsWith("/")) { + fullpath.append("/"); + } + // strip off any at the beginning + if (path.startsWith("/")) { + // path starts with /, so append all other characters -if present + if (path.length() > 1) { + fullpath.append(path.substring(1)); + } + } else { + fullpath.append(path); + } + + // here there may be a trailing "/" + String finalpath = fullpath.toString(); + if (finalpath.endsWith("/") && !"/".equals(finalpath)) { + finalpath = finalpath.substring(0, finalpath.length() - 1); + + } + return finalpath; + } + + /** + * Split a path into elements, stripping empty elements + * @param path the path + * @return the split path + */ + public static List<String> split(String path) { + String[] pathelements = path.split("/"); + List<String> dirs = new ArrayList<String>(pathelements.length); + for (String pathelement : pathelements) { + if (!pathelement.isEmpty()) { + dirs.add(pathelement); + } + } + return dirs; + } + + /** + * Get the parent of a path + * @param path path to get the parent of + * @return the parent path + * @throws PathNotFoundException if the path was at root. + */ + public static String parentOf(String path) throws PathNotFoundException { + List<String> elements = split(path); + + int size = elements.size(); + if (size == 0) { + throw new PathNotFoundException("No parent of " + path); + } + if (size == 1) { + return "/"; + } + elements.remove(size - 1); + StringBuilder parent = new StringBuilder(path.length()); + for (String element : elements) { + parent.append("/"); + parent.append(element); + } + return parent.toString(); + } + + /** + * Perform any formatting for the registry needed to convert + * non-simple-DNS elements + * @param element element to encode + * @return an encoded string + */ + public static String encodeForRegistry(String element) { + return IDN.toASCII(element); + } + + /** + * Perform whatever transforms are needed to get a YARN ID into + * a DNS-compatible name + * @param yarnId ID as string of YARN application, instance or container + * @return a string suitable for use in registry paths. + */ + public static String encodeYarnID(String yarnId) { + return yarnId.replace("_", "-"); + } +}
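To make the path conventions above concrete, a few illustrative evaluations (values assumed, not taken from any test):

    RegistryPathUtils.join("/registry", "users/alice");    // "/registry/users/alice"
    RegistryPathUtils.join("/registry/", "/users");        // "/registry/users"
    RegistryPathUtils.split("/a//b/");                     // ["a", "b"]
    RegistryPathUtils.parentOf("/a/b");                    // "/a"
    RegistryPathUtils.encodeYarnID("container_01_000002"); // "container-01-000002"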
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RegistryTypeUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RegistryTypeUtils.java new file mode 100644 index 0000000..fe0126f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/RegistryTypeUtils.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.binding; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidRecordException; +import org.apache.hadoop.yarn.registry.client.types.AddressTypes; +import org.apache.hadoop.yarn.registry.client.types.Endpoint; +import org.apache.hadoop.yarn.registry.client.types.ProtocolTypes; + +import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.URI; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Utils to work with registry types + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RegistryTypeUtils { + + public static Endpoint urlEndpoint(String api, + String protocolType, + URI... urls) { + return new Endpoint(api, protocolType, urls); + } + + public static Endpoint restEndpoint(String api, + URI... urls) { + return urlEndpoint(api, ProtocolTypes.PROTOCOL_REST, urls); + } + + public static Endpoint webEndpoint(String api, + URI... urls) { + return urlEndpoint(api, ProtocolTypes.PROTOCOL_WEBUI, urls); + } + + public static Endpoint inetAddrEndpoint(String api, + String protocolType, + String hostname, + int port) { + return new Endpoint(api, + AddressTypes.ADDRESS_HOSTNAME_AND_PORT, + protocolType, + tuplelist(hostname, Integer.toString(port))); + } + + public static Endpoint ipcEndpoint(String api, + boolean protobuf, List<String> address) { + ArrayList<List<String>> addressList = new ArrayList<List<String>>(); + addressList.add(address); + return new Endpoint(api, + AddressTypes.ADDRESS_HOSTNAME_AND_PORT, + protobuf ? ProtocolTypes.PROTOCOL_HADOOP_IPC_PROTOBUF + : ProtocolTypes.PROTOCOL_HADOOP_IPC, + addressList); + } + + public static List<List<String>> tuplelist(String... t1) { + ArrayList<List<String>> outer = new ArrayList<List<String>>(); + outer.add(tuple(t1)); + return outer; + } + + public static List<String> tuple(String... t1) { + return Arrays.asList(t1); + } + + public static List<String> tuple(Object... t1) { + List<String> l = new ArrayList<String>(t1.length); + for (Object t : t1) { + l.add(t.toString()); + } + return l; + } + + /** + * Convert a socket address pair into a string tuple, (host, port). + * JDK7: move to InetAddress.getHostString() to avoid DNS lookups. + * @param address an address + * @return an element for the address list + */ + public static List<String> marshall(InetSocketAddress address) { + return tuple(address.getHostName(), address.getPort()); + } + + /** + * Require a specific address type on an endpoint + * @param required required type + * @param epr endpoint + * @throws InvalidRecordException if the type is wrong + */ + public static void requireAddressType(String required, Endpoint epr) throws + InvalidRecordException { + if (!required.equals(epr.addressType)) { + throw new InvalidRecordException( + epr.toString(), + "Address type of " + epr.addressType + + " does not match required type of " + + required); + } + + } + + /** + * Get the addresses of a URI endpoint + * @param epr endpoint + * @return the URIs of the entries in the address list.
Null if the endpoint + * itself is null + * @throws InvalidRecordException if the type is wrong, there are no addresses + * or the payload is ill-formatted + */ + public static List<String> retrieveAddressesUriType(Endpoint epr) throws + InvalidRecordException { + if (epr == null) { + return null; + } + requireAddressType(AddressTypes.ADDRESS_URI, epr); + List<List<String>> addresses = epr.addresses; + if (addresses.size() < 1) { + throw new InvalidRecordException(epr.toString(), + "No addresses in endpoint"); + } + List<String> results = new ArrayList<String>(addresses.size()); + for (List<String> address : addresses) { + if (address.size() != 1) { + throw new InvalidRecordException(epr.toString(), + "Address payload invalid: wrong element count: " + + address.size()); + } + results.add(address.get(0)); + } + return results; + } + + /** + * Get the address URLs. Guaranteed to return at least one address. + * @param epr endpoint + * @return the addresses as URLs + * @throws InvalidRecordException if the type is wrong, there are no addresses + * or the payload is ill-formatted + * @throws MalformedURLException address can't be turned into a URL + */ + public static List<URL> retrieveAddressURLs(Endpoint epr) throws + InvalidRecordException, + MalformedURLException { + if (epr == null) { + throw new InvalidRecordException("", "Null endpoint"); + } + List<String> addresses = retrieveAddressesUriType(epr); + List<URL> results = new ArrayList<URL>(addresses.size()); + for (String address : addresses) { + results.add(new URL(address)); + } + return results; + } +}
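A sketch of building an endpoint with the helpers above and reading its addresses back; the API name and URL are invented, and it assumes the URI-varargs Endpoint constructor marks the endpoint as AddressTypes.ADDRESS_URI:

    // Register a hypothetical web UI, then recover the addresses as URLs
    Endpoint web = RegistryTypeUtils.webEndpoint(
        "org.example.ui", URI.create("http://host1:8088/ui"));
    List<URL> urls = RegistryTypeUtils.retrieveAddressURLs(web);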
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/ZKPathDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/ZKPathDumper.java new file mode 100644 index 0000000..7370028 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/binding/ZKPathDumper.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.binding; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.api.GetChildrenBuilder; +import org.apache.zookeeper.data.Stat; + +import java.util.List; + +/** + * This class dumps a registry tree to a string. + * It does this in the toString() method, so it + * can be used in a log statement -the operation + * will only take place if the method is evaluated. + * + */ +@VisibleForTesting +public class ZKPathDumper { + + public static final int INDENT = 2; + private final CuratorFramework curator; + private final String root; + + /** + * Create a path dumper -but do not dump the path until asked + * @param curator curator instance + * @param root root + */ + public ZKPathDumper(CuratorFramework curator, + String root) { + Preconditions.checkArgument(curator != null); + Preconditions.checkArgument(root != null); + this.curator = curator; + this.root = root; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("ZK tree for ").append(root).append('\n'); + expand(builder, root, 1); + return builder.toString(); + } + + /** + * Recursively expand the path into the supplied string builder, increasing + * the indentation by {@link #INDENT} as it proceeds (depth first) down + * the tree + * @param builder string builder to append to + * @param path path to examine + * @param indent current indentation + */ + private void expand(StringBuilder builder, + String path, + int indent) { + try { + GetChildrenBuilder childrenBuilder = curator.getChildren(); + List<String> children = childrenBuilder.forPath(path); + for (String child : children) { + String childPath = path + "/" + child; + String body = ""; + Stat stat = curator.checkExists().forPath(childPath); + StringBuilder verboseDataBuilder = new StringBuilder(64); + verboseDataBuilder.append(" [") + .append(stat.getDataLength()) + .append("]"); + if (stat.getEphemeralOwner() > 0) { + verboseDataBuilder.append("*"); + } + body = verboseDataBuilder.toString(); + + // print each child + append(builder, indent, ' '); + builder.append('/').append(child); + builder.append(body); + builder.append('\n'); + // recurse + expand(builder, childPath, indent + INDENT); + } + } catch (Exception e) { + builder.append(e.toString()).append("\n"); + } + + } + + private void append(StringBuilder builder, int indent, char c) { + for (int i = 0; i < indent; i++) { + builder.append(c); + } + } +}
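Because the expansion happens in toString(), a dump costs nothing unless the statement is actually logged; a minimal sketch (the logger, curator instance and root path are assumed):

    if (LOG.isDebugEnabled()) {
      // toString() triggers the recursive tree walk only when debug is enabled
      LOG.debug("Registry dump:\n{}", new ZKPathDumper(curator, "/registry"));
    }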
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/AuthenticationFailedException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/AuthenticationFailedException.java new file mode 100644 index 0000000..1526bc9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/AuthenticationFailedException.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.exceptions; + +/** + * Exception raised when client access wasn't authenticated. + * That is: the credentials provided were incomplete or invalid. + */ +public class AuthenticationFailedException extends RegistryIOException { + public AuthenticationFailedException(String path, Throwable cause) { + super(path, cause); + } + + public AuthenticationFailedException(String path, String error) { + super(path, error); + } + + public AuthenticationFailedException(String path, + String error, + Throwable cause) { + super(path, error, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/InvalidPathnameException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/InvalidPathnameException.java new file mode 100644 index 0000000..40809f7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/InvalidPathnameException.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.exceptions; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A path name was invalid + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class InvalidPathnameException extends RegistryIOException { + public InvalidPathnameException(String path, String message) { + super(path, message); + } + + public InvalidPathnameException(String path, + String message, + Throwable cause) { + super(path, message, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/InvalidRecordException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/InvalidRecordException.java new file mode 100644 index 0000000..0ee2b9d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/InvalidRecordException.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.exceptions; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Raised if the entry doesn't match what was expected at the path. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class InvalidRecordException extends RegistryIOException { + + public InvalidRecordException(String path, String error) { + super(path, error); + } + + public InvalidRecordException(String path, + String error, + Throwable cause) { + super(path, error, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/NoChildrenForEphemeralsException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/NoChildrenForEphemeralsException.java new file mode 100644 index 0000000..929420d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/NoChildrenForEphemeralsException.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.exceptions; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Children are not allowed under ephemeral nodes. This is an aspect + * of ZK which isn't exposed to the registry API -ephemeral ZK nodes + * do not support children. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class NoChildrenForEphemeralsException extends RegistryIOException { + public NoChildrenForEphemeralsException(String path, Throwable cause) { + super(path, cause); + } + + public NoChildrenForEphemeralsException(String path, String error) { + super(path, error); + } + + public NoChildrenForEphemeralsException(String path, + String error, + Throwable cause) { + super(path, error, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/RegistryIOException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/RegistryIOException.java new file mode 100644 index 0000000..a82ac93 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/exceptions/RegistryIOException.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.exceptions; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.PathIOException; + +/** + * Exception for registry operations + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RegistryIOException extends PathIOException { + + public RegistryIOException(String path, Throwable cause) { + super(path, cause); + } + + public RegistryIOException(String path, String error) { + super(path, error); + } + + public RegistryIOException(String path, String error, Throwable cause) { + super(path, error, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/BindingInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/BindingInformation.java new file mode 100644 index 0000000..0fb9cfb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/BindingInformation.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.services; + +import org.apache.curator.ensemble.EnsembleProvider; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Binding information provided by a {@link RegistryBindingSource} + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class BindingInformation { + + public EnsembleProvider ensembleProvider; + public String description; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryBindingSource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryBindingSource.java new file mode 100644 index 0000000..b97403d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryBindingSource.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.services; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Interface which can be implemented by a registry binding source + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface RegistryBindingSource { + + /** + * Supply the binding information for this registry + * @return the binding information data + */ + BindingInformation supplyBindingInformation(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryInternalConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryInternalConstants.java new file mode 100644 index 0000000..9beb7d1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryInternalConstants.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.services; + +import org.apache.zookeeper.ZooDefs; + +/** + * Internal constants for the registry. + * + * These are the things which aren't visible to users. + * + */ +public interface RegistryInternalConstants { + + + /** + * Pattern of a hostname: {@value} + */ + String HOSTNAME_PATTERN = + "([a-z0-9]|[a-z0-9][a-z0-9\\-]*[a-z0-9])"; + /** + * Permissions for readers: {@value}. + */ + int PERMISSIONS_REGISTRY_READERS = ZooDefs.Perms.READ; + + /** + * Permissions for system services: {@value} + */ + + int PERMISSIONS_REGISTRY_SYSTEM_SERVICES = + ZooDefs.Perms.ALL; + /** + * Permissions for a user's root entry: {@value}. + * All except the admin permissions (ACL access) on a node + */ + int PERMISSIONS_REGISTRY_USER_ROOT = + ZooDefs.Perms.READ | ZooDefs.Perms.WRITE | ZooDefs.Perms.CREATE | + ZooDefs.Perms.DELETE; + + /** + * Name of the SASL auth provider which has to be added to ZK server to enable + * sasl: auth patterns. Without this callers can connect via SASL, but + * they can't use it in ACLs + */ + String SASLAUTHENTICATION_PROVIDER = + "org.apache.zookeeper.server.auth.SASLAuthenticationProvider"; + + /** + * String to use as the prefix when declaring a new auth provider. + */ + String ZOOKEEPER_AUTH_PROVIDER = "zookeeper.authProvider"; +}
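A sketch of how the last two constants are typically combined when enabling SASL ACLs on a ZK server; the provider index 1 is an assumption, any unused slot works:

    // Register the SASL authentication provider before starting the ZK server
    System.setProperty(
        RegistryInternalConstants.ZOOKEEPER_AUTH_PROVIDER + ".1",
        RegistryInternalConstants.SASLAUTHENTICATION_PROVIDER);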
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryOperationsService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryOperationsService.java new file mode 100644 index 0000000..57dfb25 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/RegistryOperationsService.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.services; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.yarn.registry.client.api.RegistryOperations; + +import org.apache.hadoop.yarn.registry.client.binding.RecordOperations; +import static org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils.*; + +import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidPathnameException; +import org.apache.hadoop.yarn.registry.client.api.CreateFlags; +import org.apache.hadoop.yarn.registry.client.services.zk.CuratorService; +import org.apache.hadoop.yarn.registry.client.types.RegistryPathStatus; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * The YARN ZK registry operations service. + * + * It's a YARN service: ephemeral nodes last as long as the client exists + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RegistryOperationsService extends CuratorService + implements RegistryOperations { + + private static final Logger LOG = + LoggerFactory.getLogger(RegistryOperationsService.class); + + private final RecordOperations.ServiceRecordMarshal serviceRecordMarshal + = new RecordOperations.ServiceRecordMarshal(); + + private List<ACL> userAcl; + + public RegistryOperationsService(String name) { + this(name, null); + } + + public RegistryOperationsService() { + this("RegistryOperationsService"); + } + + public RegistryOperationsService(String name, + RegistryBindingSource bindingSource) { + super(name, bindingSource); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + super.serviceInit(conf); + + // if a secure cluster, switch to the security settings of this user + getRegistrySecurity().initSecurity(); + + // TODO: build up user ACLs for this user + + List<ACL> userAcls = getRegistrySecurity().getSystemACLs(); + setUserAcl(userAcls); + } + + public List<ACL> getUserAcl() { + return userAcl; + } + + public void setUserAcl(List<ACL> userAcl) { + this.userAcl = userAcl; + } + + protected void validatePath(String path) throws InvalidPathnameException { + RegistryPathUtils.validateElementsAsDNS(path); + } + + @Override + public boolean mknode(String path, boolean createParents) throws + PathNotFoundException, + AccessControlException, + InvalidPathnameException, + IOException { + validatePath(path); + return zkMkPath(path, CreateMode.PERSISTENT, createParents, + getUserAcl()); + } + + @Override + public void create(String path, + ServiceRecord record, + int createFlags) throws + PathNotFoundException, + FileAlreadyExistsException, + AccessControlException, + InvalidPathnameException, + IOException { + Preconditions.checkArgument(record != null, "null record"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(record.id),
"empty record ID"); + validatePath(path); + LOG.info("Registered at {} : {}", path, record); + + CreateMode mode = CreateMode.PERSISTENT; + byte[] bytes = serviceRecordMarshal.toByteswithHeader(record); + zkSet(path, mode, bytes, getUserAcl(), + ((createFlags & CreateFlags.OVERWRITE) != 0)); + } + + @Override + public ServiceRecord resolve(String path) throws + PathNotFoundException, + AccessControlException, + InvalidPathnameException, + IOException { + byte[] bytes = zkRead(path); + return serviceRecordMarshal.fromBytesWithHeader(path, bytes); + } + + @Override + public RegistryPathStatus stat(String path) throws + PathNotFoundException, + AccessControlException, + InvalidPathnameException, + IOException { + validatePath(path); + Stat stat = zkStat(path); + RegistryPathStatus status = new RegistryPathStatus( + path, + stat.getCtime(), + stat.getDataLength(), + stat.getNumChildren()); + LOG.debug("Stat {} => {}", path, status); + return status; + } + + @Override + public RegistryPathStatus[] list(String path) throws + PathNotFoundException, + AccessControlException, + InvalidPathnameException, + IOException { + validatePath(path); + List childNames = zkList(path); + int size = childNames.size(); + ArrayList childList = new ArrayList( + size); + for (String childName : childNames) { + childList.add(stat(join(path, childName))); + } + return childList.toArray(new RegistryPathStatus[size]); + } + + @Override + public void delete(String path, boolean recursive) throws + PathNotFoundException, + PathIsNotEmptyDirectoryException, + AccessControlException, + InvalidPathnameException, + IOException { + validatePath(path); + zkDelete(path, recursive, null); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/CuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/CuratorService.java new file mode 100644 index 0000000..afc235a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/CuratorService.java @@ -0,0 +1,732 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/CuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/CuratorService.java new file mode 100644 index 0000000..afc235a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/CuratorService.java @@ -0,0 +1,732 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.services.zk; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.curator.ensemble.EnsembleProvider; +import org.apache.curator.ensemble.fixed.FixedEnsembleProvider; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.framework.api.BackgroundCallback; +import org.apache.curator.framework.api.CreateBuilder; +import org.apache.curator.framework.api.DeleteBuilder; +import org.apache.curator.framework.api.GetChildrenBuilder; +import org.apache.curator.retry.BoundedExponentialBackoffRetry; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.PathAccessDeniedException; +import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.fs.PathPermissionException; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.yarn.registry.client.binding.ZKPathDumper; +import org.apache.hadoop.yarn.registry.client.exceptions.AuthenticationFailedException; +import org.apache.hadoop.yarn.registry.client.exceptions.NoChildrenForEphemeralsException; +import org.apache.hadoop.yarn.registry.client.exceptions.RegistryIOException; +import org.apache.hadoop.yarn.registry.client.services.BindingInformation; +import org.apache.hadoop.yarn.registry.client.services.RegistryBindingSource; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Stat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; + +/** + * This service binds to Zookeeper via Apache Curator. It is more + * generic than just the YARN service registry; it does not implement + * any of the RegistryOperations API. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class CuratorService extends CompositeService + implements RegistryConstants, RegistryBindingSource { + + private static final Logger LOG = + LoggerFactory.getLogger(CuratorService.class); + public static final String SASL = "sasl"; + protected List<ACL> systemACLs; + + /** + * the Curator binding + */ + private CuratorFramework curator; + + /** + * Parsed root ACL + */ + private List<ACL> rootACL; + + /** + * Path to the registry root + */ + private String registryRoot; + + private final RegistryBindingSource bindingSource; + + private RegistrySecurity registrySecurity; + + /** + * the connection binding text for messages + */ + private String connectionDescription; + private String securityConnectionDiagnostics = ""; + + private EnsembleProvider ensembleProvider; + private boolean secure; + + + /** + * Construct the service. + * @param name service name + * @param bindingSource source of binding information.
+ * If null: use this instance + */ + public CuratorService(String name, RegistryBindingSource bindingSource) { + super(name); + if (bindingSource != null) { + this.bindingSource = bindingSource; + } else { + this.bindingSource = this; + } + } + + /** + * Create an instance using this service as the binding source (i.e. read + * configuration options from the registry) + * @param name service name + */ + public CuratorService(String name) { + this(name, null); + } + + /** + * Init the service. + * This is where the security bindings are set up + * @param conf configuration of the service + * @throws Exception + */ + @Override + protected void serviceInit(Configuration conf) throws Exception { + + registryRoot = conf.getTrimmed(KEY_REGISTRY_ZK_ROOT, + DEFAULT_ZK_REGISTRY_ROOT); + + registrySecurity = new RegistrySecurity(conf); + // is the registry secure? + secure = registrySecurity.isSecure(); + // if it is secure, either the user is using kerberos and has + // full rights, or they are delegated + LOG.debug("Creating Registry with root {}", registryRoot); + + super.serviceInit(conf); + } + + /** + * Start the service. + * This is where the curator instance is started. + * @throws Exception + */ + @Override + protected void serviceStart() throws Exception { + curator = createCurator(); + super.serviceStart(); + } + + /** + * Close the ZK connection if it is open + */ + @Override + protected void serviceStop() throws Exception { + IOUtils.closeStream(curator); + super.serviceStop(); + } + + /** + * Flag to indicate whether or not the registry is secure. + * Valid once the service is inited. + * @return true if the registry is secure + */ + public boolean isSecure() { + return secure; + } + + /** + * Get the registry security helper + * @return the registry security helper + */ + protected RegistrySecurity getRegistrySecurity() { + return registrySecurity; + } + + /** + * Build the security diagnostics string + * @return a string for diagnostics + */ + protected String buildSecurityDiagnostics() { + // build up the security connection diags + if (!secure) { + return "security disabled"; + } else { + StringBuilder builder = new StringBuilder(); + builder.append("secure cluster; "); + builder.append(registrySecurity.buildSecurityDiagnostics()); + return builder.toString(); + } + } + + /** + * Get the ACLs defined in the config key for this service, or + * the default + * @param confKey configuration key + * @param defaultPermissions default values + * @return an ACL list. + * @throws IOException + * @throws ZKUtil.BadAclFormatException on a bad ACL parse + */ + public List<ACL> buildACLs(String confKey, String defaultPermissions) throws + IOException, ZKUtil.BadAclFormatException { + String zkAclConf = getConfig().get(confKey, defaultPermissions); + return parseACLs(zkAclConf); + } + + /** + * Parse an ACL list. This includes configuration indirection + * {@link ZKUtil#resolveConfIndirection(String)} + * @param zkAclConf configuration string + * @return an ACL list + * @throws IOException + */ + public List<ACL> parseACLs(String zkAclConf) throws IOException { + return registrySecurity.parseACLs(zkAclConf); + }
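The strings accepted by parseACLs follow hadoop's ZKUtil format, scheme:id:perms with comma separation; an illustrative call on a started instance (curatorService is assumed):

    // "rwcda" grants read, write, create, delete and admin to everyone
    List<ACL> acls = curatorService.parseACLs("world:anyone:rwcda");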
+ + /** + * Create a new curator instance off the root path; using configuration + * options provided in the service configuration to set timeouts and + * retry policy. + * @return the newly created curator + */ + private CuratorFramework createCurator() throws IOException { + Configuration conf = getConfig(); + createEnsembleProvider(); + int sessionTimeout = conf.getInt(KEY_REGISTRY_ZK_SESSION_TIMEOUT, + DEFAULT_ZK_SESSION_TIMEOUT); + int connectionTimeout = conf.getInt(KEY_REGISTRY_ZK_CONNECTION_TIMEOUT, + DEFAULT_ZK_CONNECTION_TIMEOUT); + int retryTimes = conf.getInt(KEY_REGISTRY_ZK_RETRY_TIMES, + DEFAULT_ZK_RETRY_TIMES); + int retryInterval = conf.getInt(KEY_REGISTRY_ZK_RETRY_INTERVAL, + DEFAULT_ZK_RETRY_INTERVAL); + int retryCeiling = conf.getInt(KEY_REGISTRY_ZK_RETRY_CEILING, + DEFAULT_ZK_RETRY_CEILING); + + LOG.debug("Creating CuratorService with connection {}", + connectionDescription); + + // set the security options + + //log them + securityConnectionDiagnostics = buildSecurityDiagnostics(); + + // build up the curator itself + CuratorFrameworkFactory.Builder b = CuratorFrameworkFactory.builder(); + b.ensembleProvider(ensembleProvider) + .connectionTimeoutMs(connectionTimeout) + .sessionTimeoutMs(sessionTimeout) + + .retryPolicy(new BoundedExponentialBackoffRetry(retryInterval, + retryCeiling, + retryTimes)); + +/* + if (!root.isEmpty()) { + String namespace = root; + if (namespace.startsWith("/")) { + namespace = namespace.substring(1); + } + b.namespace(namespace); + } +*/ + + CuratorFramework framework = b.build(); + framework.start(); + + return framework; + } + + + @Override + public String toString() { + return super.toString() + + bindingDiagnosticDetails(); + } + + public String bindingDiagnosticDetails() { + return " Connection=\"" + connectionDescription + "\"" + + " root=\"" + registryRoot + "\"" + + " " + securityConnectionDiagnostics; + } + + /** + * Create a full path from the registry root and the supplied subdir + * @param path path of operation + * @return an absolute path + * @throws IllegalArgumentException if the path is invalid + */ + protected String createFullPath(String path) throws IOException { + return RegistryPathUtils.createFullPath(registryRoot, path); + } + + /** + * Get the registry binding source ... this can be used to + * create new ensemble providers + * @return the registry binding source in use + */ + public RegistryBindingSource getBindingSource() { + return bindingSource; + } + + /** + * Create the ensemble provider for this registry, by invoking + * {@link RegistryBindingSource#supplyBindingInformation()} on + * the provider stored in {@link #bindingSource} + * Sets {@link #ensembleProvider} to that value; + * sets {@link #connectionDescription} to the binding info + * for use in toString and logging; + * + */ + protected void createEnsembleProvider() { + BindingInformation binding = bindingSource.supplyBindingInformation(); + connectionDescription = binding.description + + " " + securityConnectionDiagnostics; + ensembleProvider = binding.ensembleProvider; + } + + + /** + * Supply the binding information.
+ * This implementation returns a fixed ensemble bound to + * the quorum supplied by {@link #buildConnectionString()} + * @return the binding information + */ + @Override + public BindingInformation supplyBindingInformation() { + BindingInformation binding = new BindingInformation(); + String connectString = buildConnectionString(); + binding.ensembleProvider = new FixedEnsembleProvider(connectString); + binding.description = + "fixed ZK quorum \"" + connectString + "\""; + return binding; + } + + /** + * Override point: get the connection string used to connect to + * the ZK service + * @return a registry quorum + */ + protected String buildConnectionString() { + return getConfig().getTrimmed(KEY_REGISTRY_ZK_QUORUM, + DEFAULT_REGISTRY_ZK_QUORUM); + } + + + /** + * Create an IOE when an operation fails + * @param path path of operation + * @param operation operation attempted + * @param exception the exception caught + * @return an IOE to throw that contains the path and operation details. + */ + protected IOException operationFailure(String path, + String operation, + Exception exception) { + return operationFailure(path, operation, exception, null); + } + + + /** + * Create an IOE when an operation fails + * @param path path of operation + * @param operation operation attempted + * @param exception the exception caught + * @param acls ACLs used in the operation; included in diagnostics + * @return an IOE to throw that contains the path and operation details. + */ + protected IOException operationFailure(String path, + String operation, + Exception exception, + List<ACL> acls) { + IOException ioe; + if (exception instanceof KeeperException.NoNodeException) { + ioe = new PathNotFoundException(path); + } else if (exception instanceof KeeperException.NodeExistsException) { + ioe = new FileAlreadyExistsException(path); + } else if (exception instanceof KeeperException.NoAuthException) { + ioe = new PathAccessDeniedException(path); + } else if (exception instanceof KeeperException.NotEmptyException) { + ioe = new PathIsNotEmptyDirectoryException(path); + } else if (exception instanceof KeeperException.AuthFailedException) { + ioe = new AuthenticationFailedException(path, + "Authentication Failed: " + exception, exception); + } else if (exception instanceof KeeperException.NoChildrenForEphemeralsException) { + ioe = new NoChildrenForEphemeralsException(path, + "Cannot create a path under an ephemeral node: " + exception, + exception); + } else if (exception instanceof KeeperException.InvalidACLException) { + // this is a security exception of a kind + // include the ACLs to help the diagnostics + StringBuilder builder = new StringBuilder(); + builder.append(path).append(" ["). + append(RegistrySecurity.aclsToString(acls)); + builder.append("]"); + builder.append(securityConnectionDiagnostics); + ioe = new PathPermissionException(builder.toString()); + } else { + ioe = new RegistryIOException(path, + "Failure of " + operation + " on " + path + ": " + + exception.toString(), + exception); + } + if (ioe.getCause() == null) { + ioe.initCause(exception); + } + + return ioe; + } + + /** + * Create a path if it does not exist. + * The check is poll + create; there's a risk that another process + * may create the same path before the create() operation is executed/
+ * + * @param path path to create + * @return true iff the path was created + * @throws IOException + */ + @VisibleForTesting + public boolean maybeCreate(String path, CreateMode mode) throws IOException { + List acl = rootACL; + return maybeCreate(path, mode, acl, false); + } + + /** + * Create a path if it does not exist. + * The check is poll + create; there's a risk that another process + * may create the same path before the create() operation is executed/ + * propagated to the ZK node polled. + * + * @param path path to create + * @param acl ACL for path -used when creating a new entry + * @param createParents flag to trigger parent creation + * @return true iff the path was created + * @throws IOException + */ + @VisibleForTesting + public boolean maybeCreate(String path, + CreateMode mode, + List acl, + boolean createParents) throws IOException { + return zkMkPath(path, mode, createParents, acl); + } + + + /** + * Stat the file + * @param path path of operation + * @return a curator stat entry + * @throws IOException on a failure + * @throws PathNotFoundException if the path was not found + */ + public Stat zkStat(String path) throws IOException { + String fullpath = createFullPath(path); + Stat stat; + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Stat {}", fullpath); + } + stat = curator.checkExists().forPath(fullpath); + } catch (Exception e) { + throw operationFailure(fullpath, "read()", e); + } + if (stat == null) { + throw new PathNotFoundException(path); + } + return stat; + } + + public List zkGetACLS(String path) throws IOException { + String fullpath = createFullPath(path); + List acls; + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Stat {}", fullpath); + } + acls = curator.getACL().forPath(fullpath); + } catch (Exception e) { + throw operationFailure(fullpath, "read()", e); + } + if (acls == null) { + throw new PathNotFoundException(path); + } + return acls; + } + + /** + * Poll for a path existing + * @param path path of operation + * @return true if the path was visible from the ZK server + * queried. + * @throws IOException + */ + public boolean zkPathExists(String path) throws IOException { + try { + return zkStat(path) != null; + } catch (PathNotFoundException e) { + return false; + } catch (Exception e) { + throw operationFailure(path, "existence check", e); + } + } + + /** + * Verify a path exists + * @param path path of operation + * @throws PathNotFoundException if the path is absent + * @throws IOException + */ + public String zkPathMustExist(String path) throws IOException { + zkStat(path); + return path; + } + + /** + * Create a directory. 
+   * @param path path to create
+   * @param mode mode for path
+   * @param createParents flag to trigger parent creation
+   * @param acls ACL for path
+   * @return true iff the path was created; false if it was already present
+   * @throws IOException any problem
+   */
+  public boolean zkMkPath(String path,
+      CreateMode mode,
+      boolean createParents,
+      List<ACL> acls)
+      throws IOException {
+    path = createFullPath(path);
+    if (acls != null && acls.size() == 0) {
+      throw new PathPermissionException(path + ": empty ACL list");
+    }
+
+    try {
+      RegistrySecurity.AclListInfo aclInfo =
+          new RegistrySecurity.AclListInfo(acls);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Creating path {} with mode {} and ACL {}",
+            path, mode, aclInfo);
+      }
+      CreateBuilder createBuilder = curator.create();
+      createBuilder.withMode(mode).withACL(acls);
+      if (createParents) {
+        createBuilder.creatingParentsIfNeeded();
+      }
+      createBuilder.forPath(path);
+
+    } catch (KeeperException.NodeExistsException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("path already present: {}", path, e);
+      }
+      return false;
+
+    } catch (Exception e) {
+      throw operationFailure(path, "mkdir()", e, acls);
+    }
+    return true;
+  }
+
+  /**
+   * Recursively create the parent path of the given path
+   * @param path path whose parent is to be created
+   * @param acl ACL for the created path
+   * @throws IOException any problem
+   */
+  public void zkMkParentPath(String path,
+      List<ACL> acl) throws IOException {
+    zkMkPath(RegistryPathUtils.parentOf(path),
+        CreateMode.PERSISTENT, true, acl);
+  }
+
+  /**
+   * Create a path with given data. byte[0] is used for a path
+   * without data
+   * @param path path of operation
+   * @param mode creation mode
+   * @param data initial data
+   * @param acls ACL for the path
+   * @throws IOException on any failure
+   */
+  public void zkCreate(String path,
+      CreateMode mode,
+      byte[] data,
+      List<ACL> acls) throws IOException {
+    Preconditions.checkArgument(data != null, "null data");
+    String fullpath = createFullPath(path);
+    try {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Creating {} with {} bytes ACL {}", fullpath, data.length,
+            new RegistrySecurity.AclListInfo(acls));
+      }
+      curator.create().withMode(mode).withACL(acls).forPath(fullpath, data);
+    } catch (Exception e) {
+      throw operationFailure(fullpath, "create()", e, acls);
+    }
+  }
+
+  /**
+   * Update the data for a path
+   * @param path path of operation
+   * @param data new data
+   * @throws IOException on any failure
+   */
+  public void zkUpdate(String path, byte[] data) throws IOException {
+    Preconditions.checkArgument(data != null, "null data");
+    path = createFullPath(path);
+    try {
+      LOG.debug("Updating {} with {} bytes", path, data.length);
+      curator.setData().forPath(path, data);
+    } catch (Exception e) {
+      throw operationFailure(path, "update()", e);
+    }
+  }
+
+  /**
+   * Create or update an entry
+   * @param path path
+   * @param mode creation mode
+   * @param data data
+   * @param acl ACL for path; used when creating a new entry
+   * @param overwrite enable overwrite
+   * @return true if the entry was created, false if it was simply updated.
+   * @throws IOException on any failure
+   */
+  public boolean zkSet(String path,
+      CreateMode mode,
+      byte[] data,
+      List<ACL> acl, boolean overwrite) throws IOException {
+    Preconditions.checkArgument(data != null, "null data");
+    if (!zkPathExists(path)) {
+      zkCreate(path, mode, data, acl);
+      return true;
+    } else {
+      if (overwrite) {
+        zkUpdate(path, data);
+        return false;
+      } else {
+        throw new FileAlreadyExistsException(path);
+      }
+    }
+  }
+
+  /**
+   * Delete a directory/directory tree.
+   * It is not an error to delete a path that does not exist
+   * @param path path of operation
+   * @param recursive flag to trigger recursive deletion
+   * @param backgroundCallback callback; setting this converts the operation
+   * into an async/background operation
+   * @throws IOException on problems other than no-such-path
+   */
+  public void zkDelete(String path,
+      boolean recursive,
+      BackgroundCallback backgroundCallback) throws IOException {
+    String fullpath = createFullPath(path);
+    try {
+      LOG.debug("Deleting {}", fullpath);
+      DeleteBuilder delete = curator.delete();
+      if (recursive) {
+        delete.deletingChildrenIfNeeded();
+      }
+      if (backgroundCallback != null) {
+        delete.inBackground(backgroundCallback);
+      }
+      delete.forPath(fullpath);
+    } catch (KeeperException.NoNodeException e) {
+      // not an error
+    } catch (Exception e) {
+      throw operationFailure(fullpath, "delete()", e);
+    }
+  }
+
+  /**
+   * List all children of a path
+   * @param path path of operation
+   * @return a possibly empty list of children
+   * @throws IOException on any failure
+   */
+  public List<String> zkList(String path) throws IOException {
+    String fullpath = createFullPath(path);
+    try {
+      LOG.debug("ls {}", fullpath);
+      GetChildrenBuilder builder = curator.getChildren();
+      List<String> children = builder.forPath(fullpath);
+      return children;
+    } catch (Exception e) {
+      throw operationFailure(path, "ls()", e);
+    }
+  }
+
+  /**
+   * Read data on a path
+   * @param path path of operation
+   * @return the data
+   * @throws IOException read failure
+   */
+  public byte[] zkRead(String path) throws IOException {
+    String fullpath = createFullPath(path);
+    try {
+      LOG.debug("Reading {}", fullpath);
+      return curator.getData().forPath(fullpath);
+    } catch (Exception e) {
+      throw operationFailure(fullpath, "read()", e);
+    }
+  }
+
+  /**
+   * Return a path dumper instance which can do a full dump
+   * of the registry tree in its toString()
+   * operation
+   * @return a class to dump the registry
+   */
+  @VisibleForTesting
+  public ZKPathDumper dumpPath() {
+    return new ZKPathDumper(curator, registryRoot);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/RegistrySecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/RegistrySecurity.java
new file mode 100644
index 0000000..eb876c4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/RegistrySecurity.java
@@ -0,0 +1,691 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.client.services.zk;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.util.ZKUtil;
+import org.apache.zookeeper.Environment;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
+import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Id;
+import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Locale;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import static org.apache.hadoop.yarn.registry.client.services.zk.ZookeeperConfigOptions.*;
+import static org.apache.zookeeper.client.ZooKeeperSaslClient.*;
+import static org.apache.hadoop.yarn.registry.client.api.RegistryConstants.*;
+
+/**
+ * Implement the registry security; this is kept standalone
+ * for easier testing.
+ */
+public class RegistrySecurity {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RegistrySecurity.class);
+  public static final String CLIENT = "Client";
+  public static final String SERVER = "Server";
+
+  /**
+   * Error raised when the registry is tagged as secure but this
+   * process doesn't have hadoop security enabled.
+   */
+  public static final String E_NO_KERBEROS =
+      "Registry security is enabled, but Hadoop security is not enabled";
+  private final Configuration conf;
+  private String idPassword;
+  private String domain;
+  private boolean secure;
+
+  /**
+   * An ACL with read-write access for anyone
+   */
+  public static final ACL ALL_READWRITE_ACCESS =
+      new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE);
+
+  /**
+   * An ACL with read access for anyone
+   */
+  public static final ACL ALL_READ_ACCESS =
+      new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE);
+
+  /**
+   * An ACL list containing the {@link #ALL_READWRITE_ACCESS} entry.
+   * It is copy on write so can be shared without worry
+   */
+  public static final List<ACL> WorldReadWriteACL;
+
+  static {
+    List<ACL> acls = new ArrayList<ACL>();
+    acls.add(ALL_READWRITE_ACCESS);
+    WorldReadWriteACL = new CopyOnWriteArrayList<ACL>(acls);
+  }
+
+  /**
+   * The list of system ACLs
+   */
+  private List<ACL> systemACLs = new ArrayList<ACL>();
+
+  /**
+   * The default kerberos realm
+   */
+  private String kerberosRealm;
+
+  /**
+   * Create an instance with no password
+   * @param conf configuration
+   * @throws IOException on any configuration problem
+   */
+  public RegistrySecurity(Configuration conf) throws IOException {
+    this(conf, "");
+  }
+
+  /**
+   * Create an instance
+   * @param conf config
+   * @param idPassword id:pass pair.
If not empty, this tuple is validated
+   * @throws IOException on any configuration problem
+   */
+  public RegistrySecurity(Configuration conf, String idPassword)
+      throws IOException {
+    this.conf = conf;
+
+    secure = conf.getBoolean(KEY_REGISTRY_SECURE, DEFAULT_REGISTRY_SECURE);
+
+    setIdPassword(idPassword);
+  }
+
+  /**
+   * Init security. This triggers the extraction and validation of the
+   * configured system ACLs.
+   * After this operation, the {@link #systemACLs} list is valid.
+   * @return true if the cluster is secure.
+   * @throws IOException on any configuration problem
+   */
+  public boolean initSecurity() throws IOException {
+    if (!UserGroupInformation.isSecurityEnabled()) {
+      addSystemACL(ALL_READWRITE_ACCESS);
+      return false;
+    }
+    initACLs();
+    return true;
+  }
+
+  /**
+   * Create a SASL ACL for the current user
+   * @param perms permissions
+   * @return an ACL for the current user
+   * @throws IOException if the current user cannot be determined
+   */
+  public ACL createSaslACLFromCurrentUser(int perms) throws IOException {
+    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+    return createSaslACL(currentUser, perms);
+  }
+
+  /**
+   * Given a UGI, create a SASL ACL from it
+   * @param ugi UGI
+   * @param perms permissions
+   * @return a new ACL
+   */
+  public ACL createSaslACL(UserGroupInformation ugi, int perms) {
+    String userName = ugi.getUserName();
+    return new ACL(perms, new Id("sasl", userName));
+  }
+
+  /**
+   * Init the ACLs.
+   * After this operation, the {@link #systemACLs} list is valid.
+   * @throws IOException on any configuration problem
+   */
+  @VisibleForTesting
+  public void initACLs() throws IOException {
+    if (secure) {
+
+      String sysacls =
+          getOrFail(KEY_REGISTRY_SYSTEM_ACLS, DEFAULT_REGISTRY_SYSTEM_ACLS);
+
+      kerberosRealm = conf.get(KEY_REGISTRY_KERBEROS_REALM,
+          getDefaultRealmInJVM());
+
+      systemACLs =
+          buildACLs(sysacls, kerberosRealm, ZooDefs.Perms.ALL);
+      addSystemACL(ALL_READ_ACCESS);
+    } else {
+      // principal list is empty
+      systemACLs = WorldReadWriteACL;
+    }
+  }
+
+  /**
+   * Add another system ACL
+   * @param acl ACL to add
+   */
+  public void addSystemACL(ACL acl) {
+    systemACLs.add(acl);
+  }
+
+  /**
+   * Set the id:password pair
+   * @param idPasswordPair id:password pair; may be empty
+   * @throws IOException if the pair is non-empty but invalid
+   */
+  protected void setIdPassword(String idPasswordPair) throws IOException {
+    this.idPassword = idPasswordPair;
+    if (!StringUtils.isEmpty(idPasswordPair)) {
+      if (!isValid(idPasswordPair)) {
+        throw new IOException("Invalid id:password: " + idPasswordPair);
+      }
+      digest(idPasswordPair);
+    }
+  }
+
+  /**
+   * Flag to indicate the cluster is secure
+   * @return true if the config enabled security
+   */
+  public boolean isSecure() {
+    return secure;
+  }
+
+  /**
+   * Get the system principals
+   * @return the system principals
+   */
+  public List<ACL> getSystemACLs() {
+    Preconditions.checkNotNull(systemACLs,
+        "registry security is uninitialized");
+    return Collections.unmodifiableList(systemACLs);
+  }
+
+  public String getKerberosRealm() {
+    return kerberosRealm;
+  }
+
+  /**
+   * Get a conf option, throw an exception if it is null/empty
+   * @param key key
+   * @param defval default value
+   * @return the value
+   * @throws IOException if missing
+   */
+  private String getOrFail(String key, String defval) throws IOException {
+    String val = conf.get(key, defval);
+    if (StringUtils.isEmpty(val)) {
+      throw new IOException("Missing value for configuration option " + key);
+    }
+    return val;
+  }
+
+  /**
+   * Check for an id:password tuple being valid.
+   * This test is stricter than that in {@link DigestAuthenticationProvider},
+   * which splits the string, but doesn't check the contents of each
+   * half for being non-"".
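+   * <p>
+   * A sketch of the expected use; the credentials are illustrative only:
+   * <pre>
+   *   RegistrySecurity security = new RegistrySecurity(conf);
+   *   if (security.isValid("alice:secret")) {
+   *     String digested = security.digest("alice:secret");
+   *     // "alice:" + digested is usable in a digest-scheme ACL
+   *   }
+   * </pre>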
+   * @param idPasswordPair id:pass pair
+   * @return true if the pair is considered valid.
+   */
+  public boolean isValid(String idPasswordPair) {
+    String[] parts = idPasswordPair.split(":");
+    return parts.length == 2
+        && !StringUtils.isEmpty(parts[0])
+        && !StringUtils.isEmpty(parts[1]);
+  }
+
+  /**
+   * Generate a base-64 encoded digest of the idPasswordPair pair
+   * @param idPasswordPair id:password
+   * @return a string that can be used for authentication
+   * @throws IOException if the pair is invalid or the digest cannot be built
+   */
+  public String digest(String idPasswordPair) throws IOException {
+    if (StringUtils.isEmpty(idPasswordPair) || !isValid(idPasswordPair)) {
+      throw new IOException("Invalid id:password: " + idPasswordPair);
+    }
+    try {
+      return DigestAuthenticationProvider.generateDigest(idPasswordPair);
+    } catch (NoSuchAlgorithmException e) {
+      // unlikely since it is standard to the JVM, but maybe JCE restrictions
+      // could trigger it
+      throw new IOException(e.toString(), e);
+    }
+  }
+
+  /**
+   * Split a comma-separated list of ACL pairs, appending the realm to
+   * any "sasl" entry which ends with an "@"
+   * @param aclString comma-separated list of scheme:id pairs
+   * @param realm realm to append
+   * @return the split, trimmed and realm-patched list
+   */
+  public List<String> splitAclPairs(String aclString, String realm) {
+    List<String> list = Lists.newArrayList(
+        Splitter.on(',').omitEmptyStrings().trimResults()
+            .split(aclString));
+    ListIterator<String> listIterator = list.listIterator();
+    while (listIterator.hasNext()) {
+      String next = listIterator.next();
+      if (next.startsWith("sasl") && next.endsWith("@")) {
+        listIterator.set(next + realm);
+      }
+    }
+    return list;
+  }
+
+  /**
+   * Parse a string down to an ID, adding a realm if needed
+   * @param idPair id:data tuple
+   * @param realm realm to add
+   * @return the ID.
+   * @throws IllegalArgumentException if the idPair is invalid
+   */
+  public Id parse(String idPair, String realm) {
+    int firstColon = idPair.indexOf(':');
+    int lastColon = idPair.lastIndexOf(':');
+    if (firstColon == -1 || lastColon == -1 || firstColon != lastColon) {
+      throw new IllegalArgumentException(
+          "ACL '" + idPair + "' not of expected form scheme:id");
+    }
+    String scheme = idPair.substring(0, firstColon);
+    String id = idPair.substring(firstColon + 1);
+    if (id.endsWith("@")) {
+      Preconditions.checkArgument(
+          StringUtils.isNotEmpty(realm),
+          "@ suffixed account but no realm %s", id);
+      id = id + realm;
+    }
+    return new Id(scheme, id);
+  }
+
+  /**
+   * Parse the IDs, adding a realm if needed, setting the permissions
+   * @param principalList id string
+   * @param realm realm to add
+   * @param perms permissions
+   * @return the relevant ACLs
+   * @throws IOException on a parse failure
+   */
+  public List<ACL> buildACLs(String principalList, String realm, int perms)
+      throws IOException {
+    List<String> aclPairs = splitAclPairs(principalList, realm);
+    List<ACL> ids = new ArrayList<ACL>(aclPairs.size());
+    for (String aclPair : aclPairs) {
+      ACL newAcl = new ACL();
+      newAcl.setId(parse(aclPair, realm));
+      newAcl.setPerms(perms);
+      ids.add(newAcl);
+    }
+    return ids;
+  }
+
+  /**
+   * Parse an ACL list. This includes configuration indirection
+   * {@link ZKUtil#resolveConfIndirection(String)}.
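+   * <p>
+   * A sketch of the accepted syntax, using ZK's scheme:id:perms form;
+   * the values are illustrative only:
+   * <pre>
+   *   // a world-readable and -writable ACL entry
+   *   parseACLs("world:anyone:rwcda");
+   * </pre>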
+   * @param zkAclConf configuration string
+   * @return an ACL list
+   * @throws IOException on a bad ACL parse
+   */
+  public List<ACL> parseACLs(String zkAclConf) throws IOException {
+    try {
+      return ZKUtil.parseACLs(ZKUtil.resolveConfIndirection(zkAclConf));
+    } catch (ZKUtil.BadAclFormatException e) {
+      throw new IOException("Parsing " + zkAclConf + " :" + e, e);
+    }
+  }
+
+  /**
+   * JAAS template: {@value}
+   * Note the semicolon on the last entry
+   */
+  private static final String JAAS_ENTRY =
+      "%s { \n"
+      // kerberos module
+      + " com.sun.security.auth.module.Krb5LoginModule required\n"
+      + " keyTab=\"%s\"\n"
+      + " principal=\"%s\"\n"
+      + " useKeyTab=true\n"
+      + " useTicketCache=false\n"
+      + " doNotPrompt=true\n"
+      + " storeKey=true;\n"
+      + "}; \n";
+
+  /**
+   * Create a JAAS entry for insertion
+   * @param context context of the entry
+   * @param principal kerberos principal
+   * @param keytab keytab
+   * @return a context
+   */
+  public String createJAASEntry(
+      String context,
+      String principal,
+      File keytab) {
+    Preconditions.checkArgument(StringUtils.isNotEmpty(principal),
+        "invalid principal");
+    Preconditions.checkArgument(StringUtils.isNotEmpty(context),
+        "invalid context");
+    Preconditions.checkArgument(keytab != null && keytab.isFile(),
+        "Keytab null or missing: %s", keytab);
+    return String.format(
+        Locale.ENGLISH,
+        JAAS_ENTRY,
+        context,
+        keytab.getAbsolutePath(),
+        principal);
+  }
+
+  /**
+   * Create and save a JAAS config file
+   * @param dest destination
+   * @param principal kerberos principal
+   * @param keytab keytab
+   * @throws IOException trouble
+   */
+  public void buildJAASFile(File dest, String principal, File keytab)
+      throws IOException {
+    StringBuilder jaasBinding = new StringBuilder(256);
+    jaasBinding.append(createJAASEntry("Server", principal, keytab));
+    jaasBinding.append(createJAASEntry("Client", principal, keytab));
+    FileUtils.write(dest, jaasBinding.toString());
+  }
+
+  /**
+   * Bind the JVM JAAS setting to the specified JAAS file.
+   *
+   * Important: once a file has been loaded the JVM doesn't pick up
+   * changes
+   * @param jaasFile the JAAS file
+   */
+  public static void bindJVMtoJAASFile(File jaasFile) {
+    String path = jaasFile.getAbsolutePath();
+    LOG.debug("Binding {} to {}", Environment.JAAS_CONF_KEY, path);
+    System.setProperty(Environment.JAAS_CONF_KEY, path);
+  }
+
+  /**
+   * Set the Zookeeper server property
+   * {@link ZookeeperConfigOptions#PROP_ZK_SASL_SERVER_CONTEXT}
+   * to the SASL context. When the ZK server starts, this is the context
+   * which it will read in
+   * @param contextName the name of the context
+   */
+  public static void bindZKToServerJAASContext(String contextName) {
+    System.setProperty(PROP_ZK_SASL_SERVER_CONTEXT, contextName);
+  }
+
+  /**
+   * Reset any system properties related to JAAS
+   */
+  public static void clearJaasSystemProperties() {
+    System.clearProperty(Environment.JAAS_CONF_KEY);
+  }
+
+  /**
+   * Resolve the context of an entry. This is an effective test of
+   * JAAS setup, because it will relay detected problems up.
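+   * <p>
+   * A sketch of use during setup; the file variable and context name are
+   * illustrative only:
+   * <pre>
+   *   RegistrySecurity.bindJVMtoJAASFile(jaasFile);
+   *   RegistrySecurity.validateContext("Server");
+   * </pre>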
+   * @param context context name
+   * @return the entry
+   * @throws FileNotFoundException if there is no context entry found
+   */
+  public static AppConfigurationEntry[] validateContext(String context)
+      throws FileNotFoundException {
+    javax.security.auth.login.Configuration configuration =
+        javax.security.auth.login.Configuration.getConfiguration();
+    AppConfigurationEntry[] entries =
+        configuration.getAppConfigurationEntry(context);
+    if (entries == null) {
+      throw new FileNotFoundException(
+          String.format("Entry \"%s\" not found; "
+              + "JAAS config = %s",
+              context, describeProperty(Environment.JAAS_CONF_KEY)));
+    }
+    return entries;
+  }
+
+  /**
+   * Set the client properties. This forces the ZK client into
+   * failing if it can't auth.
+   * Important: This is JVM-wide.
+   * @param username username
+   * @param context login context
+   * @throws FileNotFoundException if the context cannot be found in the
+   * current JAAS configuration
+   */
+  public static void setZKSaslClientProperties(String username,
+      String context) throws FileNotFoundException {
+    RegistrySecurity.validateContext(context);
+    enableZookeeperSASL();
+    System.setProperty(SP_ZK_SASL_CLIENT_USERNAME, username);
+    System.setProperty(LOGIN_CONTEXT_NAME_KEY, context);
+    bindZKToServerJAASContext(context);
+  }
+
+  /**
+   * Turn ZK SASL on.
+   * Important: This is JVM-wide
+   */
+  protected static void enableZookeeperSASL() {
+    System.setProperty(ENABLE_CLIENT_SASL_KEY, "true");
+  }
+
+  /**
+   * Clear all the ZK SASL properties.
+   * Important: This is JVM-wide
+   */
+  public static void clearZKSaslProperties() {
+    disableZookeeperSASL();
+    System.clearProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY);
+    System.clearProperty(ZookeeperConfigOptions.SP_ZK_SASL_CLIENT_USERNAME);
+  }
+
+  /**
+   * Force disable ZK SASL bindings.
+   * Important: This is JVM-wide
+   */
+  public static void disableZookeeperSASL() {
+    System.clearProperty(ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY);
+  }
+
+  /**
+   * Log details about the current Hadoop user at INFO.
+   * Robust against IOEs when trying to get the current user
+   */
+  public void logCurrentHadoopUser() {
+    try {
+      UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+      LOG.info("Current user = {}", currentUser);
+      UserGroupInformation realUser = currentUser.getRealUser();
+      LOG.info("Real User = {}", realUser);
+    } catch (IOException e) {
+      LOG.warn("Failed to get current user: {}", e.toString(), e);
+    }
+  }
+
+  /**
+   * Stringify a list of ACLs for logging
+   * @param acls ACL list
+   * @return a string for logs, exceptions, ...
+   */
+  public static String aclsToString(List<ACL> acls) {
+    StringBuilder builder = new StringBuilder();
+    if (acls == null) {
+      builder.append("null ACL");
+    } else {
+      builder.append('\n');
+      for (ACL acl1 : acls) {
+        builder.append(acl1.toString()).append(" ");
+      }
+    }
+    return builder.toString();
+  }
+
+  /**
+   * Build up low-level security diagnostics to aid debugging
+   * @return a string to use in diagnostics
+   */
+  public String buildSecurityDiagnostics() {
+    StringBuilder builder = new StringBuilder();
+    builder.append(secure ?
"secure registry; " + : "insecure registry; "); + + builder.append("System ACLs: ").append(aclsToString(systemACLs)); + builder.append(UgiInfo.fromCurrentUser()); + builder.append("Kerberos Realm: ").append(kerberosRealm).append(" ; "); + builder.append(describeProperty(Environment.JAAS_CONF_KEY)); + String sasl = + System.getProperty(ENABLE_CLIENT_SASL_KEY, + ENABLE_CLIENT_SASL_DEFAULT); + boolean saslEnabled = Boolean.valueOf(sasl); + builder.append(describeProperty(ENABLE_CLIENT_SASL_KEY, + ENABLE_CLIENT_SASL_DEFAULT)); + if (saslEnabled) { + builder.append(describeProperty(SP_ZK_SASL_CLIENT_USERNAME)); + builder.append(describeProperty(LOGIN_CONTEXT_NAME_KEY)); + } + builder.append(describeProperty(PROP_ZK_ALLOW_FAILED_SASL_CLIENTS, + "(undefined but defaults to true)")); + builder.append(describeProperty( + PROP_ZK_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE)); + return builder.toString(); + } + + + private static String describeProperty(String name) { + return describeProperty(name, "(undefined)"); + } + + private static String describeProperty(String name, String def) { + return "; " + name + "=" + System.getProperty(name, def); + } + + /** + * Get the default kerberos realm —returning "" if there + * is no realm or other problem + * @return the default realm of the system if it + * could be determined + */ + public static String getDefaultRealmInJVM() { + try { + return KerberosUtil.getDefaultRealm(); + // JDK7 + } catch (ClassNotFoundException e) { + + } catch (NoSuchMethodException e) { + + } catch (IllegalAccessException e) { + + } catch (InvocationTargetException e) { + + } + return ""; + } + + /** + * Create an ACL For a user. + * @param user + * @return the ACL For the specified user. Ifthe username doesn't end + * in "@" then the realm is added + */ + public ACL createACLForUser(UserGroupInformation user, int perms) { + LOG.debug("Creating ACL For ", new UgiInfo(user)); + + if (!secure) { + return ALL_READWRITE_ACCESS; + } else { + String username = user.getUserName(); + if (!username.contains("@")) { + username = username + "@" + kerberosRealm; + LOG.debug("Appending kerberos realm to make {}", username); + } + return new ACL(perms, new Id("sasl", username)); + } + } + + + /** + * On demand string-ifier for UGI with extra details + */ + public static class UgiInfo { + + public static UgiInfo fromCurrentUser() { + try { + return new UgiInfo(UserGroupInformation.getCurrentUser()); + } catch (IOException e) { + LOG.info("Failed to get current user {}", e, e); + return new UgiInfo(null); + + } + } + + private final UserGroupInformation ugi; + + public UgiInfo(UserGroupInformation ugi) { + this.ugi = ugi; + } + + + + @Override + public String toString() { + if (ugi==null) { + return "(null ugi)"; + } + StringBuilder builder = new StringBuilder(); + builder.append(ugi.getUserName()).append(": "); + builder.append(ugi.toString()); + builder.append(" hasKerberosCredentials=").append( + ugi.hasKerberosCredentials()); + builder.append(" isFromKeytab=").append(ugi.isFromKeytab()); + builder.append(" kerberos is enabled in Hadoop =").append(UserGroupInformation.isSecurityEnabled()); + + return builder.toString(); + } + + } + + public static class AclListInfo { + final List acls; + + + public AclListInfo(List acls) { + this.acls = acls; + } + + @Override + public String toString() { + return aclsToString(acls); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/ZookeeperConfigOptions.java 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/ZookeeperConfigOptions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/ZookeeperConfigOptions.java
new file mode 100644
index 0000000..3de7d00
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/services/zk/ZookeeperConfigOptions.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.client.services.zk;
+
+import org.apache.zookeeper.client.ZooKeeperSaslClient;
+import org.apache.zookeeper.server.ZooKeeperSaslServer;
+
+/**
+ * Some ZK-internal configuration options which
+ * are usually set via system properties.
+ */
+public interface ZookeeperConfigOptions {
+
+  /**
+   * This is a property which must be set to enable secure clients
+   */
+  String PROP_ZK_ENABLE_SASL_CLIENT = ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY;
+
+  /**
+   * Set this to the short name of the client, e.g. "user",
+   * not "user/host" or "user/host@realm"
+   */
+  String SP_ZK_SASL_CLIENT_USERNAME = "zookeeper.sasl.client.username";
+
+  /**
+   * The name of the JAAS login context to be used by the ZK server
+   */
+  String PROP_ZK_SASL_SERVER_CONTEXT = ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY;
+
+  /**
+   * Should ZK downgrade on an auth failure?
+   */
+  String PROP_ZK_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE =
+      "zookeeper.maintain_connection_despite_sasl_failure";
+
+  /**
+   * Allow failed SASL clients
+   */
+  String PROP_ZK_ALLOW_FAILED_SASL_CLIENTS =
+      "zookeeper.allowSaslFailedClients";
+
+  String PROP_ZK_SERVER_REALM = "zookeeper.server.realm";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/AddressTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/AddressTypes.java
new file mode 100644
index 0000000..6251936
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/AddressTypes.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.client.types;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The address types. These are string constants rather than an enum,
+ * as this simplifies cross-platform serialization as JSON.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface AddressTypes {
+
+  /**
+   * Any other address
+   */
+  public static final String ADDRESS_OTHER = "";
+
+  /**
+   * URI entries
+   */
+  public static final String ADDRESS_URI = "uri";
+
+  /**
+   * hostname/port. A "/" is used so that IPv6 addresses can be
+   * easily split from the port declaration.
+   */
+  public static final String ADDRESS_HOSTNAME_AND_PORT = "host/port";
+
+  /**
+   * path /a/b/c style
+   */
+  public static final String ADDRESS_PATH = "path";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/Endpoint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/Endpoint.java
new file mode 100644
index 0000000..8a47419
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/Endpoint.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.client.types;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Description of a single service/component endpoint.
+ * It is designed to be marshalled as JSON.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class Endpoint {
+
+  /**
+   * API implemented at the end of the binding
+   */
+  public String api;
+
+  /**
+   * Type of address.
+   * The standard types are defined in
+   * {@link AddressTypes}
+   */
+  public String addressType;
+
+  /**
+   * Protocol type. Some standard types are defined in
+   * {@link ProtocolTypes}
+   */
+  public String protocolType;
+
+  /**
+   * A list of address tuples, whose format depends on the address type
+   */
+  public List<List<String>> addresses;
+
+  public Endpoint() {
+  }
+
+  /**
+   * Build an endpoint with a list of addresses
+   * @param api API name
+   * @param addressType address type
+   * @param protocolType protocol type
+   * @param addrs addresses
+   */
+  public Endpoint(String api,
+      String addressType,
+      String protocolType,
+      List<List<String>> addrs) {
+    this.api = api;
+    this.addressType = addressType;
+    this.protocolType = protocolType;
+    this.addresses = new ArrayList<List<String>>();
+    if (addrs != null) {
+      addresses.addAll(addrs);
+    }
+  }
+
+  /**
+   * Build an endpoint from a list of URIs; each URI
+   * is ASCII-encoded and added to the list of addresses.
+   * @param api API name
+   * @param protocolType protocol type
+   * @param uris URIs to convert to a list of tuples
+   */
+  public Endpoint(String api,
+      String protocolType,
+      URI... uris) {
+    this.api = api;
+    this.addressType = AddressTypes.ADDRESS_URI;
+
+    this.protocolType = protocolType;
+    List<List<String>> addrs = new ArrayList<List<String>>(uris.length);
+    for (URI uri : uris) {
+      addrs.add(RegistryTypeUtils.tuple(uri.toString()));
+    }
+    this.addresses = addrs;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder("Endpoint{");
+    sb.append("api='").append(api).append('\'');
+    sb.append(", addressType='").append(addressType).append('\'');
+    sb.append(", protocolType='").append(protocolType).append('\'');
+    if (addresses != null) {
+      sb.append(", address count=").append(addresses.size());
+    } else {
+      sb.append(", null address list");
+    }
+    sb.append('}');
+    return sb.toString();
+  }
+
+  /**
+   * Validate the record by checking for null fields
+   */
+  public void validate() {
+    Preconditions.checkNotNull(api, "null API field");
+    Preconditions.checkNotNull(addressType, "null addressType field");
+    Preconditions.checkNotNull(protocolType, "null protocolType field");
+    Preconditions.checkNotNull(addresses, "null addresses field");
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/PersistencePolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/PersistencePolicies.java
new file mode 100644
index 0000000..c63aa27
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/PersistencePolicies.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.registry.client.types; + +/** + * Persistence policies for {@link ServiceRecord#persistence} + */ + +public interface PersistencePolicies { + + /** + * The record persists until removed manually: {@value}. + */ + int PERMANENT = 0; + + /** + * Remove when the YARN cluster is restarted: {@value}. + * This does not mean on HA failover; it means after a cluster stop/start. + */ + int CLUSTER_RESTART = 1; + + /** + * Remove when the YARN application defined in the id field + * terminates: {@value}. + */ + int APPLICATION = 2; + + /** + * Remove when the current YARN application attempt ID finishes: {@value}. + */ + int APPLICATION_ATTEMPT = 3; + + /** + * Remove when the YARN container in the ID field finishes. + */ + int CONTAINER = 4; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ProtocolTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ProtocolTypes.java new file mode 100644 index 0000000..09fec3b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ProtocolTypes.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.types; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * some common protocol types + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface ProtocolTypes { + + String PROTOCOL_HADOOP_IPC = "hadoop/IPC"; + String PROTOCOL_HADOOP_IPC_PROTOBUF = "hadoop/protobuf"; + String PROTOCOL_IIOP = "IIOP"; + String PROTOCOL_REST = "REST"; + String PROTOCOL_RMI = "RMI"; + String PROTOCOL_SUN_RPC = "sunrpc"; + String PROTOCOL_THRIFT = "thrift"; + String PROTOCOL_TCP = "tcp"; + String PROTOCOL_UDP = "udp"; + String PROTOCOL_UNKNOWN = ""; + String PROTOCOL_WEBUI = "webui"; + String PROTOCOL_WSAPI = "WS"; + String PROTOCOL_ZOOKEEPER_BINDING = "zookeeper"; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/RegistryPathStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/RegistryPathStatus.java new file mode 100644 index 0000000..04e5136 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/RegistryPathStatus.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.client.types;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Output of a stat() call
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class RegistryPathStatus {
+
+  /**
+   * Path in the registry to this entry
+   */
+  public final String path;
+
+  /**
+   * Timestamp of the entry
+   */
+  public final long time;
+
+  /**
+   * Size of the entry's data
+   */
+  public final long size;
+
+  /**
+   * Number of child entries
+   */
+  public final int children;
+
+  /**
+   * Construct an instance
+   * @param path registry path
+   * @param time timestamp
+   * @param size data size
+   * @param children number of children
+   */
+  public RegistryPathStatus(String path,
+      long time,
+      long size, int children) {
+    this.path = path;
+    this.time = time;
+    this.size = size;
+    this.children = children;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    RegistryPathStatus status = (RegistryPathStatus) o;
+
+    if (size != status.size) {
+      return false;
+    }
+    if (time != status.time) {
+      return false;
+    }
+    if (path != null ? !path.equals(status.path) : status.path != null) {
+      return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    return path != null ? path.hashCode() : 0;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb =
+        new StringBuilder("RegistryPathStatus{");
+    sb.append("path='").append(path).append('\'');
+    sb.append(", time=").append(time);
+    sb.append(", size=").append(size);
+    sb.append(", children=").append(children);
+    sb.append('}');
+    return sb.toString();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ServiceRecord.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ServiceRecord.java
new file mode 100644
index 0000000..0f0be04
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ServiceRecord.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.client.types;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.annotate.JsonAnySetter;
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.annotate.JsonProperty;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * JSON-marshallable description of a single component.
+ * It supports the deserialization of unknown attributes, but does
+ * not support their creation.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ServiceRecord {
+
+  /**
+   * Attribute name of the yarn persistence option: {@value}
+   */
+  public static final String YARN_PERSISTENCE = "yarn:persistence";
+
+  /**
+   * Attribute name of the yarn ID option used with yarn:persistence: {@value}
+   */
+  public static final String YARN_ID = "yarn:id";
+
+  /**
+   * The time the service was registered, as seen by the service making
+   * the registration request.
+   */
+  public long registrationTime;
+
+  /**
+   * Description string
+   */
+  public String description;
+
+  /**
+   * ID. For containers: container ID. For application instances,
+   * application ID.
+   */
+  @JsonProperty(YARN_ID)
+  public String id;
+
+  /**
+   * The persistence attribute defines when a record and any child
+   * entries may be deleted; see {@link PersistencePolicies}.
+   */
+  @JsonProperty(YARN_PERSISTENCE)
+  public int persistence = PersistencePolicies.PERMANENT;
+
+  /**
+   * Map to handle unknown attributes.
+   */
+  @JsonIgnore
+  private Map<String, Object> otherAttributes = new HashMap<String, Object>(4);
+
+  /**
+   * List of endpoints intended for use by external callers
+   */
+  public List<Endpoint> external = new ArrayList<Endpoint>();
+
+  /**
+   * List of internal endpoints
+   */
+  public List<Endpoint> internal = new ArrayList<Endpoint>();
+
+  /**
+   * A string of arbitrary data. This should only be used for small amounts
+   * of data related to the binding ... any large amounts of data
+   * should be published by registered service endpoints.
+   */
+  public String data;
+
+  /**
+   * Create a service record with no ID, description or registration time.
+   * Endpoint lists are set to empty lists.
+   */
+  public ServiceRecord() {
+  }
+
+  /**
+   * Create a service record ... sets the registration time to the current
+   * system time.
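+   * <p>
+   * A sketch of building and populating a record; the ID and URL are
+   * illustrative only:
+   * <pre>
+   *   ServiceRecord record = new ServiceRecord("application_0000_0001",
+   *       "my service", PersistencePolicies.APPLICATION, "");
+   *   record.addExternalEndpoint(new Endpoint("web",
+   *       ProtocolTypes.PROTOCOL_WEBUI, new URI("http://host:8080/")));
+   * </pre>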
+   * @param id service ID
+   * @param description description
+   * @param persistence persistence policy
+   * @param data a small amount of data to be associated with the record
+   */
+  public ServiceRecord(String id,
+      String description,
+      int persistence,
+      String data) {
+    this.id = id;
+    this.description = description;
+    this.persistence = persistence;
+    this.registrationTime = System.currentTimeMillis();
+    this.data = data;
+  }
+
+  /**
+   * Add an external endpoint
+   * @param endpoint endpoint to add
+   */
+  public void addExternalEndpoint(Endpoint endpoint) {
+    Preconditions.checkArgument(endpoint != null);
+    endpoint.validate();
+    external.add(endpoint);
+  }
+
+  /**
+   * Add an internal endpoint
+   * @param endpoint endpoint to add
+   */
+  public void addInternalEndpoint(Endpoint endpoint) {
+    Preconditions.checkArgument(endpoint != null);
+    endpoint.validate();
+    internal.add(endpoint);
+  }
+
+  /**
+   * Look up an internal endpoint
+   * @param api API
+   * @return the endpoint or null if there was no match
+   */
+  public Endpoint getInternalEndpoint(String api) {
+    return findByAPI(internal, api);
+  }
+
+  /**
+   * Look up an external endpoint
+   * @param api API
+   * @return the endpoint or null if there was no match
+   */
+  public Endpoint getExternalEndpoint(String api) {
+    return findByAPI(external, api);
+  }
+
+  /**
+   * Handle unknown attributes by storing them in the
+   * {@link #otherAttributes} map
+   * @param key attribute name
+   * @param value attribute value.
+   */
+  @JsonAnySetter
+  public void setOtherAttribute(String key, Object value) {
+    otherAttributes.put(key, value);
+  }
+
+  /**
+   * The map of "other" attributes set when parsing. These
+   * are not included in the JSON value of this record when it
+   * is generated.
+   * @return a map of any unknown attributes in the deserialized JSON.
+   */
+  @JsonIgnore
+  public Map<String, Object> getOtherAttributes() {
+    return otherAttributes;
+  }
+
+  /**
+   * Find an endpoint by its API
+   * @param list list
+   * @param api api name
+   * @return the endpoint or null if there was no match
+   */
+  private Endpoint findByAPI(List<Endpoint> list, String api) {
+    for (Endpoint endpoint : list) {
+      if (endpoint.api.equals(api)) {
+        return endpoint;
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb =
+        new StringBuilder("ServiceRecord{");
+    sb.append("id='").append(id).append('\'');
+    sb.append(", persistence='").append(persistence).append('\'');
+    sb.append(", description='").append(description).append('\'');
+    sb.append(", external endpoints: {");
+    for (Endpoint endpoint : external) {
+      sb.append(endpoint).append("; ");
+    }
+    sb.append("}, internal endpoints: {");
+    for (Endpoint endpoint : internal) {
+      sb.append(endpoint).append("; ");
+    }
+    sb.append('}');
+    sb.append('}');
+    return sb.toString();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ServiceRecordHeader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ServiceRecordHeader.java
new file mode 100644
index 0000000..76ab562
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/client/types/ServiceRecordHeader.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.client.types;
+
+/**
+ * Service record header; access to the byte array kept private
+ * to avoid findbugs warnings of mutability
+ */
+public class ServiceRecordHeader {
+  /**
+   * Header of a service record: "jsonservicerec"
+   * By making this over 12 bytes long, we can auto-determine which entries
+   * in a listing are too short to contain a record without getting their data
+   */
+  private static final byte[] RECORD_HEADER = {
+      'j', 's', 'o', 'n',
+      's', 'e', 'r', 'v', 'i', 'c', 'e',
+      'r', 'e', 'c'
+  };
+
+  /**
+   * Get the length of the record header
+   * @return the header length
+   */
+  public static int getLength() {
+    return RECORD_HEADER.length;
+  }
+
+  /**
+   * Get a clone of the record header
+   * @return the new record header.
+   */
+  public static byte[] getData() {
+    byte[] h = new byte[RECORD_HEADER.length];
+    System.arraycopy(RECORD_HEADER, 0, h, 0, RECORD_HEADER.length);
+    return h;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/AddingCompositeService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/AddingCompositeService.java
new file mode 100644
index 0000000..5add965
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/AddingCompositeService.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.server.services;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.service.Service;
+
+/**
+ * Composite service that exports the add/remove methods
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class AddingCompositeService extends CompositeService {
+
+  public AddingCompositeService(String name) {
+    super(name);
+  }
+
+  @Override
+  public void addService(Service service) {
+    super.addService(service);
+  }
+
+  @Override
+  public boolean removeService(Service service) {
+    return super.removeService(service);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/MicroZookeeperService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/MicroZookeeperService.java
new file mode 100644
index 0000000..9dde793
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/MicroZookeeperService.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.server.services;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
+import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.registry.client.services.BindingInformation;
+import org.apache.hadoop.yarn.registry.client.services.RegistryBindingSource;
+import org.apache.hadoop.yarn.registry.client.services.RegistryInternalConstants;
+import org.apache.hadoop.yarn.registry.client.services.zk.RegistrySecurity;
+import org.apache.hadoop.yarn.registry.client.services.zk.ZookeeperConfigOptions;
+import org.apache.zookeeper.server.ServerCnxnFactory;
+import org.apache.zookeeper.server.ZooKeeperServer;
+import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+
+/**
+ * This is a small, localhost Zookeeper service instance that is contained
+ * in a YARN service; it has been derived from Apache Twill.
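+ * <p>
+ * A sketch of intended use in a test case; the service name is
+ * illustrative only:
+ * <pre>
+ *   MicroZookeeperService zookeeper = new MicroZookeeperService("testzk");
+ *   zookeeper.init(new Configuration());
+ *   zookeeper.start();
+ *   String quorum = zookeeper.getConnectionString();
+ * </pre>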
+ *
+ * It implements {@link RegistryBindingSource} and provides binding
+ * information once started. Until start() is called, the hostname and
+ * port are undefined; accordingly, the connection accessors raise an
+ * exception when called in this condition.
+ *
+ * If you wish to chain together a registry service with this one under
+ * the same CompositeService, this service must be added
+ * as a child first.
+ *
+ * It also sets the configuration parameter
+ * {@link RegistryConstants#KEY_REGISTRY_ZK_QUORUM}
+ * to its connection string. Any code with access to the service
+ * configuration can view it.
+ */
+@InterfaceStability.Evolving
+public class MicroZookeeperService
+    extends AbstractService
+    implements RegistryBindingSource, RegistryConstants,
+    ZookeeperConfigOptions,
+    MicroZookeeperServiceKeys {
+
+  private static final Logger
+      LOG = LoggerFactory.getLogger(MicroZookeeperService.class);
+
+  private File instanceDir;
+  private File dataDir;
+  private int tickTime;
+  private int port;
+  private String host;
+  private boolean secureServer;
+
+  private ServerCnxnFactory factory;
+  private BindingInformation binding;
+  private File confDir;
+  private StringBuilder diagnostics = new StringBuilder();
+
+  /**
+   * Create an instance
+   * @param name service name
+   */
+  public MicroZookeeperService(String name) {
+    super(name);
+  }
+
+  /**
+   * Get the connection string.
+   * @return the string
+   * @throws IllegalStateException if the connection is not yet valid
+   */
+  public String getConnectionString() {
+    Preconditions.checkState(factory != null, "service not started");
+    InetSocketAddress addr = factory.getLocalAddress();
+    return String.format("%s:%d", addr.getHostName(), addr.getPort());
+  }
+
+  /**
+   * Get the connection address
+   * @return the connection as an address
+   * @throws IllegalStateException if the connection is not yet valid
+   */
+  public InetSocketAddress getConnectionAddress() {
+    Preconditions.checkState(factory != null, "service not started");
+    return factory.getLocalAddress();
+  }
+
+  /**
+   * Create an inet socket addr from the local host + port number
+   * @param port port to use
+   * @return a (hostname, port) pair
+   * @throws UnknownHostException if the server cannot resolve the host
+   */
+  private InetSocketAddress getAddress(int port) throws UnknownHostException {
+    return new InetSocketAddress(host, port < 0 ?
0 : port); + } + + /** + * Initialize the service, including choosing a path for the data + * @param conf configuration + * @throws Exception + */ + @Override + protected void serviceInit(Configuration conf) throws Exception { + port = conf.getInt(KEY_ZKSERVICE_PORT, 0); + tickTime = conf.getInt(KEY_ZKSERVICE_TICK_TIME, + ZooKeeperServer.DEFAULT_TICK_TIME); + String instancedirname = conf.getTrimmed( + KEY_ZKSERVICE_DIR, ""); + host = conf.getTrimmed(KEY_ZKSERVICE_HOST, DEFAULT_ZKSERVICE_HOST); + if (instancedirname.isEmpty()) { + File testdir = new File(System.getProperty("test.dir", "target")); + instanceDir = new File(testdir, "zookeeper" + getName()); + } else { + instanceDir = new File(instancedirname); + FileUtil.fullyDelete(instanceDir); + } + LOG.debug("Instance directory is {}", instanceDir); + mkdirStrict(instanceDir); + dataDir = new File(instanceDir, "data"); + confDir = new File(instanceDir, "conf"); + mkdirStrict(dataDir); + mkdirStrict(confDir); + super.serviceInit(conf); + } + + /** + * Create a directory, ignoring if the dir is already there, + * and failing if a file or something else was at the end of that + * path + * @param dir dir to guarantee the existence of + * @throws IOException IO problems, or path exists but is not a dir + */ + private void mkdirStrict(File dir) throws IOException { + if (!dir.mkdirs()) { + if (!dir.isDirectory()) { + throw new IOException("Failed to mkdir " + dir); + } + } + } + + /** + * Add a formatted string to the diagnostics + * @param text + * @param args + */ + protected void addDiagnostics(String text, Object ... args) { + diagnostics.append(String.format(text, args)).append('\n'); + } + + /** + * Get the diagnostics info + * @return the diagnostics string built up + */ + public String getDiagnostics() { + return diagnostics.toString(); + } + + /** + * set up security. this must be done prior to creating + * the ZK instance, as it sets up JAAS if that has not been done already. + * + * @return true if the cluster has security enabled. + */ + public boolean setupSecurity() throws IOException { + Configuration conf = getConfig(); + String jaasContext = conf.getTrimmed(KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT); + secureServer = StringUtils.isNotEmpty(jaasContext); + if (secureServer) { + RegistrySecurity.validateContext(jaasContext); + RegistrySecurity.bindZKToServerJAASContext(jaasContext); + // policy on failed auth + System.setProperty(PROP_ZK_ALLOW_FAILED_SASL_CLIENTS, + conf.get(KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS, + "true")); + + //needed so that you can use sasl: strings in the registry + System.setProperty(RegistryInternalConstants.ZOOKEEPER_AUTH_PROVIDER +".1", + RegistryInternalConstants.SASLAUTHENTICATION_PROVIDER); + addDiagnostics(new RegistrySecurity(conf).buildSecurityDiagnostics()); + String serverContext = + System.getProperty(PROP_ZK_SASL_SERVER_CONTEXT); + addDiagnostics("Server JAAS context s = %s", serverContext); + return true; + } else { + return false; + } + } + + /** + * Startup: start ZK. It is only after this that + * the binding information is valid. 
+ * @throws Exception + */ + @Override + protected void serviceStart() throws Exception { + + setupSecurity(); + + ZooKeeperServer zkServer = new ZooKeeperServer(); + FileTxnSnapLog ftxn = new FileTxnSnapLog(dataDir, dataDir); + zkServer.setTxnLogFactory(ftxn); + zkServer.setTickTime(tickTime); + + LOG.info("Starting Local Zookeeper service"); + factory = ServerCnxnFactory.createFactory(); + factory.configure(getAddress(port), -1); + factory.startup(zkServer); + + String connectString = getConnectionString(); + LOG.info("In memory ZK started at {}\n", connectString); + + if (LOG.isDebugEnabled()) { + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + zkServer.dumpConf(pw); + pw.flush(); + LOG.debug(sw.toString()); + } + binding = new BindingInformation(); + binding.ensembleProvider = new FixedEnsembleProvider(connectString); + binding.description = + getName() + " reachable at \"" + connectString + "\""; + + addDiagnostics(binding.description); + // finally: set the binding information in the config + getConfig().set(KEY_REGISTRY_ZK_QUORUM, connectString); + } + + /** + * When the service is stopped, it deletes the data directory + * and its contents + * @throws Exception + */ + @Override + protected void serviceStop() throws Exception { + if (factory != null) { + factory.shutdown(); + factory = null; + } + if (dataDir != null) { + FileUtil.fullyDelete(dataDir); + } + } + + @Override + public BindingInformation supplyBindingInformation() { + Preconditions.checkNotNull(binding, + "Service is not started: binding information undefined"); + return binding; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/MicroZookeeperServiceKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/MicroZookeeperServiceKeys.java new file mode 100644 index 0000000..a2bd29b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/MicroZookeeperServiceKeys.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.server.services; + +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; + +/** + * Service keys for configuring the ZK service. + * These are not used in registry clients or the RM-side service, + * so are kept separate. 
+ */
+public interface MicroZookeeperServiceKeys {
+  public static final String ZKSERVICE_PREFIX =
+      RegistryConstants.REGISTRY_PREFIX + "zk.service.";
+  /**
+   * Key to define the JAAS context for the ZK service
+   */
+  public static final String KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT =
+      ZKSERVICE_PREFIX + "jaas.context";
+
+  /**
+   * ZK server tick time: {@value}
+   */
+  public static final String KEY_ZKSERVICE_TICK_TIME =
+      ZKSERVICE_PREFIX + "ticktime";
+
+  /**
+   * Host to register on
+   */
+  public static final String KEY_ZKSERVICE_HOST = ZKSERVICE_PREFIX + "host";
+  /**
+   * Default host to serve on -this is "localhost" as it
+   * is the only one guaranteed to be available.
+   */
+  public static final String DEFAULT_ZKSERVICE_HOST = "localhost";
+  /**
+   * Port; 0 or below means "any": {@value}
+   */
+  public static final String KEY_ZKSERVICE_PORT = ZKSERVICE_PREFIX + "port";
+
+  /**
+   * Directory containing data: {@value}
+   */
+  public static final String KEY_ZKSERVICE_DIR = ZKSERVICE_PREFIX + "dir";
+
+  /**
+   * Should failed SASL clients be allowed: {@value}?
+   *
+   * Default is the ZK default: true
+   */
+  public static final String KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS =
+      ZKSERVICE_PREFIX + "allow.failed.sasl.clients";
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/RMRegistryOperationsService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/RMRegistryOperationsService.java
new file mode 100644
index 0000000..1a8d0a8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/yarn/registry/server/services/RMRegistryOperationsService.java
@@ -0,0 +1,528 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.registry.server.services; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.commons.lang.StringUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.api.BackgroundCallback; +import org.apache.curator.framework.api.CuratorEvent; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.registry.client.binding.BindingUtils; +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidRecordException; +import org.apache.hadoop.yarn.registry.client.services.RegistryBindingSource; +import org.apache.hadoop.yarn.registry.client.services.RegistryOperationsService; +import org.apache.hadoop.yarn.registry.client.services.zk.RegistrySecurity; +import org.apache.hadoop.yarn.registry.client.types.PersistencePolicies; +import org.apache.hadoop.yarn.registry.client.types.RegistryPathStatus; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.data.ACL; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.EOFException; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Extends the registry operations with extra support for resource management + * operations, including creating and cleaning up the registry. + * + * These actions are all implemented as event handlers to operations + * which come from the RM. 
+ *
+ * This service is expected to be executed by a user with the permissions
+ * to manipulate the entire registry.
+ */
+@InterfaceAudience.LimitedPrivate("YARN")
+@InterfaceStability.Evolving
+public class RMRegistryOperationsService extends RegistryOperationsService {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RMRegistryOperationsService.class);
+
+  private final ExecutorService executor;
+
+  private PurgePolicy purgeOnCompletionPolicy = PurgePolicy.PurgeAll;
+
+  public RMRegistryOperationsService(String name) {
+    this(name, null);
+  }
+
+  public RMRegistryOperationsService(String name,
+      RegistryBindingSource bindingSource) {
+    super(name, bindingSource);
+
+    executor = Executors.newCachedThreadPool(
+        new ThreadFactory() {
+          AtomicInteger counter = new AtomicInteger(1);
+
+          @Override
+          public Thread newThread(Runnable r) {
+            return new Thread(r,
+                "RegistryOperations " + counter.getAndIncrement());
+          }
+        });
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+    RegistrySecurity registrySecurity = getRegistrySecurity();
+    if (registrySecurity.isSecure()) {
+      ACL sasl = registrySecurity.createSaslACLFromCurrentUser(ZooDefs.Perms.ALL);
+      registrySecurity.addSystemACL(sasl);
+      LOG.info("Registry System ACLs: {}",
+          RegistrySecurity.aclsToString(
+              registrySecurity.getSystemACLs()));
+    }
+  }
+
+  /**
+   * Start the service, including creating base directories with permissions
+   * @throws Exception on any failure
+   */
+  @Override
+  protected void serviceStart() throws Exception {
+    super.serviceStart();
+
+    // create the root directories
+    createRootRegistryPaths();
+  }
+
+  /**
+   * Stop the service: halt the executor.
+   * @throws Exception on any failure
+   */
+  @Override
+  protected void serviceStop() throws Exception {
+    stopExecutor();
+    super.serviceStop();
+  }
+
+  /**
+   * Stop the executor if it is not null.
+   * This uses {@link ExecutorService#shutdownNow()}
+   * and so does not block until queued tasks have completed.
+   */
+  protected synchronized void stopExecutor() {
+    if (executor != null) {
+      executor.shutdownNow();
+    }
+  }
+
+  /**
+   * Get the executor
+   * @return the executor
+   */
+  protected ExecutorService getExecutor() {
+    return executor;
+  }
+
+  /**
+   * Create the initial registry paths
+   * @throws IOException any failure
+   */
+  @VisibleForTesting
+  public void createRootRegistryPaths() throws IOException {
+    // create the root directories
+    List<ACL> systemACLs = getRegistrySecurity().getSystemACLs();
+    LOG.info("System ACLs {}",
+        RegistrySecurity.aclsToString(systemACLs));
+
+    maybeCreate("", CreateMode.PERSISTENT, systemACLs, false);
+    maybeCreate(PATH_USERS, CreateMode.PERSISTENT,
+        systemACLs, false);
+    maybeCreate(PATH_SYSTEM_SERVICES,
+        CreateMode.PERSISTENT,
+        systemACLs, false);
+  }
+
+  /**
+   * Create the path for a user asynchronously
+   * @param username username
+   * @throws IOException any failure
+   */
+  @VisibleForTesting
+  public void initUserRegistryAsync(final String username) throws IOException {
+    submit(new Callable<Object>() {
+      @Override
+      public Object call() throws Exception {
+        initUserRegistry(username);
+        return null;
+      }
+    });
+  }
+
+  /**
+   * Perform the actual user registry initialization actions
+   * @param username username
+   * @throws IOException any failure
+   */
+  public void initUserRegistry(String username) throws IOException {
+    String path = homeDir(username);
+    maybeCreate(path, CreateMode.PERSISTENT,
+        createAclForUser(username), false);
+  }
+
+  /**
+   * Get the path to a user's home dir
+   * @param username username
+   * @return a path for services underneath
+   */
+  protected String homeDir(String username) {
+    return BindingUtils.userPath(username);
+  }
+
+  /**
+   * Set up the ACL for the user.
+   * Important: this must run client-side as it needs
+   * to know the id:pass tuple for a user
+   * @param username user name
+   * @return an ACL list
+   * @throws IOException ACL creation/parsing problems
+   */
+  private List<ACL> createAclForUser(String username) throws IOException {
+    // todo: make more specific for that user.
+    return getUserAcl();
+  }
+
+  public PurgePolicy getPurgeOnCompletionPolicy() {
+    return purgeOnCompletionPolicy;
+  }
+
+  public void setPurgeOnCompletionPolicy(PurgePolicy purgeOnCompletionPolicy) {
+    this.purgeOnCompletionPolicy = purgeOnCompletionPolicy;
+  }
+
+  public void onApplicationAttemptRegistered(ApplicationAttemptId attemptId,
+      String host, int rpcport, String trackingurl) throws IOException {
+
+  }
+
+  public void onApplicationLaunched(ApplicationId id) throws IOException {
+
+  }
+
+  /**
+   * Actions to take as an AM registers itself with the RM.
+ * @param attemptId attempt ID + * @throws IOException problems + */ + public void onApplicationMasterRegistered(ApplicationAttemptId attemptId) throws + IOException { + } + + /** + * Actions to take when the AM container is completed + * @param containerId container ID + * @throws IOException problems + */ + public void onAMContainerFinished(ContainerId containerId) throws + IOException { + LOG.info("AM Container {} finished, purging application attempt records", + containerId); + + // remove all application attempt entries + purgeAppAttemptRecords(containerId.getApplicationAttemptId()); + + // also treat as a container finish to remove container + // level records for the AM container + onContainerFinished(containerId); + } + + /** + * remove all application attempt entries + * @param attemptId attempt ID + */ + protected void purgeAppAttemptRecords(ApplicationAttemptId attemptId) { + purgeRecordsAsync("/", + attemptId.toString(), + PersistencePolicies.APPLICATION_ATTEMPT); + } + + /** + * Actions to take when an application attempt is completed + * @param attemptId application ID + * @throws IOException problems + */ + public void onApplicationAttemptUnregistered(ApplicationAttemptId attemptId) + throws IOException { + LOG.info("Application attempt {} unregistered, purging app attempt records", + attemptId); + purgeAppAttemptRecords(attemptId); + + } + + + /** + * Actions to take when an application is completed + * @param id application ID + * @throws IOException problems + */ + public void onApplicationCompleted(ApplicationId id) + throws IOException { + LOG.info("Application {} completed, purging application-level records", + id); + purgeRecordsAsync("/", + id.toString(), + PersistencePolicies.APPLICATION); + } + + public void onApplicationAttemptAdded(ApplicationAttemptId appAttemptId) { + } + + /** + * This is the event where the user is known, so the user directory + * can be created + * @param applicationId application ID + * @param user username + * @throws IOException problems + */ + public void onStateStoreEvent(ApplicationId applicationId, String user) throws + IOException { + initUserRegistryAsync(user); + } + + + /** + * Actions to take when the AM container is completed + * @param id container ID + * @throws IOException problems + */ + public void onContainerFinished(ContainerId id) throws IOException { + LOG.info("Container {} finished, purging container-level records", + id); + purgeRecordsAsync("/", + id.toString(), + PersistencePolicies.CONTAINER); + } + + /** + * Policy to purge entries + */ + public enum PurgePolicy { + PurgeAll, + FailOnChildren, + SkipOnChildren + } + + /** + * Queue an async operation to purge all matching records under a base path. + *
+   * <ol>
+   *   <li>Uses a depth first search</li>
+   *   <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
+   *   <li>If a record matches then it is deleted without any child searches</li>
+   *   <li>Deletions will be asynchronous if a callback is provided</li>
+   * </ol>
+   *
+   * @param path base path
+   * @param id ID for service record.id
+   * @param persistencePolicyMatch ID for the persistence policy to match:
+   * no match, no delete.
+   * @return a future that returns the #of records deleted
+   */
+  @VisibleForTesting
+  public Future<Integer> purgeRecordsAsync(String path,
+      String id,
+      int persistencePolicyMatch) {
+
+    LOG.info("Purging records under {} with ID {} and policy {}",
+        path, id, persistencePolicyMatch);
+    return submit(new AsyncPurgeRegistry(path, id, persistencePolicyMatch,
+        new DeleteCompletionCallback()));
+  }
+
+  /**
+   * Recursive operation to purge all matching records under a base path.
+   *
+   * <ol>
+   *   <li>Uses a depth first search</li>
+   *   <li>A match is on ID and persistence policy, or, if policy==-1, any match</li>
+   *   <li>If a record matches then it is deleted without any child searches</li>
+   *   <li>Deletions will be asynchronous if a callback is provided</li>
+   * </ol>
+   *
+   * @param path base path
+   * @param id ID for service record.id
+   * @param persistencePolicyMatch ID for the persistence policy to match: no match, no delete.
+   * If set to -1 or below, "don't check"
+   * @param purgePolicy what to do if there is a matching record with children
+   * @return the number of calls to the zkDelete() operation. This is purely for
+   * testing.
+   * @throws IOException problems
+   * @throws PathIsNotEmptyDirectoryException if an entry cannot be deleted
+   * as it has children and the purge policy is FailOnChildren
+   */
+  @VisibleForTesting
+  public int purgeRecords(String path,
+      String id,
+      int persistencePolicyMatch,
+      PurgePolicy purgePolicy,
+      BackgroundCallback callback) throws IOException {
+    Preconditions.checkArgument(StringUtils.isNotEmpty(path),
+        "Empty 'path' argument");
+    Preconditions.checkArgument(StringUtils.isNotEmpty(id),
+        "Empty 'id' argument");
+
+    // list this path's children
+    RegistryPathStatus[] entries = list(path);
+
+    boolean toDelete = false;
+    // look at self to see if it has a service record
+    try {
+      ServiceRecord serviceRecord = resolve(path);
+      // there is now an entry here.
+      toDelete = serviceRecord.id.equals(id)
+          && (persistencePolicyMatch < 0
+              || serviceRecord.persistence == persistencePolicyMatch);
+    } catch (EOFException ignored) {
+      // ignore
+    } catch (InvalidRecordException ignored) {
+      // ignore
+    }
+
+    if (toDelete && entries.length > 0) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Match on record @ {} with children ", path);
+      }
+      // there's children
+      switch (purgePolicy) {
+        case SkipOnChildren:
+          // don't do the deletion... continue to next record
+          toDelete = false;
+          break;
+        case PurgeAll:
+          // mark for deletion
+          toDelete = true;
+          entries = new RegistryPathStatus[0];
+          break;
+        case FailOnChildren:
+          throw new PathIsNotEmptyDirectoryException(path);
+      }
+    }
+
+    int deleteOps = 0;
+
+    if (toDelete) {
+      deleteOps++;
+      zkDelete(path, true, callback);
+    }
+
+    // now go through the children
+    for (RegistryPathStatus status : entries) {
+      deleteOps += purgeRecords(status.path,
+          id,
+          persistencePolicyMatch,
+          purgePolicy,
+          callback);
+    }
+
+    return deleteOps;
+  }
+
+
+  /**
+   * Callback for delete operations completing
+   */
+  protected static class DeleteCompletionCallback implements BackgroundCallback {
+    @Override
+    public void processResult(CuratorFramework client,
+        CuratorEvent event) throws
+        Exception {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Delete event {}", event);
+      }
+    }
+  }
+
+  /**
+   * Submit a callable
+   * @param callable callable
+   * @param <V> type of the final get
+   * @return a future to wait on
+   */
+  public <V> Future<V> submit(Callable<V> callable) {
+    LOG.debug("Submitting {}", callable);
+    return getExecutor().submit(callable);
+  }
+
+  /**
+   * An async registry action
+   */
+  private class AsyncPurgeRegistry implements Callable<Integer> {
+
+    final BackgroundCallback callback;
+    private final String path;
+    private final String id;
+    private final int persistencePolicyMatch;
+
+    private AsyncPurgeRegistry(String path,
+        String id,
+        int persistencePolicyMatch,
+        BackgroundCallback callback) {
+      this.path = path;
+      this.id = id;
+      this.persistencePolicyMatch = persistencePolicyMatch;
+      this.callback = callback;
+    }
+
+    @Override
+    public Integer call() throws Exception {
+      try {
+        LOG.info("executing {}", this);
+        return purgeRecords(path,
+            id,
+            persistencePolicyMatch,
+            purgeOnCompletionPolicy,
+            callback);
+      } catch (IOException e) {
+        LOG.info("Error during {}: {}", this, e, e);
+        return 0;
+      }
+    }
+
+    @Override
+    public String toString() {
+      return String.format(
+          "record purge under %s with ID %s and policy %d",
+          path, id, persistencePolicyMatch);
+    }
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/resources/.keep b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/resources/.keep
new file mode 100644
index 0000000..e69de29
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.tla b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.tla
new file mode 100644
index 0000000..3310e9e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.tla
@@ -0,0 +1,503 @@
+---------------------------- MODULE yarnregistry ----------------------------
+
+EXTENDS FiniteSets, Sequences, Naturals, TLC
+
+
+(*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *)
+
+(*
+
+============================================================================
+
+This defines the YARN registry in terms of operations on sets of records.
+
+Every registry entry is represented as a record containing both the path and the data.
+
+It assumes that:
+
+1. operations on this set are immediate.
+2. selection operations (such as \A and \E) are atomic.
+3. changes are immediately visible to all other users of the registry.
+4. This clearly implies that changes are visible in the sequence in which they happen.
+
+A Zookeeper-based registry does not meet all those assumptions:
+
+1. changes may take time to propagate across the ZK quorum, hence changes cannot
+be considered immediate from the perspective of other registry clients
+(assumptions (1) and (3)).
+
+2. Selection operations may not be atomic (assumption (2)).
+
+Operations will still happen in the order received by the elected ZK master.
+
+A stricter definition would try to state that all operations are eventually
+true excluding other changes happening during a sequence of actions. This is
+left as an exercise for the reader.
+
+The specification also omits all coverage of the permissions policy.
This is something
+which can be tuned by whoever sets up the Resource Manager.
+*)
+
+
+
+CONSTANTS
+    PathChars,       \* the set of valid characters in a path
+    Paths,           \* the set of all possible valid paths
+    Data,            \* the set of all possible sequences of bytes
+    Address,         \* the set of all possible address n-tuples
+    Addresses,       \* the set of all possible address instances
+    Endpoints,       \* the set of all possible endpoints
+    PersistPolicies, \* the set of persistence policies
+    ServiceRecords,  \* all service records
+    Registries,      \* the set of all possible registries
+    PutActions,      \* all possible put actions
+    DeleteActions,   \* all possible delete actions
+    PurgeActions,    \* all possible purge actions
+    MkdirActions     \* all possible mkdir actions
+
+
+
+(* the registry *)
+VARIABLE registry
+
+(* Sequence of actions to apply to the registry *)
+VARIABLE actions
+
+----------------------------------------------------------------------------------------
+(* Tuple of all variables. *)
+
+
+vars == << registry, actions >>
+
+
+----------------------------------------------------------------------------------------
+
+
+
+
+(* Persistence policy *)
+PersistPolicySet == {
+    "",                     \* Undefined; field not present. PERMANENT is implied.
+    "PERMANENT",            \* persists until explicitly removed
+    "APPLICATION",          \* persists until the application finishes
+    "APPLICATION-ATTEMPT",  \* persists until the application attempt finishes
+    "CONTAINER"             \* persists until the container finishes
+    }
+
+(* Type invariants. *)
+TypeInvariant ==
+    /\ \A p \in PersistPolicies: p \in PersistPolicySet
+
+
+
+----------------------------------------------------------------------------------------
+
+
+
+(*
+
+An Entry is defined as a path, and the actual
+data which it contains.
+
+By including the path in an entry, we avoid having to define some
+function mapping Path -> entry. Instead a registry can be defined as a
+set of RegistryEntries matching the validity criteria.
+
+*)
+
+RegistryEntry == [
+    \* The path to the entry
+    path: Paths,
+
+    \* the data in the entry
+    data: Data
+    ]
+
+
+(*
+ An endpoint in a service record
+*)
+Endpoint == [
+    \* API of the endpoint: some identifier
+    api: STRING,
+
+    \* A list of address n-tuples
+    addresses: Addresses
+]
+
+(*
+ A service record
+*)
+ServiceRecord == [
+    \* ID -used when applying the persistence policy
+    id: STRING,
+
+    \* the persistence policy
+    persistence: PersistPolicySet,
+
+    \* A description
+    description: STRING,
+
+    \* A set of endpoints
+    external: Endpoints,
+
+    \* Endpoints intended for use internally
+    internal: Endpoints
+]
+
+
+----------------------------------------------------------------------------------------
+
+(* Action Records *)
+
+putAction == [
+    type: {"put"},
+    path: Paths,
+    record: ServiceRecord
+]
+
+deleteAction == [
+    type: {"delete"},
+    path: Paths,
+    recursive: BOOLEAN
+]
+
+purgeAction == [
+    type: {"purge"},
+    path: Paths,
+    id: STRING,
+    persistence: PersistPolicySet
+]
+
+mkNodeAction == [
+    type: {"mknode"},
+    path: Paths,
+    parents: BOOLEAN
+]
+
+
+----------------------------------------------------------------------------------------
+
+(*
+
+ Path operations
+
+*)
+
+(* parent is defined for non-empty sequences *)
+
+parent(path) == SubSeq(path, 1, Len(path)-1)
+
+isParent(path, c) == path = parent(c)
+
+----------------------------------------------------------------------------------------
+(*
+Registry Access Operations
+*)
+(*
+Lookup all entries in a registry with a matching path
+*)
+lookup(Registry, path) == {entry \in Registry: entry.path = path}
+
+
+(*
+A path exists in the registry iff there is an entry with that path
+*)
+
+exists(Registry, path) == lookup(Registry, path) /= {}
+
+(* parent entry, or an empty set if there is none *)
+parentEntry(Registry, path) == lookup(Registry, parent(path))
+
+isRootPath(path) == path = <<>>
+
+(* the root entry *)
+isRootEntry(entry) == entry.path = <<>>
+
+
+(* A path p is an ancestor of another path d if they are different, and the path d
+   starts with path p *)
+
+isAncestorOf(path, d) ==
+    /\ path /= d
+    /\ \E k \in 1..Len(d): SubSeq(d, 1, k) = path
+
+
+ancestorPathOf(path) ==
+    {a \in Paths: isAncestorOf(a, path)}
+
+(* the set of all children of a path in the registry *)
+
+children(R, path) == {c \in R: isParent(path, c.path)}
+
+(* a path has children if the children() operator does not return the empty set *)
+hasChildren(R, path) == children(R, path) /= {}
+
+(* Descendants: the children of a path and the descendants of those children *)
+
+descendants(R, path) == {e \in R: isAncestorOf(path, e.path)}
+
+(* Ancestors: all entries in the registry whose path is an ancestor of the path argument *)
+ancestors(R, path) == {e \in R: isAncestorOf(e.path, path)}
+
+(*
+The set of entries that are a path and its descendants
+*)
+pathAndDescendants(R, path) ==
+    lookup(R, path) \union descendants(R, path)
+
+
+(*
+
+For validity, all entries must match the following criteria
+ *)
+
+validRegistry(R) ==
+        \* there can be at most one entry for a path.
+        /\ \A e \in R: Cardinality(lookup(R, e.path)) = 1
+
+        \* There's at least one root entry
+        /\ \E e \in R: isRootEntry(e)
+
+        \* an entry must be the root entry or have a parent entry
+        /\ \A e \in R: isRootEntry(e) \/ exists(R, parent(e.path))
+
+        \* If the entry has data, it must be a service record
+        /\ \A e \in R: (e.data = << >> \/ e.data \in ServiceRecords)
+
+
+----------------------------------------------------------------------------------------
+(*
+ Registry Manipulation
+*)
+
+(*
+ An entry can be put into the registry iff
+ -its parent is present or it is the root entry
+*)
+canPut(R, e) ==
+    isRootEntry(e) \/ exists(R, parent(e.path))
+
+(* put adds/replaces an entry if permitted *)
+
+put(R, e) ==
+    /\ canPut(R, e)
+    /\ R' = (R \ lookup(R, e.path)) \union {e}
+
+
+(*
+ mknode() adds a new empty entry where there was none before, iff
+ -the parent exists
+ -it meets the requirement for being "put"
+*)
+
+mknodeSimple(R, path) ==
+    LET record == [ path |-> path, data |-> <<>> ]
+    IN \/ exists(R, path)
+       \/ (exists(R, parent(path)) /\ canPut(R, record) /\ (R' = R \union {record} ))
+
+
+(*
+For all parents, the mknodeSimple criteria must apply.
+This could be defined recursively, though as TLA+ does not support recursion,
+an alternative is required.
+
+Because this specification is declaring the final state of an operation, not
+its implementation, all that is needed is to describe those parents.
+
+It declares that the mknodeSimple state applies to the path and all its
+ancestor paths in the set R'
+
+*)
+mknodeWithParents(R, path) ==
+    /\ \A p2 \in ancestorPathOf(path) : mknodeSimple(R, p2)
+    /\ mknodeSimple(R, path)
+
+
+mknode(R, path, recursive) ==
+    IF recursive THEN mknodeWithParents(R, path) ELSE mknodeSimple(R, path)
+
+(* Deletion is set difference on any existing entries *)
+
+simpleDelete(R, path) ==
+    /\ ~isRootPath(path)
+    /\ children(R, path) = {}
+    /\ R' = R \ lookup(R, path)
+
+(* recursive delete: neither the path nor its descendants exist in the new registry *)
+
+recursiveDelete(R, path) ==
+    \* Root path: the new registry is the initial registry again
+    /\ isRootPath(path) => R' = { [ path |-> <<>>, data |-> <<>> ] }
+    \* Any other entry: the new registry is a set with any existing
+    \* entry for that path removed, along with all of its descendants
+    /\ ~isRootPath(path) => R' = R \ ( lookup(R, path) \union descendants(R, path))
+
+
+(* Delete operation which chooses the recursiveness policy based on an argument *)
+
+delete(R, path, recursive) ==
+    IF recursive THEN recursiveDelete(R, path) ELSE simpleDelete(R, path)
+
+
+(*
+Purge ensures that all entries under a path with the matching ID and policy are not there
+afterwards
+*)
+
+purge(R, path, id, persistence) ==
+    /\ (persistence \in PersistPolicySet)
+    /\ \A p2 \in pathAndDescendants(R, path) :
+        (p2.data.id = id /\ p2.data.persistence = persistence)
+            => recursiveDelete(R, p2.path)
+
+(*
+Resolve() resolves the record at a path or fails.
+
+It relies on the fact that if the cardinality of a set is 1, then the CHOOSE operator
+is guaranteed to return the single entry of that set, iff the choice predicate holds.
+
+Using a predicate of TRUE, it always succeeds, so this function selects
+the sole entry of the lookup operation.
+*)
+
+resolveRecord(R, path) ==
+    LET l == lookup(R, path) IN
+        /\ Cardinality(l) = 1
+        /\ CHOOSE e \in l : TRUE
+
+(*
+ The specific action of putting an entry into a record includes validating the record
+*)
+
+validRecordToPut(path, record) ==
+    \* The root entry must have permanent persistence
+    /\ (isRootPath(path) =>
+          (record.persistence = "PERMANENT" \/ record.persistence = ""))
+
+
+(*
+ putting a service record involves validating it then putting it in the registry
+ marshalled as the data in the entry
+ *)
+putRecord(R, path, record) ==
+    /\ validRecordToPut(path, record)
+    /\ put(R, [path |-> path, data |-> record])
+
+
+----------------------------------------------------------------------------------------
+
+
+
+(*
+ The action queue can only contain one of the sets of action types, and
+ by giving each a unique name, those sets are guaranteed to be disjoint
+*)
+QueueInvariant ==
+    /\ \A a \in actions:
+        \/ (a \in PutActions /\ a.type = "put")
+        \/ (a \in DeleteActions /\ a.type = "delete")
+        \/ (a \in PurgeActions /\ a.type = "purge")
+        \/ (a \in MkdirActions /\ a.type = "mknode")
+
+
+
+(*
+ Applying queued actions
+*)
+
+applyAction(R, a) ==
+    \/ (a \in PutActions    /\ putRecord(R, a.path, a.record) )
+    \/ (a \in MkdirActions  /\ mknode(R, a.path, a.parents) )
+    \/ (a \in DeleteActions /\ delete(R, a.path, a.recursive) )
+    \/ (a \in PurgeActions  /\ purge(R, a.path, a.id, a.persistence))
+
+
+(*
+ Apply the first action in the queue and then update the queue
+*)
+applyFirstAction(R, a) ==
+    /\ actions /= <<>>
+    /\ applyAction(R, Head(a))
+    /\ actions' = Tail(a)
+
+
+Next == applyFirstAction(registry, actions)
+
+(*
+All submitted actions must eventually be applied.
+*)
+
+
+Liveness == <>( actions = <<>> )
+
+
+(*
+The initial state of a registry has the root entry.
+*)
+
+InitialRegistry == registry = {
+    [ path |-> <<>>, data |-> <<>> ]
+}
+
+
+(*
+The valid state of the "registry" variable is defined
+via the validRegistry predicate
+*)
+
+ValidRegistryState == validRegistry(registry)
+
+
+
+(*
+The initial state of the system
+*)
+InitialState ==
+    /\ InitialRegistry
+    /\ ValidRegistryState
+    /\ actions = <<>>
+
+
+(*
+The registry has an initial state, the series of state changes driven by the actions,
+and the requirement that it does act on those actions.
+*)
+RegistrySpec ==
+    /\ InitialState
+    /\ [][Next]_vars
+    /\ Liveness
+
+
+----------------------------------------------------------------------------------------
+
+(*
+Theorem: For all operations from that initial state, the registry state is still valid
+*)
+THEOREM InitialState => [] ValidRegistryState
+
+(*
+Theorem: for all operations from that initial state, the type invariants hold
+*)
+THEOREM InitialState => [] TypeInvariant
+
+(*
+Theorem: the queue invariants hold
+*)
+THEOREM InitialState => [] QueueInvariant
+
+=============================================================================
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/index.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/index.md
new file mode 100644
index 0000000..bc8b59c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/index.md
@@ -0,0 +1,22 @@
+
+
+# YARN Service Registry
+
+* [Architecture](yarn-registry.md)
+* [Security](registry-security.md)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/registry-security.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/registry-security.md
new file mode 100644
index 0000000..d9f1ee3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/registry-security.md
@@ -0,0 +1,118 @@
+
+
+# Registry Security
+
+## Security Model
+
+The security model of the registry is designed to meet the following goals:
+
+1. Deliver functional security on a secure ZK installation.
+1. Allow the RM to create per-user regions of the registration space.
+1. Allow applications belonging to a user to write registry entries
+into their part of the space. These may be short-lived or long-lived
+YARN applications, or they may be static applications.
+1. Prevent other users from writing into another user's part of the registry.
+1. Allow system services to register to a `/services` section of the registry.
+1. Provide read access to clients of a registry.
+1. Permit future support of DNS.
+1. Permit the future support of registering data private to a user.
+This allows a service to publish binding credentials (keys &c) for clients to use.
+1. Not require a ZK keytab in every user's home directory in a YARN cluster.
+This implies that kerberos credentials cannot be used by YARN applications.
+
+
+ZK security uses an ACL model, documented in
+[Zookeeper and SASL](https://cwiki.apache.org/confluence/display/ZOOKEEPER/Zookeeper+and+SASL),
+in which different authentication schemes may be used to restrict access
+to different znodes. This permits the registry to use a mixed
+Kerberos + private password model.
+
+* The YARN-based registry (the `RMRegistryOperationsService`) uses kerberos
+as the authentication mechanism for YARN itself.
+* The registry configures the base of the registry to be writeable only by
+itself and other hadoop system accounts holding the relevant kerberos credentials.
+* The user specific parts of the tree are also configured to allow the same
+system accounts to write and manipulate that part of the tree.
+* User accounts are created with a `(username,password)` keypair granted
+write access to their part of the tree.
+* The secret part of the keypair is stored in the user's home directory
+on HDFS, using the Hadoop Credentials API.
+* Initially, the entire registry tree will be world readable.
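+
+To make the mixed model concrete, here is an illustrative sketch (not part of
+the registry API) of how such an ACL list might be assembled with the
+ZooKeeper client classes; the system principals and the `(username,password)`
+pair are assumptions for the example:
+
+    import java.util.ArrayList;
+    import java.util.List;
+
+    import org.apache.zookeeper.ZooDefs.Perms;
+    import org.apache.zookeeper.data.ACL;
+    import org.apache.zookeeper.data.Id;
+    import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
+
+    /** Illustrative only: a mixed kerberos + digest ACL list. */
+    public class MixedAclSketch {
+
+      public static List<ACL> userTreeAcls(String username, String password)
+          throws Exception {
+        List<ACL> acls = new ArrayList<ACL>();
+        // hypothetical system accounts keep full access via SASL/kerberos
+        acls.add(new ACL(Perms.ALL, new Id("sasl", "yarn@EXAMPLE.COM")));
+        acls.add(new ACL(Perms.ALL, new Id("sasl", "mapred@EXAMPLE.COM")));
+        // the user's (username,password) keypair grants write access to their subtree
+        String digest =
+            DigestAuthenticationProvider.generateDigest(username + ":" + password);
+        acls.add(new ACL(Perms.ALL, new Id("digest", digest)));
+        // the registry tree is initially world readable
+        acls.add(new ACL(Perms.READ, new Id("world", "anyone")));
+        return acls;
+      }
+    }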
+
+
+What are the limitations of such a scheme?
+
+1. It is critical that the user-specific registry keypair is kept a secret.
+This relies on filesystem security to keep the file readable only
+by the (authenticated) user.
+1. As the [ZK Documentation says](http://zookeeper.apache.org/doc/r3.4.6/zookeeperProgrammers.html#sc_ZooKeeperAccessControl),
+*"Authentication is done by sending the username:password in clear text"*.
+1. While it is possible to change the password for an account,
+this involves a recursive walk down the registry tree, and will stop all
+running services from being able to authenticate for write access until they
+reload the key.
+1. A world-readable registry tree exposes information about the cluster.
+There is some mitigation here in that access may be restricted by IP Address.
+1. There's also the need to propagate information from the registry down to
+the clients for setting up ACLs.
+
+
+
+## ACL Configuration propagation
+
+The registry manager cannot rely on clients consistently setting
+ZK permissions. At the very least, it cannot rely on client applications
+not setting unintentionally wrong values for the accounts of the system
+services.
+
+*Solution*: Initially, a fixed set of registry permissions is used.
+
+### Automatic domain extension
+
+*work in progress*
+
+It may be possible to determine the realm of a cluster at run time from a local
+user's kerberos tokens as used to talk to YARN or HDFS. This could be used to
+auto-generate account names with the correct realm for the system accounts
+from a string such as `hadoop@,yarn@,mapred@`. This would help keep the
+default configuration values valid.
+
+#### In-registry publishing of core binding data
+
+Another strategy could be to have a `ServiceRecord` at the root
+of the registry that actually defines the registry —including listing
+those default binding values in the `data` field.
+
+### IP address restriction
+
+Read access to the registry may be restricted by IP address.
+This allows access to the registry to be restricted to the Hadoop cluster
+and any nodes explicitly named as having access.
+
+
+
+### Auditing
+
+Something (perhaps the RM) could scan a user's portion of the registry and
+detect some ACL problems: IP/world access too lax, admin account settings wrong.
+It cannot view or fix the ACL permissions unless it has the `ADMIN` permission,
+though that situation can at least be detected. Given the RM must have `DELETE`
+permissions further up the stack, it would be in a position to delete the errant
+part of the tree —though this could be a destructive overreaction.
+
+## Further Reading
+
+* [Zookeeper and SASL](https://cwiki.apache.org/confluence/display/ZOOKEEPER/Zookeeper+and+SASL)
+* [Up and Running with Secure Zookeeper](https://github.com/ekoontz/zookeeper/wiki)
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/yarn-registry.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/yarn-registry.md
new file mode 100644
index 0000000..1bca7bc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/site/markdown/registry/yarn-registry.md
@@ -0,0 +1,837 @@
+
+
+## The YARN Service Registry
+
+# Introduction and concepts
+
+This document describes a YARN service registry built to address a problem:
+*how can clients talk to YARN-deployed services and the components which form
+such services?*
+
+Service registration and discovery is a long-standing problem in distributed
+computing, dating back to Xerox's Grapevine Service.
This proposal is for a
+registry for locating distributed applications deployed by YARN, and
+determining the binding information needed to communicate with these
+applications.
+
+### Definitions
+
+**Service**: a potentially-distributed application deployed in —or reachable
+from— a Hadoop YARN cluster. Examples: Apache HBase, Apache HCatalog, Apache
+Storm. Services may be short-lived or long-lived.
+
+**Service Class:** the name of a type of service, used as a path in a registry
+and matching the DNS-compatible path naming scheme. Examples:
+`org-apache-hbase`, `org-apache-hcatalog`
+
+**Component**: a distributed element of a service. Examples: HBase master
+nodes, HBase region servers and HBase REST servers.
+
+**Service Instance:** A single instance of an application. Example: an HBase
+cluster `demo1`. A service instance is running if the instances of the
+components which form the service are running. This does not imply "live" in
+the distributed computing sense, merely that the processes are running.
+
+**Component Instance**: a single instance of a component within a service
+instance. Examples: an HBase master node on host `rack1server6` or a region
+server on host `rack3server40`.
+
+**Endpoint**: one means of binding with a service instance or a component
+instance. Examples: HBase's Apache Zookeeper binding, a Java JMX port on a
+region server, a Web UI on an HBase Master, and the REST API of an HBase REST
+component instance. Endpoints may be *internal* —for use within the service
+instance— or *external*: for use by clients of a service instance.
+
+**Service Record**: a record in the registry describing a service instance or a
+component instance, including listing its endpoints.
+
+**YARN Resource Manager, "RM":** the YARN component which allows client
+applications to submit work (including requests to deploy service instances) to
+a YARN cluster. The RM retains state on all running applications.
+
+**YARN Application**: An application deployed via YARN. Every application
+instance has a unique application ID.
+
+**YARN Application Master, "AM":** the application-specific component which is
+scheduled and deployed by the RM. It has the duty of maintaining the internal
+state of the application, including requesting and managing all other component
+instances of this application instance. The YARN RM will detect the failure of
+the AM, and respond by rescheduling it.
+
+**YARN Container:** An allocation of resources, including CPU and RAM, for a
+component instance. The AM has the responsibility of requesting the containers
+its components need, and building the commands to instantiate its component
+instances onto allocated containers. Every allocated container has a unique
+container ID.
+
+## The binding problem
+
+Hadoop YARN allows applications to run on the Hadoop cluster. Some of these are
+batch jobs or queries that can be managed via YARN's existing API using their
+application ID. In addition, YARN can deploy long-lived service instances such
+as a pool of Apache Tomcat web servers or an Apache HBase cluster. YARN will
+deploy them across the cluster depending on each component's individual
+requirements and server availability. These service instances need to be
+discovered by clients; traditionally their IP addresses are registered in DNS
+or in some configuration file —but that is not feasible in YARN-deployed
+applications when neither the hostname nor network ports can be known in
+advance.
+
+As a result there is no easy way for clients to interact with dynamically
+deployed applications.
+
+YARN supports a rudimentary registry which allows YARN Application Masters to
+register a web URL and an IPC address, but this is not sufficient for our
+purposes since it does not allow any other *endpoints* to be registered —such
+as REST URLs, a zookeeper path, or the endpoints of the tasks that the
+Application Master executes. Further, the information that can be registered is
+mapped to the YARN application instance —a unique instance ID that changes
+whenever a YARN application is started. This makes it impossible to resolve
+binding information via a static reference to a named service, or to even probe
+for the existence of a service instance which is not currently live.
+
+# Use Cases
+
+## Service Name Examples
+
+
+Core Hadoop services.
+
+These may be deployed statically, dynamically via an account with the
+permissions to write to the `/services` path, or even registrations of remote
+services accessible from within the Hadoop cluster:
+
+    /services/hdfs
+    /services/yarn
+    /services/oozie
+
+Yarn-deployed services belonging to individual users:
+
+    /users/joe/org-apache-hbase/demo1
+    /users/joe/org-apache-hbase/demo1/components/regionserver1
+
+## Registration Use Cases
+
+1. A Hadoop core service that is not running under YARN (example: HDFS) can be
+registered for discovery. This could be done by the service or by management
+tools.
+
+2. A long-lived application deployed by YARN registers itself for discovery by
+clients. The registration data is intended to outlive the application master,
+and perhaps the lifespan of a single deployment of the service instance.
+
+3. Component instances of a service register themselves, publishing internal
+binding information, such as JMX ports.
+
+4. A YARN-deployed application can bind to dependent service instances both
+static and dynamic. Example: a Tomcat web pool binding to the dynamic HBase
+service instance "/users/joe/services/hbase/demo1".
+
+5. Component Instances use the registry to bind to an internal endpoint of
+their application master, to which they heartbeat regularly.
+
+## Unsupported Registration use cases:
+
+1. A short-lived Yarn application is registered automatically in the registry,
+including all its containers, and unregistered when the job terminates.
+Short-lived applications with many containers will place excessive load on a
+registry. All YARN applications will be given the option of registering, but it
+will not be automatic —and application authors must be advised against
+registering short-lived containers.
+
+## Lookup Use Cases
+
+1. A client application looks up a dynamically deployed service instance whose
+user, service class and instance name is known, e.g.
+`/users/joe/services/hbase/demo1`, and retrieves the information needed to
+connect to the service.
+
+2. A client application looks up a statically deployed Hadoop service. Example:
+`/services/hdfs`.
+
+3. An Application Master enumerates all registered component instances,
+discovers their listed JMX ports, and, initializing its own web UI, offers
+links to these endpoints.
+
+4. A user connects to a private HBase service instance at
+`/users/joe/services/hbase/demo1`.
+
+5. A user connects to the cluster's HBase service at `/services/hbase`.
+
+6. A user looks up the binding information to a remote Hadoop cluster's
+filesystem at `/net/cluster4/services/hdfs`. The registration information
+includes the `webhdfs://` URL for the remote filesystem.
+
+7. A user lists their HBase service instances:
+
+        ls /users/joe/services/hbase
+
+8. A user finds all HBase services in the cluster:
+
+        find -endpointField.api=org.apache.hbase
+
+9. Possibly in future: looking up a service via DNS.
+
+This registry proposal is intended to support these use cases by providing a
+means for applications to register their service endpoints, and for clients to
+locate them.
+
+# Key Requirements of a Service Registry
+
+Allow dynamic registration of service instances:
+
+  * YARN-deployed service instances must be able to register their bindings and
+    be discovered by clients.
+
+  * Core Hadoop service instances must be able to register their service
+    endpoints.
+
+  * The binding must be updatable if the service moves or if HA failover
+    occurs.
+
+  * A service instance must be able to publish a variety of endpoints for a
+    service: Web UI, RPC, REST, Zookeeper, others. Furthermore, one must also
+    be able to register certificates and other public security information as
+    part of a binding.
+
+Registry service properties:
+
+  * The registry must be highly available.
+
+  * Scale: many services and many clients in a large cluster. This will limit
+    how much data a service can publish.
+
+  * Ubiquity: we need this in every YARN cluster, whether physical, virtual or
+    in-cloud.
+
+  * Must support hierarchical namespace and names. The name convention must
+    match that of DNS so that we have the option of accessing the namespace via
+    DNS protocol at a later phase of the project.
+
+  * Registry API Language/Protocols
+
+    * Cross-language: independent of any language; client language != service
+
+    * REST API for reading registry data
+
+Access Control:
+
+  * Read access for all
+
+  * Write is restricted so that squatting and impersonation can be avoided.
+
+Remote accessibility: supports remote access even on clusters which are
+only reachable via Apache Knox, or hosted in cloud environments.
+
+## Non-Requirements
+
+* The registry is not intended for liveness detection, leader-election or
+  performing other "shared consensual state" actions for an application itself,
+  other than potentially sharing binding information between component
+  instances.
+
+* The registry is not intended to be a store for arbitrary application state,
+  or for publishing configuration data other than binding information to
+  endpoints offered by a service and its components. Such use would overload
+  the registry and rapidly reach limits of what Zookeeper permits.
+
+# Architecture
+
+We propose a base registry service that binds string-names to records
+describing service and component instances. We plan to use ZK as the base name
+service since it supports many of the required properties. We pick a part of
+the ZK namespace to be the root of the service registry (default:
+`yarnRegistry`).
+
+On top of this base implementation we build our registry service API and the
+naming conventions that Yarn will use for its services. The registry will be
+accessed by the registry API, not directly via ZK —ZK is just an
+implementation choice (although unlikely to change in the future).
+
+1. Services are registered by binding a **_path_** to a value called a
+**_Service Record_**. Paths are hierarchical and use `/` as the root as well as
+the separator.
+
+2. Service records are registered as persistent znodes. This ensures that the
+record remains present during planned and unplanned outages of the service, on
+the assumption that client code is resilient to transient outages.
+
+3.
Each service instance's service record lists the endpoints for the various
+protocols exported by that service instance.
+
+4. For each protocol endpoint it must contain:
+
+    1. The *protocol* name including: Web, REST, IPC, zookeeper. (type:string)
+
+    2. Its *address*: the specific details used to locate this endpoint.
+
+    3. Its *addressType*. This is the format of the binding string. (URL, ZK
+    path, hostname:port pair). For the predefined protocols, we will define
+    what format the binding string MUST be. Example: `protocol==REST` means the
+    binding type is `URL`, `protocol==IPC` binding uses the addresstype
+    `host/port`.
+
+    4. The *api*. This is the API offered by the endpoint, and is application
+    specific. Examples: `org.apache.hadoop.namenode`,
+    `org.apache.hadoop.webhdfs`.
+
+5. Endpoints may be *external* —for use by programs other than the service
+itself, and *internal* —for connecting components within the service instance.
+They will be listed in different sections of the Service Record to distinguish
+them.
+
+6. Core services will be registered using the following convention:
+`/services/{servicename}` e.g. `/services/hdfs`.
+
+7. Yarn services SHOULD be registered using the following convention:
+
+        /users/{username}/{serviceclass}/{instancename}
+
+8. Component instances SHOULD be registered under
+
+        /users/{username}/{serviceclass}/{instancename}/components/{componentname}
+
+9. Each of the user's services which follows this convention must have a unique
+service class name.
+
+10. Each component instance must have a name that is unique for that service
+instance. For a YARN-deployed application, this can be trivially
+derived from the container ID.
+
+The requirements for unique names ensure that the path to a service instance
+or component instance is guaranteed to be unique, and that all instances of a
+specific service class can be enumerated by listing all children of the service
+class path.
+
+
+# Registry Model
+
+Service entries MUST be persistent —it is the responsibility of YARN and other
+tools to determine when a service entry is to be deleted.
+
+## Path Elements
+
+All path elements MUST match that of a lower-case entry in a hostname path as
+defined in RFC1123; the regular expression is:
+
+    ([a-z0-9]|([a-z0-9][a-z0-9\-]*[a-z0-9]))
+
+This policy will ensure that were the registry hierarchy ever to be exported by
+a DNS service, all service classes and names would be valid.
+
+A complication arises with user names, as platforms may allow user names with
+spaces, high unicode and other characters in them. Such paths must be converted
+to valid DNS hostname entries using the punycode convention used for
+internationalized DNS.
+
+## Service Record
+
+A Service Record has some basic information and possibly empty lists of
+internal and external endpoints.
+
+### Service Record:
+
+A Service Record contains some basic information and two lists of endpoints:
+one list for users of a service, one list for internal use within the
+application.
+
+| Name | Description |
+|------|-------------|
+| `description: String` | Human-readable description. |
+| `registrationTime: long` | Registration time, as a `System.currentTimeMillis()` value as seen at the service. |
+| `yarn:id: String` | YARN application or container ID (missing/empty for statically deployed services). |
+| `yarn:persistence: int` | Persistence policy. |
+| `external: List<Endpoint>` | A list of service endpoints for external callers. |
+| `internal: List<Endpoint>` | A list of service endpoints for internal use within the service instance. |
+
+The optional `yarn:persistence` and `yarn:id` attributes define when a record
+*and any child entries* may be deleted.
+
+| Policy # | Name | Description |
+|----------|------|-------------|
+| 0 | Permanent | The record persists until removed manually. |
+| 1 | Application | Remove when the YARN application defined in the `yarn:id` field terminates. |
+| 2 | Application Attempt | Remove when the current YARN application attempt finishes. |
+| 3 | Container | Remove when the YARN container in the `yarn:id` field finishes. |
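+
+For example, a record that should be cleaned up when its YARN application
+terminates would combine policy `1` with that application's ID (a sketch: the
+description is invented, and the application ID reuses the one from the
+examples later in this document):
+
+    {
+      "description" : "example application-lifetime record",
+      "yarn:id" : "application_1408631738011_0001",
+      "yarn:persistence" : "1",
+      "external" : [ ],
+      "internal" : [ ]
+    }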
+
+The policies which clean up when an application, application attempt or
+container terminates require the `yarn:id` field to match that of the
+application, attempt or container. If the wrong ID is set, the cleanup does
+not take place; if set to a different application or container, the record
+will be cleaned up according to the lifecycle of that application or
+container.
+
+These attributes use the prefix "`yarn:`" to indicate their reliance on the
+YARN layer of the Hadoop cluster to implement the policy. If the registry were
+to run standalone —which is entirely possible— all records would be implicitly
+persistent.
+
+### Endpoint:
+
+| Name | Description |
+|------|-------------|
+| `addresses: List<List<String>>` | A list of address tuples whose format depends on the address type. |
+| `addressType: String` | Format of the binding strings. |
+| `protocol: String` | Protocol. Examples: `http`, `https`, `hadoop-rpc`, `zookeeper`, `web`, `REST`, `SOAP`, ... |
+| `api: String` | API implemented at the end of the binding. |
+
+All string fields have a limit on size, to dissuade services from hiding
+complex JSON structures in the text description.
+
+### Field: Address Type
+
+The `addressType` field defines the string format of the entries in the
+`addresses` lists.
+
+The benefit of having separate types is that tools (such as a web viewer) can
+process binding strings without having to recognize the protocol.
+
+| Format | Binding format |
+|--------|----------------|
+| `url` | `[URL]` |
+| `hostname` | `[hostname]` |
+| `inetaddress` | `[hostname, port]` |
+| `path` | `[/path/to/something]` |
+| `zookeeper` | `[quorum-entry, path]` |
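+
+As an illustration, a `zookeeper` endpoint might be serialized as follows (a
+sketch only: the hostnames, port and `api` and `protocolType` values are
+invented for this example; the `[hostname, port, path]` triple format is
+explained in the next paragraph):
+
+    {
+      "api" : "zk-quorum",
+      "addressType" : "zookeeper",
+      "protocolType" : "zookeeper",
+      "addresses" : [
+        [ "zk1", "2181", "/yarnRegistry" ],
+        [ "zk2", "2181", "/yarnRegistry" ]
+      ]
+    }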
+
+An actual zookeeper binding consists of a list of `hostname:port` bindings —the
+quorum— and the path within. In the proposed schema, every quorum entry will be
+listed as a triple of `[hostname, port, path]`. Client applications do not
+expect the path to be different across the quorum. The first entry in the
+list of quorum hosts MUST define the path to be used by all clients. Later
+entries SHOULD list the same path, though clients MUST ignore these.
+
+New address types may be defined; if not standard, please prefix them with the
+character sequence `"x-"`.
+
+### Field: API
+
+APIs may be unique to a service class, or may be common across service
+classes. They MUST be given unique names. These MAY be based on service
+packages but MAY be derived from other naming schemes.
+
+### Examples of Service Entries
+
+Here is an example of a service entry for a YARN-deployed tomcat application.
+
+After creation and registration of the application, the registry looks as
+follows:
+
+    /users
+      /devteam
+        /org-apache-tomcat
+          /test1
+            /components
+              /container-1408631738011-0001-01-000002
+              /container-1408631738011-0001-01-000001
+
+The `/users/devteam/org-apache-tomcat/test1` service record describes the
+overall application. It lists the YARN application ID, and exports the URL to a
+load balancer. Its persistence policy is `0`: permanent.
+
+    {
+      "description" : "tomcat-based web application",
+      "registrationTime" : 1408638082444,
+      "yarn:id" : "application_1408631738011_0001",
+      "yarn:persistence" : "0",
+      "external" : [ {
+        "api" : "www",
+        "addressType" : "uri",
+        "protocolType" : "REST",
+        "addresses" : [ [ "http://loadbalancer/" ], [ "http://loadbalancer2/" ] ]
+      } ],
+      "internal" : [ ]
+    }
+
+The service instance is built from two component instances, each described with
+their container ID converted into a DNS-compatible hostname. The entries are
+effectively ephemeral: if the entries were set within the container, then when
+that container is released or the component fails, the entries will be
+automatically removed. Accordingly, the persistence policy of each is declared
+to be `"3"`, container. The `yarn:id` field identifies the container whose
+completion will trigger the deletion of the entry.
+
+    /users/devteam/org-apache-tomcat/test1/components/container-1408631738011-0001-01-000001
+
+    {
+      "registrationTime" : 1408638082445,
+      "yarn:id" : "container_1408631738011_0001_01_000001",
+      "yarn:persistence" : "3",
+      "description" : null,
+      "external" : [ {
+        "api" : "www",
+        "addressType" : "uri",
+        "protocolType" : "REST",
+        "addresses" : [ [ "http://rack4server3:43572" ] ]
+      } ],
+      "internal" : [ {
+        "api" : "jmx",
+        "addressType" : "host/port",
+        "protocolType" : "JMX",
+        "addresses" : [ [ "rack4server3", "43573" ] ]
+      } ]
+    }
+
+The component instances list their endpoints: the public REST API as an
+external endpoint, the JMX addresses as internal.
+
+    /users/devteam/org-apache-tomcat/test1/components/container-1408631738011-0001-01-000002
+
+    {
+      "registrationTime" : 1408638082445,
+      "yarn:id" : "container_1408631738011_0001_01_000002",
+      "yarn:persistence" : "3",
+      "description" : null,
+      "external" : [ {
+        "api" : "www",
+        "addressType" : "uri",
+        "protocolType" : "REST",
+        "addresses" : [ [ "http://rack1server28:35881" ] ]
+      } ],
+      "internal" : [ {
+        "api" : "jmx",
+        "addressType" : "host/port",
+        "protocolType" : "JMX",
+        "addresses" : [ [ "rack1server28", "35882" ] ]
+      } ]
+    }
+
+This information could be used by the (hypothetical) load balancer to enumerate
+the components and build a list of component instances to dispatch requests to.
+Similarly, a management application could enumerate all available component
+instances and their JMX ports, then connect to each to collect performance
+metrics.
+
+# Registry API
+
+Here is the registry API as seen from a Java application. The API is a thin
+layer above the ZK operations, essentially building up paths, reading, writing
+and updating entries, and enumerating children. The REST API is implemented
+inside a server which uses this same API.
+
+The exceptions that are listed are only a subset of the possible exceptions
+—the interface merely lists those that have special meaning.
+
+All write operations must assume that they are communicating with a registry
+service with the consistency view of a Zookeeper client; read-only clients must
+assume that some operations may not be immediately visible to them.
+
+### Registry Operations
+
+    public interface RegistryOperations extends Service {
+
+      /**
+       * Create a path.
+       *
+       * It is not an error if the path exists already, be it empty or not.
+       *
+       * The createParents flag also requests creating the parents.
+       * As entries in the registry can hold data while still having
+       * child entries, it is not an error if any of the parent path
+       * elements have service records.
+       *
+       * @param path path to create
+       * @param createParents also create the parents.
+       * @throws PathNotFoundException parent path is not in the registry.
+       * @throws AccessControlException access permission failure.
+       * @throws InvalidPathnameException path name is invalid.
+       * @throws IOException Any other IO Exception.
+       * @return true if the path was created, false if it existed.
+       */
+      boolean mknode(String path, boolean createParents)
+          throws PathNotFoundException,
+          AccessControlException,
+          InvalidPathnameException,
+          IOException;
+
+      /**
+       * Set a service record on an entry
+       * @param path path to service record
+       * @param record service record to create/update
+       * @param createFlags creation flags
+       * @throws PathNotFoundException the parent path does not exist
+       * @throws FileAlreadyExistsException path exists but create flags
+       * do not include "overwrite"
+       * @throws AccessControlException access permission failure.
+       * @throws InvalidPathnameException path name is invalid.
+       * @throws IOException Any other IO Exception.
+       */
+      void create(String path, ServiceRecord record, int createFlags)
+          throws PathNotFoundException,
+          FileAlreadyExistsException,
+          AccessControlException,
+          InvalidPathnameException,
+          IOException;
+
+
+      /**
+       * Resolve the record at a path
+       * @param path path to service record
+       * @return the record
+       * @throws PathNotFoundException path is not in the registry.
+       * @throws AccessControlException security restriction.
+       * @throws InvalidPathnameException the path is invalid.
+       * @throws IOException Any other IO Exception
+       */
+
+      ServiceRecord resolve(String path) throws PathNotFoundException,
+          AccessControlException,
+          InvalidPathnameException,
+          IOException;
+
+      /**
+       * Get the status of a path
+       * @param path path to query
+       * @return the status of the path
+       * @throws PathNotFoundException path is not in the registry.
+       * @throws AccessControlException security restriction.
+       * @throws InvalidPathnameException the path is invalid.
+       * @throws IOException Any other IO Exception
+       */
+      RegistryPathStatus stat(String path)
+          throws PathNotFoundException,
+          AccessControlException,
+          InvalidPathnameException,
+          IOException;
+
+      /**
+       * List children of a directory
+       * @param path path
+       * @return a possibly empty array of child entries
+       * @throws PathNotFoundException path is not in the registry.
+       * @throws AccessControlException security restriction.
+       * @throws InvalidPathnameException the path is invalid.
+       * @throws IOException Any other IO Exception
+       */
+      RegistryPathStatus[] list(String path)
+          throws PathNotFoundException,
+          AccessControlException,
+          InvalidPathnameException,
+          IOException;
+
+      /**
+       * Delete a path.
+       *
+       * If the operation returns without an error then the entry has been
+       * deleted.
+       * @param path path to delete
+       * @param recursive recursive flag
+       * @throws PathNotFoundException path is not in the registry.
+       * @throws AccessControlException security restriction.
+       * @throws InvalidPathnameException the path is invalid.
+       * @throws PathIsNotEmptyDirectoryException path has child entries, but
+       * recursive is false.
+       * @throws IOException Any other IO Exception
+       *
+       */
+      void delete(String path, boolean recursive)
+          throws PathNotFoundException,
+          PathIsNotEmptyDirectoryException,
+          AccessControlException,
+          InvalidPathnameException,
+          IOException;
+
+    }
+
+
+## Security
+
+The registry will only allow a service instance to be registered under a path
+where it has write permissions. YARN will create per-user directories with
+appropriate permissions, under which services can be registered by the user
+account of the service instance. The admin will also create directories (such
+as `/services`) with appropriate permissions, in which core Hadoop services
+can register themselves.
+
+There will be no attempt to restrict read access to registry information. The
+services themselves must protect against inappropriate access by clients by
+requiring authentication and authorization. There is a *scope* field in a
+service record, but this is just a marker to say "internal API only", rather
+than a direct security restriction. (This is why "internal" and "external" are
+proposed, not "public" and "private".)
+
+Rationale: the endpoints being registered would be discoverable through port
+scanning anyway. Having everything world-readable allows the REST API to have a
+simpler access model —and is consistent with DNS.
+
+On a secure cluster, ZK token renewal may become an issue for long-lived
+services —if their tokens expire, their sessions may expire. Renewal of such
+tokens is not part of the API implementation —we may need to add a means to
+update the tokens of an instance of the registry operations class.
+
+### Security Policy Summary
+
+The registry is designed to be secured *on a kerberos-managed cluster*.
+
+* The registry root grants full rights (`"rwcda"`) to the "system accounts"
+`mapred`, `hdfs` and `yarn`; all other accounts, including anonymous access,
+are read-only.
+
+* The permissions are similarly restricted for `/users` and `/services/`.
+
+* Installations may extend or change these system accounts.
+
+* When an application belonging to a user is scheduled, YARN
+SHALL create an entry for that user, `/users/${username}`.
+
+* This node will grant full access to the system accounts; the user will have
+the access rights `"crd"`. That is, they may create or delete child nodes, but
+not write to their home node or alter its permissions.
+
+* Applications wishing to write to the registry must use a SASL connection
+to authenticate via Zookeeper.
+
+* Applications creating nodes in the user path MUST include the site-specified
+system accounts in the ACL list, with full access.
+
+* Applications creating nodes in the user path MUST declare their own
+user identity as a `sasl:user@REALM` entry.
+
+* Applications creating nodes in the user path MAY add extra `digest:` ACL
+tokens so as to give their services the ability to manipulate portions of the
+registry *without needing kerberos credentials*.
+
+The digest-driven authentication avoids the problem of credential renewal in
+long-lived applications. A YARN application may be passed the token to
+connect with the ZK service when launched. It can then create or update an
+entry, including a secret digest ACL in the permissions of the nodes it
+creates. As a result, even after the credentials expire, it retains *some*
+access.
+
+Note that for this to be successful, the client will need to fall back to a
+session which does *not* use SASL, instead authenticating with id:password
+digest credentials.
+
+
+## Out of cluster and cross-cluster access
+
+1. A client should be able to access the registry of another cluster in order
+to access services of that cluster. Details of this need to be further fleshed
+out.
+
+2. Firewall services such as Apache Knox can examine the internal set of
+published services, and publish a subset of their endpoints. They MAY implement
+a future REST API.
+
+# Limits
+
+**Entry Size**
+
+Zookeeper has a default limit of 1MB/node. If all endpoints of a service or
+component are stored in the JSON attached to that node, then there is a total
+limit of 1MB for all endpoint registration data.
+
+To prevent this becoming a problem, the client API should implement strict
+limits on the maximum length of fields, with low limits on the addressType,
+protocol, and api fields, something longer on the description and addresses
+elements —along with a limit on the number of elements in the addresses field.
+
+**Name size**
+
+To support DNS in future, there must be a limit of 63 bytes on all path
+elements. For non-ASCII user names, punycode conversion implies that the
+effective limit on the original name may be shorter.
+
+**Rate of Update**
+
+A rapid rate of entry change is considered antisocial in a ZK cluster.
+Implementations may throttle update operations.
+
+**Rate of Polling**
+
+Clients which poll the registry may be throttled.
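+
+# Appendix: Registry API Usage Sketch
+
+To make the API above concrete, here is a minimal sketch of registering and
+then resolving a service record. It is illustrative only: the path and record
+contents are invented, obtaining a started `RegistryOperations` instance is
+implementation-specific, and `RegistryPathUtils` and `CreateFlags` are helper
+classes from the accompanying implementation.
+
+    void registerAndResolve(RegistryOperations operations) throws IOException {
+      String path = "/users/devteam/org-apache-tomcat/test1";
+
+      // ensure the parent path exists, creating intermediate nodes if needed
+      operations.mknode(RegistryPathUtils.parentOf(path), true);
+
+      // build a record and write it, overwriting any existing entry
+      ServiceRecord record = new ServiceRecord();
+      record.description = "tomcat-based web application";
+      operations.create(path, record, CreateFlags.OVERWRITE);
+
+      // read the record back
+      ServiceRecord resolved = operations.resolve(path);
+    }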
+ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/AbstractRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/AbstractRegistryTest.java new file mode 100644 index 0000000..ed0bb74 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/AbstractRegistryTest.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry; + +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.yarn.registry.client.api.RegistryOperations; +import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.yarn.registry.client.types.PersistencePolicies; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; +import org.apache.hadoop.yarn.registry.server.services.RMRegistryOperationsService; +import org.junit.Before; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URISyntaxException; + +/** + * Abstract registry tests .. inits the field {@link #registry} + * before the test with an instance of {@link RMRegistryOperationsService}; + * and {@link #operations} with the same instance cast purely + * to the type {@link RegistryOperations}. 
+ * + */ +public class AbstractRegistryTest extends AbstractZKRegistryTest { + private static final Logger LOG = + LoggerFactory.getLogger(AbstractRegistryTest.class); + protected RMRegistryOperationsService registry; + protected RegistryOperations operations; + + @Before + public void setupRegistry() throws IOException { + registry = new RMRegistryOperationsService("yarnRegistry"); + operations = registry; + registry.init(createRegistryConfiguration()); + registry.start(); + operations.delete("/", true); + registry.createRootRegistryPaths(); + addToTeardown(registry); + } + + + /** + * Create a service entry with the sample endpoints, and put it + * at the destination + * @param path path + * @param createFlags flags + * @return the record + * @throws IOException on a failure + */ + protected ServiceRecord putExampleServiceEntry(String path, int createFlags) throws + IOException, + URISyntaxException { + return putExampleServiceEntry(path, createFlags, PersistencePolicies.PERMANENT); + } + + /** + * Create a service entry with the sample endpoints, and put it + * at the destination + * @param path path + * @param createFlags flags + * @return the record + * @throws IOException on a failure + */ + protected ServiceRecord putExampleServiceEntry(String path, + int createFlags, + int persistence) + throws IOException, URISyntaxException { + ServiceRecord record = buildExampleServiceEntry(persistence); + + registry.mknode(RegistryPathUtils.parentOf(path), true); + operations.create(path, record, createFlags); + return record; + } + + /** + * Assert a path exists + * @param path path in the registry + * @throws IOException + */ + public void assertPathExists(String path) throws IOException { + operations.stat(path); + } + + /** + * assert that a path does not exist + * @param path path in the registry + * @throws IOException + */ + public void assertPathNotFound(String path) throws IOException { + try { + operations.stat(path); + fail("Path unexpectedly found: " + path); + } catch (PathNotFoundException e) { + + } + } + + /** + * Assert that a path resolves to a service record + * @param path path in the registry + * @throws IOException + */ + public void assertResolves(String path) throws IOException { + operations.resolve(path); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/AbstractZKRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/AbstractZKRegistryTest.java new file mode 100644 index 0000000..f297402 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/AbstractZKRegistryTest.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.Service; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.registry.server.services.AddingCompositeService; +import org.apache.hadoop.yarn.registry.server.services.MicroZookeeperService; +import org.apache.hadoop.yarn.registry.server.services.MicroZookeeperServiceKeys; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestName; +import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; + +public class AbstractZKRegistryTest extends RegistryTestHelper { + private static final Logger LOG = + LoggerFactory.getLogger(AbstractZKRegistryTest.class); + + private static final AddingCompositeService servicesToTeardown = + new AddingCompositeService("teardown"); + // static initializer guarantees it is always started + // ahead of any @BeforeClass methods + static { + servicesToTeardown.init(new Configuration()); + servicesToTeardown.start(); + } + + @Rule + public final Timeout testTimeout = new Timeout(10000); + + @Rule + public TestName methodName = new TestName(); + + protected static void addToTeardown(Service svc) { + servicesToTeardown.addService(svc); + } + + @AfterClass + public static void teardownServices() throws IOException { + describe(LOG, "teardown of static services"); + servicesToTeardown.close(); + } + + protected static MicroZookeeperService zookeeper; + + + @BeforeClass + public static void createZKServer() throws Exception { + File zkDir = new File("target/zookeeper"); + FileUtils.deleteDirectory(zkDir); + assertTrue(zkDir.mkdirs()); + zookeeper = new MicroZookeeperService("InMemoryZKService"); + YarnConfiguration conf = new YarnConfiguration(); + conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, zkDir.getAbsolutePath()); + zookeeper.init(conf); + zookeeper.start(); + addToTeardown(zookeeper); + } + + /** + * give our thread a name + */ + @Before + public void nameThread() { + Thread.currentThread().setName("JUnit"); + } + + /** + * Returns the connection string to use + * + * @return connection string + */ + public String getConnectString() { + return zookeeper.getConnectionString(); + } + + public YarnConfiguration createRegistryConfiguration() { + YarnConfiguration conf = new YarnConfiguration(); + conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_CONNECTION_TIMEOUT, 1000); + conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_INTERVAL, 500); + conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_TIMES, 10); + conf.setInt(RegistryConstants.KEY_REGISTRY_ZK_RETRY_CEILING, 10); + conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, + zookeeper.getConnectionString()); + return conf; + } +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/RegistryTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/RegistryTestHelper.java new file mode 100644 index 0000000..ece6784 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/RegistryTestHelper.java @@ 
-0,0 +1,346 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.registry.client.binding.RecordOperations; +import org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils; +import org.apache.hadoop.yarn.registry.client.types.AddressTypes; +import org.apache.hadoop.yarn.registry.client.types.Endpoint; +import org.apache.hadoop.yarn.registry.client.types.ProtocolTypes; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; +import org.apache.hadoop.yarn.registry.secure.AbstractSecureRegistryTest; +import org.apache.zookeeper.common.PathUtils; +import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.Subject; +import javax.security.auth.login.LoginContext; +import javax.security.auth.login.LoginException; +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.List; + +import static org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils.inetAddrEndpoint; +import static org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils.ipcEndpoint; +import static org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils.restEndpoint; +import static org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils.tuple; +import static org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils.webEndpoint; + +/** + * This is a set of static methods to aid testing the registry operations. + * The methods can be imported statically —or the class used as a base + * class for tests. 
+ */
+public class RegistryTestHelper extends Assert {
+  public static final String SC_HADOOP = "org-apache-hadoop";
+  public static final String USER = "devteam/";
+  public static final String NAME = "hdfs";
+  public static final String API_WEBHDFS = "org_apache_hadoop_namenode_webhdfs";
+  public static final String API_HDFS = "org_apache_hadoop_namenode_dfs";
+  public static final String USERPATH = RegistryConstants.PATH_USERS + USER;
+  public static final String PARENT_PATH = USERPATH + SC_HADOOP + "/";
+  public static final String ENTRY_PATH = PARENT_PATH + NAME;
+  public static final String NNIPC = "nnipc";
+  public static final String IPC2 = "IPC2";
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RegistryTestHelper.class);
+  public static final String KTUTIL = "ktutil";
+  private final RecordOperations.ServiceRecordMarshal recordMarshal =
+      new RecordOperations.ServiceRecordMarshal();
+
+  /**
+   * Assert the path is valid by ZK rules
+   * @param path path to validate
+   */
+  public static void assertValidZKPath(String path) {
+    try {
+      PathUtils.validatePath(path);
+    } catch (IllegalArgumentException e) {
+      throw new IllegalArgumentException("Invalid Path " + path + ": " + e, e);
+    }
+  }
+
+  public static void assertNotEmpty(String message, String check) {
+    if (StringUtils.isEmpty(check)) {
+      fail(message);
+    }
+  }
+
+  public static void assertNotEmpty(String check) {
+    if (StringUtils.isEmpty(check)) {
+      fail("Empty string");
+    }
+  }
+
+  /**
+   * Log the details of a login context
+   * @param name name to assert that the user is logged in as
+   * @param loginContext the login context
+   */
+  public static void logLoginDetails(String name,
+      LoginContext loginContext) {
+    assertNotNull("Null login context", loginContext);
+    Subject subject = loginContext.getSubject();
+    LOG.info("Logged in as {}:\n {}", name, subject);
+  }
+
+  /**
+   * Set the JVM property to enable Kerberos debugging
+   */
+  public static void enableKerberosDebugging() {
+    System.setProperty(AbstractSecureRegistryTest.SUN_SECURITY_KRB5_DEBUG,
+        "true");
+  }
+  /**
+   * Set the JVM property to disable Kerberos debugging
+   */
+  public static void disableKerberosDebugging() {
+    System.setProperty(AbstractSecureRegistryTest.SUN_SECURITY_KRB5_DEBUG,
+        "false");
+  }
+
+  /**
+   * General code to validate bits of a component/service entry built with
+   * {@link #addSampleEndpoints(ServiceRecord, String)}
+   * @param record instance to check
+   */
+  protected void validateEntry(ServiceRecord record) {
+    assertNotNull("null service record", record);
+    List<Endpoint> endpoints = record.external;
+    assertEquals(2, endpoints.size());
+
+    Endpoint webhdfs = findEndpoint(record, API_WEBHDFS, true, 1, 1);
+    assertEquals(API_WEBHDFS, webhdfs.api);
+    assertEquals(AddressTypes.ADDRESS_URI, webhdfs.addressType);
+    assertEquals(ProtocolTypes.PROTOCOL_REST, webhdfs.protocolType);
+    List<List<String>> addressList = webhdfs.addresses;
+    List<String> url = addressList.get(0);
+    String addr = url.get(0);
+    assertTrue(addr.contains("http"));
+    assertTrue(addr.contains(":8020"));
+
+    Endpoint nnipc = findEndpoint(record, NNIPC, false, 1, 2);
+    assertEquals("wrong protocol in " + nnipc, ProtocolTypes.PROTOCOL_THRIFT,
+        nnipc.protocolType);
+
+    Endpoint ipc2 = findEndpoint(record, IPC2, false, 1, 2);
+
+    Endpoint web = findEndpoint(record, "web", true, 1, 1);
+    assertEquals(1, web.addresses.size());
+    assertEquals(1, web.addresses.get(0).size());
+
+  }
+
+  /**
+   * Assert that an endpoint matches the criteria
+   * @param endpoint endpoint to examine
+   * @param addressType expected address type
+   * @param protocolType expected protocol type
+   * @param api expected API
+   */
+ 
public void assertMatches(Endpoint endpoint,
+      String addressType,
+      String protocolType,
+      String api) {
+    assertNotNull(endpoint);
+    assertEquals(addressType, endpoint.addressType);
+    assertEquals(protocolType, endpoint.protocolType);
+    assertEquals(api, endpoint.api);
+  }
+
+  /**
+   * Assert the records match. Only the ID, registration time,
+   * description and persistence are checked —not endpoints.
+   * @param source record that was written
+   * @param resolved the one that resolved.
+   */
+  public void assertMatches(ServiceRecord source, ServiceRecord resolved) {
+    assertNotNull("Null source record ", source);
+    assertNotNull("Null resolved record ", resolved);
+    assertEquals(source.id, resolved.id);
+    assertEquals(source.registrationTime, resolved.registrationTime);
+    assertEquals(source.description, resolved.description);
+    assertEquals(source.persistence, resolved.persistence);
+  }
+
+  /**
+   * Find an endpoint in a record or fail.
+   * @param record record
+   * @param api API
+   * @param external external?
+   * @param addressElements expected # of address elements
+   * @param addressTupleSize expected size of an address tuple
+   * @return the endpoint.
+   */
+  public Endpoint findEndpoint(ServiceRecord record,
+      String api, boolean external, int addressElements, int addressTupleSize) {
+    Endpoint epr = external ? record.getExternalEndpoint(api)
+                            : record.getInternalEndpoint(api);
+    if (epr != null) {
+      assertEquals("wrong # of addresses",
+          addressElements, epr.addresses.size());
+      assertEquals("wrong # of elements in an address tuple",
+          addressTupleSize, epr.addresses.get(0).size());
+      return epr;
+    }
+    List<Endpoint> endpoints = external ? record.external : record.internal;
+    StringBuilder builder = new StringBuilder();
+    for (Endpoint endpoint : endpoints) {
+      builder.append("\"").append(endpoint).append("\" ");
+    }
+    fail("Did not find " + api + " in endpoints " + builder);
+    // never reached; here to keep the compiler happy
+    return null;
+  }
+
+  /**
+   * Log a record
+   * @param name record name
+   * @param record details
+   * @throws IOException only if something bizarre goes wrong marshalling
+   * a record.
+   */
+  public void logRecord(String name, ServiceRecord record) throws
+      IOException {
+    LOG.info(" {} = \n{}\n", name, recordMarshal.toJson(record));
+  }
+
+  /**
+   * Create a service entry with the sample endpoints
+   * @param persistence persistence policy
+   * @return the record
+   * @throws IOException on a failure
+   */
+  protected ServiceRecord buildExampleServiceEntry(int persistence) throws
+      IOException,
+      URISyntaxException {
+    ServiceRecord record = new ServiceRecord();
+    record.id = "example-0001";
+    record.persistence = persistence;
+    record.registrationTime = System.currentTimeMillis();
+    addSampleEndpoints(record, "namenode");
+    return record;
+  }
+
+  /**
+   * Add some endpoints
+   * @param entry entry to update
+   * @param hostname hostname to use in the endpoint addresses
+   */
+  protected void addSampleEndpoints(ServiceRecord entry, String hostname) throws
+      URISyntaxException {
+    entry.addExternalEndpoint(webEndpoint("web",
+        new URI("http", hostname + ":80", "/")));
+    entry.addExternalEndpoint(
+        restEndpoint(API_WEBHDFS,
+            new URI("http", hostname + ":8020", "/")));
+
+    Endpoint endpoint = ipcEndpoint(API_HDFS,
+        true, null);
+    endpoint.addresses.add(tuple(hostname, "8030"));
+    entry.addInternalEndpoint(endpoint);
+    InetSocketAddress localhost = new InetSocketAddress("localhost", 8050);
+    entry.addInternalEndpoint(
+        inetAddrEndpoint(NNIPC, ProtocolTypes.PROTOCOL_THRIFT, "localhost",
+            8050));
+    entry.addInternalEndpoint(
+        RegistryTypeUtils.ipcEndpoint(
+            IPC2,
+            true,
+            RegistryTypeUtils.marshall(localhost)));
+  }
+
+  /**
+   * Describe the stage in the process with a box around it, so as
+   * to highlight it in test logs
+   * @param log log to use
+   * @param text text
+   * @param args logger args
+   */
+  public static void describe(Logger log, String text, Object...args) {
+    log.info("\n=======================================");
+    log.info(text, args);
+    log.info("=======================================\n");
+  }
+
+
+  /**
+   * Log out from a context if non-null ... exceptions are caught and logged
+   * @param login login context
+   * @return null, always
+   */
+  public static LoginContext logout(LoginContext login) {
+    try {
+      if (login != null) {
+        login.logout();
+      }
+    } catch (LoginException e) {
+      LOG.warn("Exception logging out: {}", e, e);
+    }
+    return null;
+  }
+
+  /**
+   * Exec the native ktutil to list the keys
+   * (primarily to verify that the generated keytabs are compatible).
+   * This operation is not executed on Windows. On other platforms
+   * it requires ktutil to be installed and on the path
+   * <pre>
+   *   ktutil --keytab=target/kdc/zookeeper.keytab list --keys
+   * </pre>
+   * @param keytab keytab to list
+   * @throws IOException on any execution problem, including the executable
+   * being missing
+   */
+  public static String ktList(File keytab) throws IOException {
+    if (!Shell.WINDOWS) {
+      String path = keytab.getAbsolutePath();
+      String out = Shell.execCommand(
+          KTUTIL,
+          "--keytab=" + path,
+          "list",
+          "--keys"
+      );
+      LOG.info("Listing of keytab {}:\n{}\n", path, out);
+      return out;
+    }
+    return "";
+  }
+
+  /**
+   * Login via a UGI. Requires UGI to have been set up
+   * @param user user to log in as
+   * @param keytab keytab to use
+   * @return the logged-in user
+   * @throws IOException on failure
+   */
+  protected UserGroupInformation loginUGI(String user, File keytab) throws
+      IOException {
+    return UserGroupInformation.loginUserFromKeytabAndReturnUGI(user,
+        keytab.getAbsolutePath());
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/binding/TestMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/binding/TestMarshalling.java
new file mode 100644
index 0000000..8822685
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/binding/TestMarshalling.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.registry.client.binding; + +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidRecordException; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecord; +import org.apache.hadoop.yarn.registry.client.types.ServiceRecordHeader; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.rules.Timeout; + +import java.io.EOFException; + +/** + * Test record marshalling + */ +public class TestMarshalling extends Assert { + @Rule + public final Timeout testTimeout = new Timeout(10000); + @Rule + public TestName methodName = new TestName(); + private static RecordOperations.ServiceRecordMarshal marshal; + + @BeforeClass + public static void setupClass() { + marshal = new RecordOperations.ServiceRecordMarshal(); + } + + @Test + public void testRoundTrip() throws Throwable { + ServiceRecord record = new ServiceRecord("01", "description", 0, null); + byte[] bytes = marshal.toBytes(record); + ServiceRecord r2 = marshal.fromBytes("", bytes, 0); + assertEquals(record.id, r2.id); + assertEquals(record.persistence, r2.persistence); + assertEquals(record.description, r2.description); + } + + @Test + public void testRoundTripHeaders() throws Throwable { + ServiceRecord record = new ServiceRecord("01", "description", 1, null); + byte[] bytes = marshal.toByteswithHeader(record); + ServiceRecord r2 = marshal.fromBytesWithHeader("", bytes); + assertEquals(record.id, r2.id); + assertEquals(record.persistence, r2.persistence); + assertEquals(record.description, r2.description); + } + + @Test(expected = InvalidRecordException.class) + public void testRoundTripBadHeaders() throws Throwable { + ServiceRecord record = new ServiceRecord("01", "description", 0, null); + byte[] bytes = marshal.toByteswithHeader(record); + bytes[1] = 0x01; + marshal.fromBytesWithHeader("src", bytes); + } + + @Test(expected = InvalidRecordException.class) + public void testUnmarshallHeaderTooShort() throws Throwable { + marshal.fromBytesWithHeader("src", new byte[]{'a'}); + } + + @Test(expected = EOFException.class) + public void testUnmarshallNoBody() throws Throwable { + byte[] bytes = ServiceRecordHeader.getData(); + marshal.fromBytesWithHeader("src", bytes); + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/binding/TestRegistryPathUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/binding/TestRegistryPathUtils.java new file mode 100644 index 0000000..865118c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/binding/TestRegistryPathUtils.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.binding; + +import static org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils.*; +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.yarn.registry.client.exceptions.InvalidPathnameException; +import org.junit.Assert; +import org.junit.Test; + +public class TestRegistryPathUtils extends Assert { + + + public static final String EURO = "\u20AC"; + + @Test + public void testFormatAscii() throws Throwable { + + String in = "hostname01101101-1"; + assertConverted(in, in); + } + + /* + * Euro symbol + */ + @Test + public void testFormatEuroSymbol() throws Throwable { + assertConverted("xn--lzg", EURO); + } + + @Test + public void testFormatIdempotent() throws Throwable { + assertConverted("xn--lzg", RegistryPathUtils.encodeForRegistry(EURO)); + } + + @Test + public void testFormatCyrillicSpaced() throws Throwable { + assertConverted("xn--pa 3-k4di", "\u0413PA\u0414 3"); + } + + protected void assertConverted(String expected, String in) { + String out = RegistryPathUtils.encodeForRegistry(in); + assertEquals("Conversion of " + in, expected, out); + } + + + @Test + public void testPaths() throws Throwable { + assertCreatedPathEquals("/", "/", ""); + assertCreatedPathEquals("/", "", ""); + assertCreatedPathEquals("/", "", "/"); + assertCreatedPathEquals("/", "/", "/"); + + assertCreatedPathEquals("/a", "/a", ""); + assertCreatedPathEquals("/a", "/", "a"); + assertCreatedPathEquals("/a/b", "/a", "b"); + assertCreatedPathEquals("/a/b", "/a/", "b"); + assertCreatedPathEquals("/a/b", "/a", "/b"); + assertCreatedPathEquals("/a/b", "/a", "/b/"); + assertCreatedPathEquals("/a", "/a", "/"); + assertCreatedPathEquals("/alice", "/", "/alice"); + assertCreatedPathEquals("/alice", "/alice", "/"); + } + + + + + @Test + public void testComplexPaths() throws Throwable { + assertCreatedPathEquals("/", "", ""); + assertCreatedPathEquals("/yarn/registry/users/hadoop/org-apache-hadoop", + "/yarn/registry", + "users/hadoop/org-apache-hadoop/"); + } + + + private static void assertCreatedPathEquals(String expected, String base, + String path) throws IOException { + String fullPath = createFullPath(base, path); + assertEquals("\"" + base + "\" + \"" + path + "\" =\"" + fullPath + "\"", + expected, fullPath); + } + + @Test + public void testSplittingEmpty() throws Throwable { + assertEquals(0, split("").size()); + assertEquals(0, split("/").size()); + assertEquals(0, split("///").size()); + } + + + @Test + public void testSplitting() throws Throwable { + assertEquals(1, split("/a").size()); + assertEquals(0, split("/").size()); + assertEquals(3, split("/a/b/c").size()); + assertEquals(3, split("/a/b/c/").size()); + assertEquals(3, split("a/b/c").size()); + assertEquals(3, split("/a/b//c").size()); + assertEquals(3, split("//a/b/c/").size()); + List split = split("//a/b/c/"); + assertEquals("a", split.get(0)); + assertEquals("b", split.get(1)); + assertEquals("c", split.get(2)); + } + + @Test + public void testParentOf() throws Throwable { + assertEquals("/", parentOf("/a")); + assertEquals("/", parentOf("/a/")); + assertEquals("/a", parentOf("/a/b")); + assertEquals("/a/b", parentOf("/a/b/c")); + } + + @Test(expected = PathNotFoundException.class) + public void testParentOfRoot() throws Throwable { + parentOf("/"); + } + + @Test + public void testValidPaths() throws 
Throwable { + assertValidPath("/"); + assertValidPath("/a/b/c"); + assertValidPath("/users/drwho/org-apache-hadoop/registry/appid-55-55"); + assertValidPath("/a50"); + } + + @Test + public void testInvalidPaths() throws Throwable { + assertInvalidPath("/a_b"); + assertInvalidPath("/UpperAndLowerCase"); + assertInvalidPath("/space in string"); +// Is this valid? assertInvalidPath("/50"); + } + + + private void assertValidPath(String path) throws InvalidPathnameException { + validateZKPath(path); + } + + + private void assertInvalidPath(String path) throws InvalidPathnameException { + try { + validateElementsAsDNS(path); + fail("path considered valid: " + path); + } catch (InvalidPathnameException expected) { + // expected + } + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/CuratorEventCatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/CuratorEventCatcher.java new file mode 100644 index 0000000..d314f95 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/CuratorEventCatcher.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.services; + +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.api.BackgroundCallback; +import org.apache.curator.framework.api.CuratorEvent; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * This is a little event catcher for curator asynchronous + * operations. 
+ */
+public class CuratorEventCatcher implements BackgroundCallback {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CuratorEventCatcher.class);
+
+  public final BlockingQueue<CuratorEvent>
+      events = new LinkedBlockingQueue<CuratorEvent>(1);
+
+  private final AtomicInteger eventCounter = new AtomicInteger(0);
+
+
+  @Override
+  public void processResult(CuratorFramework client,
+      CuratorEvent event) throws
+      Exception {
+    LOG.info("received {}", event);
+    eventCounter.incrementAndGet();
+    events.put(event);
+  }
+
+
+  public int getCount() {
+    return eventCounter.get();
+  }
+
+  /**
+   * Blocking operation to take the first event off the queue
+   * @return the first event on the queue, when it arrives
+   * @throws InterruptedException if interrupted
+   */
+  public CuratorEvent take() throws InterruptedException {
+    return events.take();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/TestCuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/TestCuratorService.java
new file mode 100644
index 0000000..3464ed1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/TestCuratorService.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.client.services;
+
+import org.apache.curator.framework.api.CuratorEvent;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.yarn.registry.AbstractZKRegistryTest;
+import org.apache.hadoop.yarn.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.registry.client.services.zk.CuratorService;
+import org.apache.hadoop.yarn.registry.client.services.zk.RegistrySecurity;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.data.ACL;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Test the curator service
+ */
+public class TestCuratorService extends AbstractZKRegistryTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestCuratorService.class);
+
+
+  protected CuratorService curatorService;
+
+  public static final String MISSING = "/missing";
+  private List<ACL> rootACL;
+
+  @Before
+  public void startCurator() throws IOException {
+    createCuratorService();
+  }
+
+  @After
+  public void stopCurator() {
+    ServiceOperations.stop(curatorService);
+  }
+
+  /**
+   * Create an instance
+   */
+  protected void createCuratorService() throws IOException {
+    curatorService = new CuratorService("curatorService");
+    curatorService.init(createRegistryConfiguration());
+    curatorService.start();
+    rootACL = RegistrySecurity.WorldReadWriteACL;
+    curatorService.maybeCreate("", CreateMode.PERSISTENT, rootACL, true);
+  }
+
+  @Test
+  public void testLs() throws Throwable {
+    curatorService.zkList("/");
+  }
+
+  @Test(expected = PathNotFoundException.class)
+  public void testLsNotFound() throws Throwable {
+    List<String> ls = curatorService.zkList(MISSING);
+  }
+
+  @Test
+  public void testExists() throws Throwable {
+    assertTrue(curatorService.zkPathExists("/"));
+  }
+
+  @Test
+  public void testExistsMissing() throws Throwable {
+    assertFalse(curatorService.zkPathExists(MISSING));
+  }
+
+  @Test
+  public void testVerifyExists() throws Throwable {
+    pathMustExist("/");
+  }
+
+  @Test(expected = PathNotFoundException.class)
+  public void testVerifyExistsMissing() throws Throwable {
+    pathMustExist("/file-not-found");
+  }
+
+  @Test
+  public void testMkdirs() throws Throwable {
+    mkPath("/p1", CreateMode.PERSISTENT);
+    pathMustExist("/p1");
+    mkPath("/p1/p2", CreateMode.EPHEMERAL);
+    pathMustExist("/p1/p2");
+  }
+
+  private void mkPath(String path, CreateMode mode) throws IOException {
+    curatorService.zkMkPath(path, mode, false,
+        RegistrySecurity.WorldReadWriteACL);
+  }
+
+  public void pathMustExist(String path) throws IOException {
+    curatorService.zkPathMustExist(path);
+  }
+
+  @Test(expected = PathNotFoundException.class)
+  public void testMkdirChild() throws Throwable {
+    mkPath("/testMkdirChild/child", CreateMode.PERSISTENT);
+  }
+
+  @Test
+  public void testMaybeCreate() throws Throwable {
+    assertTrue(curatorService.maybeCreate("/p3", CreateMode.PERSISTENT));
+    assertFalse(curatorService.maybeCreate("/p3", CreateMode.PERSISTENT));
+  }
+
+  @Test
+  public void testRM() throws Throwable {
+    mkPath("/rm", CreateMode.PERSISTENT);
+    curatorService.zkDelete("/rm", false, null);
+    verifyNotExists("/rm");
+    curatorService.zkDelete("/rm", false, null);
+ 
} + + @Test + public void testRMNonRf() throws Throwable { + mkPath("/rm", CreateMode.PERSISTENT); + mkPath("/rm/child", CreateMode.PERSISTENT); + try { + curatorService.zkDelete("/rm", false, null); + fail("expected a failure"); + } catch (PathIsNotEmptyDirectoryException expected) { + + } + } + + @Test + public void testRMRf() throws Throwable { + mkPath("/rm", CreateMode.PERSISTENT); + mkPath("/rm/child", CreateMode.PERSISTENT); + curatorService.zkDelete("/rm", true, null); + verifyNotExists("/rm"); + curatorService.zkDelete("/rm", true, null); + } + + + @Test + public void testBackgroundDelete() throws Throwable { + mkPath("/rm", CreateMode.PERSISTENT); + mkPath("/rm/child", CreateMode.PERSISTENT); + CuratorEventCatcher events = new CuratorEventCatcher(); + curatorService.zkDelete("/rm", true, events); + CuratorEvent taken = events.take(); + LOG.info("took {}", taken); + assertEquals(1, events.getCount()); + } + + @Test + public void testCreate() throws Throwable { + + curatorService.zkCreate("/testcreate", + CreateMode.PERSISTENT, getTestBuffer(), + rootACL + ); + pathMustExist("/testcreate"); + } + + @Test + public void testCreateTwice() throws Throwable { + byte[] buffer = getTestBuffer(); + curatorService.zkCreate("/testcreatetwice", + CreateMode.PERSISTENT, buffer, + rootACL); + try { + curatorService.zkCreate("/testcreatetwice", + CreateMode.PERSISTENT, buffer, + rootACL); + fail(); + } catch (FileAlreadyExistsException e) { + + } + } + + @Test + public void testCreateUpdate() throws Throwable { + byte[] buffer = getTestBuffer(); + curatorService.zkCreate("/testcreateupdate", + CreateMode.PERSISTENT, buffer, + rootACL + ); + curatorService.zkUpdate("/testcreateupdate", buffer); + } + + @Test(expected = PathNotFoundException.class) + public void testUpdateMissing() throws Throwable { + curatorService.zkUpdate("/testupdatemissing", getTestBuffer()); + } + + @Test + public void testUpdateDirectory() throws Throwable { + mkPath("/testupdatedirectory", CreateMode.PERSISTENT); + curatorService.zkUpdate("/testupdatedirectory", getTestBuffer()); + } + + @Test + public void testUpdateDirectorywithChild() throws Throwable { + mkPath("/testupdatedirectorywithchild", CreateMode.PERSISTENT); + mkPath("/testupdatedirectorywithchild/child", CreateMode.PERSISTENT); + curatorService.zkUpdate("/testupdatedirectorywithchild", getTestBuffer()); + } + + @Test + public void testUseZKServiceForBinding() throws Throwable { + CuratorService cs2 = new CuratorService("curator", zookeeper); + cs2.init(new Configuration()); + cs2.start(); + } + + protected byte[] getTestBuffer() { + byte[] buffer = new byte[1]; + buffer[0] = '0'; + return buffer; + } + + + public void verifyNotExists(String path) throws IOException { + if (curatorService.zkPathExists(path)) { + fail("Path should not exist: " + path); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/TestMicroZookeeperService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/TestMicroZookeeperService.java new file mode 100644 index 0000000..aba3107 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/client/services/TestMicroZookeeperService.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.client.services; + +import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.registry.server.services.MicroZookeeperService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.rules.Timeout; + +import java.io.IOException; + +/** + * Simple tests to look at the micro ZK service itself + */ +public class TestMicroZookeeperService extends Assert { + + private MicroZookeeperService zookeeper; + + @Rule + public final Timeout testTimeout = new Timeout(10000); + @Rule + public TestName methodName = new TestName(); + + @After + public void destroyZKServer() throws IOException { + + ServiceOperations.stop(zookeeper); + } + + @Test + public void testTempDirSupport() throws Throwable { + YarnConfiguration conf = new YarnConfiguration(); + zookeeper = new MicroZookeeperService("t1"); + zookeeper.init(conf); + zookeeper.start(); + zookeeper.stop(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/operations/TestRegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/operations/TestRegistryOperations.java new file mode 100644 index 0000000..ffee31f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/operations/TestRegistryOperations.java @@ -0,0 +1,456 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.yarn.registry.operations;
+
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.yarn.registry.AbstractRegistryTest;
+
+import static org.apache.hadoop.yarn.registry.client.binding.RegistryTypeUtils.*;
+
+import org.apache.hadoop.yarn.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.registry.client.binding.RecordOperations;
+import org.apache.hadoop.yarn.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.yarn.registry.client.binding.ZKPathDumper;
+import org.apache.hadoop.yarn.registry.client.exceptions.InvalidRecordException;
+import org.apache.hadoop.yarn.registry.client.api.CreateFlags;
+import org.apache.hadoop.yarn.registry.client.services.CuratorEventCatcher;
+import org.apache.hadoop.yarn.registry.client.types.PersistencePolicies;
+import org.apache.hadoop.yarn.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.yarn.registry.client.types.ServiceRecord;
+import org.apache.hadoop.yarn.registry.server.services.RMRegistryOperationsService;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.URI;
+import java.util.Map;
+import java.util.concurrent.Future;
+
+public class TestRegistryOperations extends AbstractRegistryTest {
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(TestRegistryOperations.class);
+
+  @Test
+  public void testPutGetServiceEntry() throws Throwable {
+    ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0,
+        PersistencePolicies.APPLICATION);
+    ServiceRecord resolved = operations.resolve(ENTRY_PATH);
+    validateEntry(resolved);
+    assertMatches(written, resolved);
+  }
+
+  @Test
+  public void testDeleteServiceEntry() throws Throwable {
+    putExampleServiceEntry(ENTRY_PATH, 0);
+    operations.delete(ENTRY_PATH, false);
+  }
+
+  @Test
+  public void testDeleteNonexistentEntry() throws Throwable {
+    operations.delete(ENTRY_PATH, false);
+    operations.delete(ENTRY_PATH, true);
+  }
+
+  @Test
+  public void testStat() throws Throwable {
+    putExampleServiceEntry(ENTRY_PATH, 0);
+    RegistryPathStatus stat = operations.stat(ENTRY_PATH);
+    assertTrue(stat.size > 0);
+    assertTrue(stat.time > 0);
+    assertEquals(ENTRY_PATH, stat.path);
+  }
+
+  @Test
+  public void testLsParent() throws Throwable {
+    ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0);
+    RegistryPathStatus stat = operations.stat(ENTRY_PATH);
+
+    RegistryPathStatus[] statuses =
+        operations.list(PARENT_PATH);
+    assertEquals(1, statuses.length);
+    assertEquals(stat, statuses[0]);
+
+    Map<String, ServiceRecord> records =
+        RecordOperations.extractServiceRecords(operations, statuses);
+    assertEquals(1, records.size());
+    ServiceRecord record = records.get(ENTRY_PATH);
+    assertMatches(written, record);
+  }
+
+  @Test
+  public void testDeleteNonEmpty() throws Throwable {
+    putExampleServiceEntry(ENTRY_PATH, 0);
+    try {
+      operations.delete(PARENT_PATH, false);
+      fail("Expected a failure");
+    } catch (PathIsNotEmptyDirectoryException expected) {
+      // expected
+    }
+    operations.delete(PARENT_PATH, true);
+  }
+
+  @Test(expected = PathNotFoundException.class)
+  public void testStatEmptyPath() throws Throwable {
+    operations.stat(ENTRY_PATH);
+  }
+
+  @Test(expected = PathNotFoundException.class)
+  public void testLsEmptyPath() throws Throwable {
+    operations.list(PARENT_PATH);
+  }
+
+  @Test(expected = PathNotFoundException.class)
+  public void testResolveEmptyPath() throws Throwable {
+    operations.resolve(ENTRY_PATH);
+  }
+
+  @Test
+  public void testMkdirNoParent() throws Throwable {
+    String path = ENTRY_PATH + "/missing";
+    try {
+      operations.mknode(path, false);
+      RegistryPathStatus stat = operations.stat(path);
+      fail("Got a status " + stat);
+    } catch (PathNotFoundException expected) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testDoubleMkdir() throws Throwable {
+    operations.mknode(USERPATH, false);
+    String path = USERPATH + "newentry";
+    assertTrue(operations.mknode(path, false));
+    operations.stat(path);
+    assertFalse(operations.mknode(path, false));
+  }
+
+  @Test
+  public void testPutNoParent() throws Throwable {
+    ServiceRecord record = new ServiceRecord();
+    record.id = "testPutNoParent";
+    String path = "/path/without/parent";
+    try {
+      operations.create(path, record, 0);
+      // the create() above should have failed; if a status can be
+      // retrieved, the operation wrongly succeeded
+      RegistryPathStatus stat = operations.stat(path);
+      fail("Got a status " + stat);
+    } catch (PathNotFoundException expected) {
+      // expected
+    }
+  }
+
+  @Test(expected = PathNotFoundException.class)
+  public void testPutNoParent2() throws Throwable {
+    ServiceRecord record = new ServiceRecord();
+    record.id = "testPutNoParent";
+    String path = "/path/without/parent";
+    operations.create(path, record, 0);
+  }
+
+  @Test
+  public void testStatDirectory() throws Throwable {
+    String empty = "/empty";
+    operations.mknode(empty, false);
+    operations.stat(empty);
+  }
+
+  @Test
+  public void testStatRootPath() throws Throwable {
+    operations.mknode("/", false);
+    operations.stat("/");
+  }
+
+  @Test
+  public void testStatOneLevelDown() throws Throwable {
+    operations.mknode("/subdir", true);
+    operations.stat("/subdir");
+  }
+
+  @Test
+  public void testLsRootPath() throws Throwable {
+    String root = "/";
+    operations.mknode(root, false);
+    operations.stat(root);
+    // the root path must be listable as well as stat-able
+    RegistryPathStatus[] statuses = operations.list(root);
+    assertNotNull(statuses);
+  }
+
+  @Test
+  public void testResolvePathThatHasNoEntry() throws Throwable {
+    String empty = "/empty2";
+    operations.mknode(empty, false);
+    try {
+      ServiceRecord record = operations.resolve(empty);
+      fail("expected an exception, got " + record);
+    } catch (InvalidRecordException expected) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testOverwrite() throws Throwable {
+    ServiceRecord written = putExampleServiceEntry(ENTRY_PATH, 0);
+    ServiceRecord resolved1 = operations.resolve(ENTRY_PATH);
+    resolved1.description = "resolved1";
+    try {
+      operations.create(ENTRY_PATH, resolved1, 0);
+      fail("overwrite succeeded when it should have failed");
+    } catch (FileAlreadyExistsException expected) {
+      // expected
+    }
+
+    // verify the entry is unchanged
+    ServiceRecord resolved2 = operations.resolve(ENTRY_PATH);
+    assertMatches(written, resolved2);
+
+    // and that an explicit overwrite flag is honored
+    operations.create(ENTRY_PATH, resolved1, CreateFlags.OVERWRITE);
+    ServiceRecord resolved3 = operations.resolve(ENTRY_PATH);
+    assertMatches(resolved1, resolved3);
+  }
+
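+  /*
+   * The next test builds the following registry layout. This is an
+   * illustrative sketch only, not an assertion of the production tree;
+   * <user> and the encoded container IDs come from the helper methods
+   * used in the test body:
+   *
+   *   /users/<user>/tomcat                        application record
+   *   /users/<user>/tomcat/components/            one child per container
+   *   /users/<user>/tomcat/components/<container> container records
+   */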
+  /**
+   * Create a complex example app
+   * @throws Throwable
+   */
+  @Test
+  public void testCreateComplexApplication() throws Throwable {
+    String appId = "application_1408631738011_0001";
+    String cid = "container_1408631738011_0001_01_";
+    String cid1 = cid + "000001";
+    String cid2 = cid + "000002";
+    String appPath = USERPATH + "tomcat";
+
+    ServiceRecord webapp = new ServiceRecord(appId,
+        "tomcat-based web application",
+        PersistencePolicies.APPLICATION, null);
+    webapp.addExternalEndpoint(restEndpoint("www",
+        new URI("http", "//loadbalancer/", null)));
+
+    ServiceRecord comp1 = new ServiceRecord(cid1, null,
+        PersistencePolicies.CONTAINER, null);
+    comp1.addExternalEndpoint(restEndpoint("www",
+        new URI("http", "//rack4server3:43572", null)));
+    comp1.addInternalEndpoint(
+        inetAddrEndpoint("jmx", "JMX", "rack4server3", 43573));
+
+    // Component 2 has a container lifespan
+    ServiceRecord comp2 = new ServiceRecord(cid2, null,
+        PersistencePolicies.CONTAINER, null);
+    comp2.addExternalEndpoint(restEndpoint("www",
+        new URI("http", "//rack1server28:35881", null)));
+    comp2.addInternalEndpoint(
+        inetAddrEndpoint("jmx", "JMX", "rack1server28", 35882));
+
+    operations.mknode(USERPATH, false);
+    operations.create(appPath, webapp, CreateFlags.OVERWRITE);
+    String components = appPath + RegistryConstants.SUBPATH_COMPONENTS + "/";
+    operations.mknode(components, false);
+    String dns1 = RegistryPathUtils.encodeYarnID(cid1);
+    String dns1path = components + dns1;
+    operations.create(dns1path, comp1, CreateFlags.CREATE);
+    String dns2 = RegistryPathUtils.encodeYarnID(cid2);
+    String dns2path = components + dns2;
+    operations.create(dns2path, comp2, CreateFlags.CREATE);
+
+    ZKPathDumper pathDumper = registry.dumpPath();
+    LOG.info(pathDumper.toString());
+
+    logRecord("tomcat", webapp);
+    logRecord(dns1, comp1);
+    logRecord(dns2, comp2);
+
+    ServiceRecord dns1resolved = operations.resolve(dns1path);
+    assertEquals("Persistence policies on resolved entry",
+        PersistencePolicies.CONTAINER, dns1resolved.persistence);
+
+    RegistryPathStatus[] componentStats = operations.list(components);
+    assertEquals(2, componentStats.length);
+    Map<String, ServiceRecord> records =
+        RecordOperations.extractServiceRecords(operations, componentStats);
+    assertEquals(2, records.size());
+    ServiceRecord retrieved1 = records.get(dns1path);
+    logRecord(retrieved1.id, retrieved1);
+    assertMatches(dns1resolved, retrieved1);
+    assertEquals(PersistencePolicies.CONTAINER, retrieved1.persistence);
+
+    // create a (non-record) listing under components/; record extraction
+    // must skip over it
+    operations.mknode(components + "subdir", false);
+    RegistryPathStatus[] componentStatsUpdated = operations.list(components);
+    assertEquals(3, componentStatsUpdated.length);
+    Map<String, ServiceRecord> recordsUpdated =
+        RecordOperations.extractServiceRecords(operations,
+            componentStatsUpdated);
+    assertEquals(2, recordsUpdated.size());
+
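+    // Illustrative sanity check, not part of the original test: both
+    // container entries must still be present before any purging starts.
+    // assertPathExists() is the same helper the deletion sequence below
+    // relies on.
+    assertPathExists(dns1path);
+    assertPathExists(dns2path);
+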
+    // now do some deletions.
+
+    // synchronous delete container ID 2
+
+    // fail if the app policy is chosen
+    assertEquals(0, registry.purgeRecords("/", cid2,
+        PersistencePolicies.APPLICATION,
+        RMRegistryOperationsService.PurgePolicy.FailOnChildren,
+        null));
+    // succeed for container
+    assertEquals(1, registry.purgeRecords("/", cid2,
+        PersistencePolicies.CONTAINER,
+        RMRegistryOperationsService.PurgePolicy.FailOnChildren,
+        null));
+    assertPathNotFound(dns2path);
+    assertPathExists(dns1path);
+
+    // attempt to delete root with policy of fail on children
+    try {
+      registry.purgeRecords("/",
+          appId,
+          PersistencePolicies.APPLICATION,
+          RMRegistryOperationsService.PurgePolicy.FailOnChildren, null);
+      fail("expected a failure");
+    } catch (PathIsNotEmptyDirectoryException expected) {
+      // expected
+    }
+    assertPathExists(appPath);
+    assertPathExists(dns1path);
+
+    // downgrade to a skip on children
+    assertEquals(0,
+        registry.purgeRecords("/", appId,
+            PersistencePolicies.APPLICATION,
+            RMRegistryOperationsService.PurgePolicy.SkipOnChildren,
+            null));
+    assertPathExists(appPath);
+    assertPathExists(dns1path);
+
+    // now trigger recursive delete
+    assertEquals(1,
+        registry.purgeRecords("/",
+            appId,
+            PersistencePolicies.APPLICATION,
+            RMRegistryOperationsService.PurgePolicy.PurgeAll,
+            null));
+    assertPathNotFound(appPath);
+    assertPathNotFound(dns1path);
+  }
+
+  @Test
+  public void testPurgeEntryCuratorCallback() throws Throwable {
+
+    String path = "/users/example/hbase/hbase1/";
+    ServiceRecord written = buildExampleServiceEntry(
+        PersistencePolicies.APPLICATION_ATTEMPT);
+    written.id = "testAsyncPurgeEntry_attempt_001";
+
+    operations.mknode(RegistryPathUtils.parentOf(path), true);
+    operations.create(path, written, 0);
+
+    ZKPathDumper dump = registry.dumpPath();
+    CuratorEventCatcher events = new CuratorEventCatcher();
+
+    LOG.info("Initial state {}", dump);
+
+    // container query: no entry of that policy, so nothing is purged
+    int opcount = registry.purgeRecords("/",
+        written.id,
+        PersistencePolicies.CONTAINER,
+        RMRegistryOperationsService.PurgePolicy.PurgeAll,
+        events);
+    assertPathExists(path);
+    assertEquals(0, opcount);
+    assertEquals("Event counter", 0, events.getCount());
+
+    // now the application attempt
+    opcount = registry.purgeRecords("/",
+        written.id,
+        -1,
+        RMRegistryOperationsService.PurgePolicy.PurgeAll,
+        events);
+
+    LOG.info("Final state {}", dump);
+
+    assertPathNotFound(path);
+    assertEquals("wrong no of delete operations in " + dump, 1, opcount);
+    // and validate the callback event
+    assertEquals("Event counter", 1, events.getCount());
+  }
+
+  @Test
+  public void testAsyncPurgeEntry() throws Throwable {
+
+    String path = "/users/example/hbase/hbase1/";
+    ServiceRecord written = buildExampleServiceEntry(
+        PersistencePolicies.APPLICATION_ATTEMPT);
+    written.id = "testAsyncPurgeEntry_attempt_001";
+
+    operations.mknode(RegistryPathUtils.parentOf(path), true);
+    operations.create(path, written, 0);
+
+    ZKPathDumper dump = registry.dumpPath();
+
+    LOG.info("Initial state {}", dump);
+
+    Future<Integer> future = registry.purgeRecordsAsync("/",
+        written.id,
+        PersistencePolicies.CONTAINER);
+
+    int opcount = future.get();
+    assertPathExists(path);
+    assertEquals(0, opcount);
+
+    // now all matching entries
+    future = registry.purgeRecordsAsync("/",
+        written.id,
+        -1);
+    opcount = future.get();
+    LOG.info("Final state {}", dump);
+
+    assertPathNotFound(path);
+    assertEquals("wrong no of delete operations in " + dump, 1, opcount);
+  }
+
+  @Test
+  public void 
testPutGetContainerPersistenceServiceEntry() throws Throwable { + + String path = ENTRY_PATH; + ServiceRecord written = buildExampleServiceEntry( + PersistencePolicies.CONTAINER); + + operations.mknode(RegistryPathUtils.parentOf(path), true); + operations.create(path, written, CreateFlags.CREATE); + ServiceRecord resolved = operations.resolve(path); + validateEntry(resolved); + assertMatches(written, resolved); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/AbstractSecureRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/AbstractSecureRegistryTest.java new file mode 100644 index 0000000..8f4de3f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/AbstractSecureRegistryTest.java @@ -0,0 +1,328 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.secure; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.Service; +import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.yarn.registry.RegistryTestHelper; +import org.apache.hadoop.yarn.registry.client.services.zk.RegistrySecurity; +import org.apache.hadoop.yarn.registry.client.services.zk.ZookeeperConfigOptions; +import org.apache.hadoop.yarn.registry.server.services.AddingCompositeService; +import org.apache.hadoop.yarn.registry.server.services.MicroZookeeperService; +import org.apache.hadoop.yarn.registry.server.services.MicroZookeeperServiceKeys; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestName; +import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.Subject; +import javax.security.auth.kerberos.KerberosPrincipal; +import javax.security.auth.login.LoginContext; +import javax.security.auth.login.LoginException; +import java.io.File; +import java.io.IOException; +import java.security.Principal; +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; + +/** + * Add kerberos tests. 
This is based on the (JUnit3) KerberosSecurityTestcase
+ * and its test case, TestMiniKdc
+ */
+public class AbstractSecureRegistryTest extends RegistryTestHelper {
+  public static final String REALM = "EXAMPLE.COM";
+  public static final String ZOOKEEPER = "zookeeper";
+  public static final String ZOOKEEPER_LOCALHOST = "zookeeper/localhost";
+  public static final String ZOOKEEPER_LOCALHOST_REALM =
+      ZOOKEEPER_LOCALHOST + "@" + REALM;
+  public static final String ALICE = "alice";
+  public static final String ALICE_LOCALHOST = "alice/localhost";
+  public static final String BOB = "bob";
+  public static final String BOB_LOCALHOST = "bob/localhost";
+  public static final String SASL_AUTH_PROVIDER =
+      "org.apache.hadoop.yarn.registry.secure.ExtendedSASLAuthenticationProvider";
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AbstractSecureRegistryTest.class);
+
+  public static final Configuration CONF;
+
+  static {
+    CONF = new Configuration();
+    CONF.set("hadoop.security.authentication", "kerberos");
+    CONF.setBoolean("hadoop.security.authorization", true);
+  }
+
+  private static final AddingCompositeService classTeardown =
+      new AddingCompositeService("classTeardown");
+
+  // static initializer guarantees it is always started
+  // ahead of any @BeforeClass methods
+  static {
+    classTeardown.init(CONF);
+    classTeardown.start();
+  }
+
+  public static final String SUN_SECURITY_KRB5_DEBUG =
+      "sun.security.krb5.debug";
+
+  private final AddingCompositeService teardown =
+      new AddingCompositeService("teardown");
+
+  protected static MiniKdc kdc;
+  protected static File keytab_zk;
+  protected static File keytab_bob;
+  protected static File keytab_alice;
+  protected static File kdcWorkDir;
+  protected static Properties kdcConf;
+  protected static RegistrySecurity registrySecurity;
+
+  @Rule
+  public final Timeout testTimeout = new Timeout(900000);
+
+  @Rule
+  public TestName methodName = new TestName();
+  protected MicroZookeeperService secureZK;
+  protected static File jaasFile;
+  private LoginContext zookeeperLogin;
+
+  /**
+   * All class initialization for this test class
+   * @throws Exception
+   */
+  @BeforeClass
+  public static void beforeSecureRegistryTestClass() throws Exception {
+    registrySecurity = new RegistrySecurity(CONF, "");
+    setupKDCAndPrincipals();
+  }
+
+  @AfterClass
+  public static void afterSecureRegistryTestClass() throws Exception {
+    classTeardown.close();
+    teardownKDC();
+  }
+
+  /**
+   * Give our thread a name
+   */
+  @Before
+  public void nameThread() {
+    Thread.currentThread().setName("JUnit");
+  }
+
+  /**
+   * For unknown reasons, the before-class setting of the JVM properties
+   * was not being picked up. This method addresses that by setting them
+   * before every test case
+   */
+  @Before
+  public void beforeSecureRegistryTest() {
+    resetJaasConfKeys();
+    RegistrySecurity.bindJVMtoJAASFile(jaasFile);
+    initHadoopSecurity();
+  }
+
+  @After
+  public void afterSecureRegistryTest() throws IOException {
+    teardown.close();
+    stopSecureZK();
+  }
+
+  protected static void addToClassTeardown(Service svc) {
+    classTeardown.addService(svc);
+  }
+
+  protected void addToTeardown(Service svc) {
+    teardown.addService(svc);
+  }
+
+  public static void teardownKDC() throws Exception {
+    if (kdc != null) {
+      kdc.stop();
+      kdc = null;
+    }
+  }
+
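+  /*
+   * For orientation: setupKDCAndPrincipals() below appends one JAAS entry
+   * per principal via RegistrySecurity.createJAASEntry(). The exact text
+   * is produced by that method; as an illustrative sketch only, an entry
+   * is expected to look roughly like the following, with the login module
+   * options mirroring those in KerberosConfiguration later in this patch:
+   *
+   *   ZOOKEEPER {
+   *     com.sun.security.auth.module.Krb5LoginModule required
+   *       keyTab="/path/to/kdc/zookeeper.keytab"
+   *       principal="zookeeper/localhost"
+   *       useKeyTab=true
+   *       storeKey=true;
+   *   };
+   */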
+  /**
+   * Sets up the KDC and a set of principals in the JAAS file
+   *
+   * @throws Exception
+   */
+  public static void setupKDCAndPrincipals() throws Exception {
+    // set up the KDC
+    File target = new File(System.getProperty("test.dir", "target"));
+    kdcWorkDir = new File(target, "kdc");
+    kdcWorkDir.mkdirs();
+    kdcConf = MiniKdc.createConf();
+    kdcConf.setProperty(MiniKdc.DEBUG, "true");
+    kdc = new MiniKdc(kdcConf, kdcWorkDir);
+    kdc.start();
+
+    keytab_zk = createPrincipalAndKeytab(ZOOKEEPER_LOCALHOST,
+        "zookeeper.keytab");
+    keytab_alice = createPrincipalAndKeytab(ALICE_LOCALHOST, "alice.keytab");
+    keytab_bob = createPrincipalAndKeytab(BOB_LOCALHOST, "bob.keytab");
+
+    StringBuilder jaas = new StringBuilder(1024);
+    jaas.append(registrySecurity.createJAASEntry(ZOOKEEPER,
+        ZOOKEEPER_LOCALHOST, keytab_zk));
+    jaas.append(registrySecurity.createJAASEntry(ALICE,
+        ALICE_LOCALHOST, keytab_alice));
+    jaas.append(registrySecurity.createJAASEntry(BOB,
+        BOB_LOCALHOST, keytab_bob));
+
+    jaasFile = new File(kdcWorkDir, "jaas.txt");
+    FileUtils.write(jaasFile, jaas.toString());
+    RegistrySecurity.bindJVMtoJAASFile(jaasFile);
+    LOG.info(jaas.toString());
+  }
+
+  public void resetJaasConfKeys() {
+    RegistrySecurity.clearJaasSystemProperties();
+  }
+
+  public void initHadoopSecurity() {
+    UserGroupInformation.setConfiguration(CONF);
+  }
+
+  /**
+   * Stop the secure ZK and log out the ZK account
+   */
+  public synchronized void stopSecureZK() {
+    ServiceOperations.stop(secureZK);
+    secureZK = null;
+    logout(zookeeperLogin);
+    zookeeperLogin = null;
+  }
+
+  public static MiniKdc getKdc() {
+    return kdc;
+  }
+
+  public static File getKdcWorkDir() {
+    return kdcWorkDir;
+  }
+
+  public static Properties getKdcConf() {
+    return kdcConf;
+  }
+
+  /**
+   * Create a secure ZK service instance
+   * @param name service name
+   * @return an initialized (but not yet started) instance
+   * @throws Exception on any failure
+   */
+  protected static MicroZookeeperService createSecureZKInstance(String name)
+      throws Exception {
+    String context = ZOOKEEPER;
+    Configuration conf = new Configuration();
+
+    File testdir = new File(System.getProperty("test.dir", "target"));
+    File workDir = new File(testdir, name);
+    workDir.mkdirs();
+    System.setProperty(
+        ZookeeperConfigOptions.PROP_ZK_MAINTAIN_CONNECTION_DESPITE_SASL_FAILURE,
+        "false");
+    RegistrySecurity.validateContext(context);
+    conf.set(MicroZookeeperServiceKeys.KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT,
+        context);
+    MicroZookeeperService secureZK = new MicroZookeeperService(name);
+    secureZK.init(conf);
+    LOG.info(secureZK.getDiagnostics());
+    return secureZK;
+  }
+
+  public static File createPrincipalAndKeytab(String principal,
+      String filename) throws Exception {
+    assertNotEmpty("empty principal", principal);
+    assertNotEmpty("empty keytab filename", filename);
+    assertNotNull("Null KDC", kdc);
+    File keytab = new File(kdcWorkDir, filename);
+    kdc.createPrincipal(keytab, principal);
+    return keytab;
+  }
+
+  public static 
String getPrincipalAndRealm(String principal) { + return principal + "@" + getRealm(); + } + + protected static String getRealm() { + return kdc.getRealm(); + } + + + /** + * Log in, defaulting to the client context + * @param principal principal + * @param context context + * @param keytab keytab + * @return the logged in context + * @throws LoginException failure to log in + */ + protected LoginContext login(String principal, + String context, File keytab) throws LoginException { + LOG.info("Logging in as {} in context {} with keytab {}", + principal, context, keytab); + Set principals = new HashSet(); + principals.add(new KerberosPrincipal(principal)); + Subject subject = new Subject(false, principals, new HashSet(), + new HashSet()); + LoginContext login; + login = new LoginContext(context, subject, null, + KerberosConfiguration.createClientConfig(principal, keytab)); + login.login(); + return login; + } + + + /** + * Start the secure ZK instance using the test method name as the path. + * As the entry is saved to the {@link #secureZK} field, it + * is automatically stopped after the test case. + * @throws Exception on any failure + */ + protected synchronized void startSecureZK() throws Exception { + assertNull("Zookeeper is already running", secureZK); + + zookeeperLogin = login(ZOOKEEPER_LOCALHOST, ZOOKEEPER, keytab_zk); + secureZK = createSecureZKInstance("test-" + methodName.getMethodName()); + secureZK.start(); + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/ExtendedSASLAuthenticationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/ExtendedSASLAuthenticationProvider.java new file mode 100644 index 0000000..00ea8cc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/ExtendedSASLAuthenticationProvider.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.secure; + +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.server.ServerCnxn; +import org.apache.zookeeper.server.auth.SASLAuthenticationProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ExtendedSASLAuthenticationProvider extends + SASLAuthenticationProvider { + private static final Logger LOG = + LoggerFactory.getLogger(ExtendedSASLAuthenticationProvider.class); + + @Override + public KeeperException.Code handleAuthentication(ServerCnxn cnxn, + byte[] authData) { + LOG.warn("Received auth request with {} bytes of data ", + authData!=null? 
authData.length: -1);
+    // test stub: accept whatever credentials were supplied
+    return KeeperException.Code.OK;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/KerberosConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/KerberosConfiguration.java
new file mode 100644
index 0000000..bbfa902
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/KerberosConfiguration.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.secure;
+
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+
+import javax.security.auth.login.AppConfigurationEntry;
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+
+class KerberosConfiguration extends javax.security.auth.login.Configuration {
+  private String principal;
+  private String keytab;
+  private boolean isInitiator;
+
+  KerberosConfiguration(String principal, File keytab,
+      boolean client) {
+    this.principal = principal;
+    this.keytab = keytab.getAbsolutePath();
+    this.isInitiator = client;
+  }
+
+  public static javax.security.auth.login.Configuration createClientConfig(
+      String principal,
+      File keytab) {
+    return new KerberosConfiguration(principal, keytab, true);
+  }
+
+  public static javax.security.auth.login.Configuration createServerConfig(
+      String principal,
+      File keytab) {
+    return new KerberosConfiguration(principal, keytab, false);
+  }
+
+  @Override
+  public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+    Map<String, String> options = new HashMap<String, String>();
+    options.put("keyTab", keytab);
+    options.put("principal", principal);
+    options.put("useKeyTab", "true");
+    options.put("storeKey", "true");
+    options.put("doNotPrompt", "true");
+    options.put("useTicketCache", "true");
+    options.put("renewTGT", "true");
+    options.put("refreshKrb5Config", "true");
+    options.put("isInitiator", Boolean.toString(isInitiator));
+    String ticketCache = System.getenv("KRB5CCNAME");
+    if (ticketCache != null) {
+      options.put("ticketCache", ticketCache);
+    }
+    options.put("debug", "true");
+
+    return new AppConfigurationEntry[]{
+        new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+            options)
+    };
+  }
+
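+  /*
+   * Illustrative usage, mirroring the way the test classes later in this
+   * patch drive this configuration (a sketch, not an API guarantee):
+   *
+   *   LoginContext lc = new LoginContext("", subject, null,
+   *       KerberosConfiguration.createClientConfig(principal, keytab));
+   *   lc.login();
+   */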
+  @Override
+  public String toString() {
+    return "KerberosConfiguration with principal " + principal;
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestRegistrySecurityHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestRegistrySecurityHelper.java
new file mode 100644
index 0000000..660614d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestRegistrySecurityHelper.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.secure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.registry.client.services.zk.RegistrySecurity;
+import org.apache.zookeeper.ZooDefs;
+import org.apache.zookeeper.data.ACL;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.yarn.registry.client.api.RegistryConstants.*;
+
+/**
+ * Test for registry security operations
+ */
+public class TestRegistrySecurityHelper extends Assert {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRegistrySecurityHelper.class);
+
+  public static final String YARN_EXAMPLE_COM = "yarn@example.com";
+  public static final String SASL_YARN_EXAMPLE_COM =
+      "sasl:" + YARN_EXAMPLE_COM;
+  public static final String MAPRED_EXAMPLE_COM = "mapred@example.com";
+  public static final String SASL_MAPRED_EXAMPLE_COM =
+      "sasl:" + MAPRED_EXAMPLE_COM;
+  public static final String SASL_MAPRED_APACHE = "sasl:mapred@APACHE";
+  public static final String DIGEST_F0AF = "digest:f0afbeeb00baa";
+  public static final String SASL_YARN_SHORT = "sasl:yarn@";
+  public static final String SASL_MAPRED_SHORT = "sasl:mapred@";
+  public static final String REALM_EXAMPLE_COM = "example.com";
+  private static RegistrySecurity registrySecurity;
+
+  @BeforeClass
+  public static void setupTestRegistrySecurityHelper() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setBoolean(KEY_REGISTRY_SECURE, true);
+    conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS");
+    registrySecurity = new RegistrySecurity(conf);
+    // init the ACLs OUTSIDE A KERBEROS CLUSTER
+    registrySecurity.initACLs();
+  }
+
+  @Test
+  public void testACLSplitRealmed() throws Throwable {
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_EXAMPLE_COM +
+            ", " +
+            SASL_MAPRED_EXAMPLE_COM,
+            "");
+
+    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
+    assertEquals(SASL_MAPRED_EXAMPLE_COM, pairs.get(1));
+  }
+
+  @Test
+  public void testBuildAclsRealmed() throws Throwable {
+    List<ACL> acls = registrySecurity.buildACLs(
+        SASL_YARN_EXAMPLE_COM +
+        ", " +
+        SASL_MAPRED_EXAMPLE_COM,
+        "",
+        ZooDefs.Perms.ALL);
+    assertEquals(YARN_EXAMPLE_COM, acls.get(0).getId().getId());
+    assertEquals(MAPRED_EXAMPLE_COM, acls.get(1).getId().getId());
+  }
+
+  @Test
+  public void testACLDefaultRealm() throws Throwable {
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_SHORT +
+            ", " +
+            SASL_MAPRED_SHORT,
+            REALM_EXAMPLE_COM);
+
+    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
+    assertEquals(SASL_MAPRED_EXAMPLE_COM, pairs.get(1));
+  }
+
+  @Test
+  public void testBuildAclsDefaultRealm() throws Throwable {
+    List<ACL> acls = registrySecurity.buildACLs(
+        SASL_YARN_SHORT +
+        ", " +
+        SASL_MAPRED_SHORT,
+        REALM_EXAMPLE_COM, ZooDefs.Perms.ALL);
+
+    assertEquals(YARN_EXAMPLE_COM, acls.get(0).getId().getId());
+    assertEquals(MAPRED_EXAMPLE_COM, acls.get(1).getId().getId());
+  }
+
+  @Test
+  public void testACLSplitNullRealm() throws Throwable {
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_SHORT +
+            ", " +
+            SASL_MAPRED_SHORT,
+            "");
+
+    assertEquals(SASL_YARN_SHORT, pairs.get(0));
+    assertEquals(SASL_MAPRED_SHORT, pairs.get(1));
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testBuildAclsNullRealm() throws Throwable {
+    registrySecurity.buildACLs(
+        SASL_YARN_SHORT +
+        ", " +
+        SASL_MAPRED_SHORT,
+        "", ZooDefs.Perms.ALL);
+    fail("buildACLs did not reject a realm-less SASL entry");
+  }
+
+  @Test
+  public void testACLDefaultRealmOnlySASL() throws Throwable {
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_SHORT +
+            ", " +
+            DIGEST_F0AF,
+            REALM_EXAMPLE_COM);
+
+    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
+    assertEquals(DIGEST_F0AF, pairs.get(1));
+  }
+
+  @Test
+  public void testACLSplitMixed() throws Throwable {
+    List<String> pairs =
+        registrySecurity.splitAclPairs(
+            SASL_YARN_SHORT +
+            ", " +
+            SASL_MAPRED_APACHE +
+            ", ,," +
+            DIGEST_F0AF,
+            REALM_EXAMPLE_COM);
+
+    assertEquals(SASL_YARN_EXAMPLE_COM, pairs.get(0));
+    assertEquals(SASL_MAPRED_APACHE, pairs.get(1));
+    assertEquals(DIGEST_F0AF, pairs.get(2));
+  }
+
+  @Test
+  public void testDefaultAClsValid() throws Throwable {
+    registrySecurity.buildACLs(
+        RegistryConstants.DEFAULT_REGISTRY_SYSTEM_ACLS,
+        REALM_EXAMPLE_COM, ZooDefs.Perms.ALL);
+  }
+
+  @Test
+  public void testDefaultRealm() throws Throwable {
+    String realm = RegistrySecurity.getDefaultRealmInJVM();
+    LOG.info("Realm {}", realm);
+  }
+
+  @Test
+  public void testUGIProperties() throws Throwable {
+    UserGroupInformation user = UserGroupInformation.getCurrentUser();
+    ACL acl = registrySecurity.createACLForUser(user, ZooDefs.Perms.ALL);
+    assertFalse(RegistrySecurity.ALL_READWRITE_ACCESS.equals(acl));
+    LOG.info("User {} has ACL {}", user, acl);
+  }
+
+  @Test
+  public void testSecurityImpliesKerberos() throws Throwable {
+    Configuration conf = new Configuration();
+    // hadoop.security.authentication is deliberately left at its default
+    // ("simple"): a secure registry on an insecure cluster must be rejected
+    conf.setBoolean(KEY_REGISTRY_SECURE, true);
+    conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS");
+    RegistrySecurity security = new RegistrySecurity(conf);
+    try {
+      security.initSecurity();
+      fail("initSecurity() did not reject a non-Kerberos configuration");
+    } catch (IOException e) {
+      assertTrue(
+          "did not find " + RegistrySecurity.E_NO_KERBEROS + " in " + e,
+          e.toString().contains(RegistrySecurity.E_NO_KERBEROS));
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureLogins.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureLogins.java
new file mode 100644
index 0000000..e95e56b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureLogins.java @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.registry.secure; + + + +import com.sun.security.auth.module.Krb5LoginModule; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.registry.client.services.zk.RegistrySecurity; +import org.apache.zookeeper.Environment; +import org.apache.zookeeper.data.ACL; +import org.junit.Assume; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.Subject; +import javax.security.auth.kerberos.KerberosPrincipal; +import javax.security.auth.login.LoginContext; +import java.io.File; +import java.io.IOException; +import java.security.Principal; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +/** + * Verify that logins work + */ +public class TestSecureLogins extends AbstractSecureRegistryTest { + private static final Logger LOG = + LoggerFactory.getLogger(TestSecureLogins.class); + + @Test + public void testZKinKeytab() throws Throwable { + Assume.assumeTrue(!Shell.WINDOWS); + try { + String listing = ktList(keytab_zk); + assertTrue("no " + ZOOKEEPER_LOCALHOST + " in " + listing, + listing.contains(ZOOKEEPER_LOCALHOST)); + } catch (IOException e) { + LOG.debug(KTUTIL + " failure: {}", e, e); + Assume.assumeTrue("Failed to run "+ KTUTIL+": " + e, false ); + } + } + + @Test + public void testHasRealm() throws Throwable { + assertNotNull(getRealm()); + LOG.info("ZK principal = {}", getPrincipalAndRealm(ZOOKEEPER_LOCALHOST)); + } + + @Test + public void testJaasFileSetup() throws Throwable { + // the JVM has seemed inconsistent on setting up here + assertNotNull("jaasFile", jaasFile); + String confFilename = System.getProperty(Environment.JAAS_CONF_KEY); + assertEquals(jaasFile.getAbsolutePath(), confFilename); + } + + @Test + public void testJaasFileBinding() throws Throwable { + // the JVM has seemed inconsistent on setting up here + assertNotNull("jaasFile", jaasFile); + RegistrySecurity.bindJVMtoJAASFile(jaasFile); + String confFilename = System.getProperty(Environment.JAAS_CONF_KEY); + assertEquals(jaasFile.getAbsolutePath(), confFilename); + } + + + @Test + public void testClientLogin() throws Throwable { + LoginContext client = login(ALICE_LOCALHOST, ALICE, keytab_alice); + + logLoginDetails(ALICE_LOCALHOST, client); + String confFilename = System.getProperty(Environment.JAAS_CONF_KEY); + assertNotNull("Unset: "+ Environment.JAAS_CONF_KEY, 
confFilename);
+    String config = FileUtils.readFileToString(new File(confFilename));
+    LOG.info("{}=\n{}", confFilename, config);
+    RegistrySecurity.setZKSaslClientProperties(ALICE, ALICE);
+    client.logout();
+  }
+
+  @Test
+  public void testServerLogin() throws Throwable {
+    String name = "";
+    String principalAndRealm = getPrincipalAndRealm(ZOOKEEPER_LOCALHOST);
+    Set<Principal> principals = new HashSet<Principal>();
+    principals.add(new KerberosPrincipal(ZOOKEEPER_LOCALHOST));
+    Subject subject = new Subject(false, principals, new HashSet<Object>(),
+        new HashSet<Object>());
+    LoginContext loginContext = new LoginContext(name, subject, null,
+        KerberosConfiguration.createServerConfig(ZOOKEEPER_LOCALHOST,
+            keytab_zk));
+    loginContext.login();
+    loginContext.logout();
+  }
+
+  @Test
+  public void testKerberosAuth() throws Throwable {
+    File krb5conf = getKdc().getKrb5conf();
+    String krbConfig = FileUtils.readFileToString(krb5conf);
+    LOG.info("krb5.conf at {}:\n{}", krb5conf, krbConfig);
+    Subject subject = new Subject();
+
+    final Krb5LoginModule krb5LoginModule = new Krb5LoginModule();
+    final Map<String, String> options = new HashMap<String, String>();
+    options.put("keyTab", keytab_alice.getAbsolutePath());
+    options.put("principal", ALICE_LOCALHOST);
+    options.put("debug", "true");
+    options.put("doNotPrompt", "true");
+    options.put("isInitiator", "true");
+    options.put("refreshKrb5Config", "true");
+    options.put("renewTGT", "true");
+    options.put("storeKey", "true");
+    options.put("useKeyTab", "true");
+    options.put("useTicketCache", "true");
+
+    krb5LoginModule.initialize(subject, null,
+        new HashMap<String, String>(),
+        options);
+
+    boolean loginOk = krb5LoginModule.login();
+    assertTrue("Failed to login", loginOk);
+    boolean commitOk = krb5LoginModule.commit();
+    assertTrue("Failed to commit", commitOk);
+  }
+
+  @Test
+  public void testUGILogin() throws Throwable {
+    UserGroupInformation ugi = loginUGI(ZOOKEEPER_LOCALHOST, keytab_zk);
+
+    RegistrySecurity.UgiInfo ugiInfo =
+        new RegistrySecurity.UgiInfo(ugi);
+    LOG.info("logged in as: {}", ugiInfo);
+    assertTrue("security is not enabled: " + ugiInfo,
+        UserGroupInformation.isSecurityEnabled());
+    assertTrue("login is not keytab based: " + ugiInfo,
+        ugi.isFromKeytab());
+
+    // now we are here, build a SASL ACL
+    ACL acl = ugi.doAs(new PrivilegedExceptionAction<ACL>() {
+      @Override
+      public ACL run() throws Exception {
+        return registrySecurity.createSaslACLFromCurrentUser(0);
+      }
+    });
+    assertEquals(ZOOKEEPER_LOCALHOST_REALM, acl.getId().getId());
+    assertEquals("sasl", acl.getId().getScheme());
+    registrySecurity.addSystemACL(acl);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureRMRegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureRMRegistryOperations.java
new file mode 100644
index 0000000..a90b025
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureRMRegistryOperations.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.registry.secure;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.registry.client.api.RegistryConstants;
+import org.apache.hadoop.yarn.registry.client.api.RegistryOperations;
+import org.apache.hadoop.yarn.registry.client.services.zk.RegistrySecurity;
+import org.apache.hadoop.yarn.registry.server.services.RMRegistryOperationsService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.LoginException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
+import static org.apache.hadoop.yarn.registry.client.api.RegistryConstants.KEY_REGISTRY_SECURE;
+import static org.apache.hadoop.yarn.registry.client.api.RegistryConstants.KEY_REGISTRY_SYSTEM_ACLS;
+
+/**
+ * Verify that the {@link RMRegistryOperationsService} works securely
+ */
+public class TestSecureRMRegistryOperations extends AbstractSecureRegistryTest {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestSecureRMRegistryOperations.class);
+  private Configuration secureConf;
+  private UserGroupInformation zookeeperUGI;
+
+  @Before
+  public void setupTestSecureRMRegistryOperations() throws Exception {
+    System.setProperty("curator-log-events", "true");
+    startSecureZK();
+    secureConf = new Configuration();
+    secureConf.setBoolean(KEY_REGISTRY_SECURE, true);
+
+    // ZK is in charge
+    secureConf.set(KEY_REGISTRY_SYSTEM_ACLS, "sasl:zookeeper@");
+    UserGroupInformation.setConfiguration(CONF);
+    zookeeperUGI = loginUGI(ZOOKEEPER_LOCALHOST, keytab_zk);
+  }
+
+  @After
+  public void teardownTestSecureRMRegistryOperations() {
+  }
+
+  /**
+   * Create the RM registry operations as the current user
+   * @return the started service
+   * @throws LoginException on login problems
+   * @throws IOException on any IO problem
+   * @throws InterruptedException if the doAs() operation is interrupted
+   */
+  public RMRegistryOperationsService createRMRegistryOperations() throws
+      LoginException, IOException, InterruptedException {
+    RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER,
+        ZOOKEEPER);
+    LOG.info(registrySecurity.buildSecurityDiagnostics());
+    RMRegistryOperationsService registryOperations = zookeeperUGI.doAs(
+        new PrivilegedExceptionAction<RMRegistryOperationsService>() {
+          @Override
+          public RMRegistryOperationsService run() throws Exception {
+            RMRegistryOperationsService registryOperations
+                = new RMRegistryOperationsService("rm", secureZK);
+            registryOperations.init(secureConf);
+            registryOperations.start();
+            return registryOperations;
+          }
+        }
+    );
+
+    addToTeardown(registryOperations);
+    LOG.info("Binding {}",
+        registryOperations.bindingDiagnosticDetails());
+    // should this be automatic?
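+
+    // Illustrative post-start check, not part of the original patch: the
+    // service handed back to callers should already be live.
+    assertTrue("not started: " + registryOperations,
+        registryOperations.isInState(
+            org.apache.hadoop.service.Service.STATE.STARTED));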
+
+    return registryOperations;
+  }
+
+  /**
+   * test that ZK can write as itself
+   * @throws Throwable
+   */
+  @Test
+  public void testZookeeperCanWriteUnderSystem() throws Throwable {
+    RMRegistryOperationsService rmRegistryOperations =
+        createRMRegistryOperations();
+    RegistryOperations operations = rmRegistryOperations;
+    operations.mknode(RegistryConstants.PATH_SYSTEM_SERVICES + "hdfs",
+        false);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureRegistry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureRegistry.java
new file mode 100644
index 0000000..4154964
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/yarn/registry/secure/TestSecureRegistry.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.registry.secure; + + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.PathAccessDeniedException; +import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.yarn.registry.client.exceptions.AuthenticationFailedException; +import org.apache.hadoop.yarn.registry.client.services.zk.CuratorService; +import org.apache.hadoop.yarn.registry.client.services.zk.RegistrySecurity; + +import static org.apache.hadoop.yarn.registry.client.api.RegistryConstants.*; + +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.data.ACL; +import org.apache.zookeeper.data.Stat; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.login.LoginContext; +import java.util.List; + +/** + * Verify that the Mini ZK service can be started up securely + */ +public class TestSecureRegistry extends AbstractSecureRegistryTest { + private static final Logger LOG = + LoggerFactory.getLogger(TestSecureRegistry.class); + + @Before + public void beforeTestSecureZKService() throws Throwable { + enableKerberosDebugging(); +// System.setProperty(ZookeeperConfigOptions.ZK_ENABLE_SASL_CLIENT, "true"); + } + + @After + public void afterTestSecureZKService() throws Throwable { + disableKerberosDebugging(); + RegistrySecurity.clearZKSaslProperties(); + } + + @Test + public void testCreateSecureZK() throws Throwable { + startSecureZK(); + secureZK.stop(); + } + + @Test + public void testInsecureClientToZK() throws Throwable { + startSecureZK(); + userZookeeperToCreateRoot(); + RegistrySecurity.clearZKSaslProperties(); + + CuratorService curatorService = + startCuratorServiceInstance("insecure client", false); + + curatorService.zkList("/"); + curatorService.zkMkPath("", CreateMode.PERSISTENT, false, + RegistrySecurity.WorldReadWriteACL); + } + +// @Test + public void testAuthedClientToZKNoCredentials() throws Throwable { + startSecureZK(); + userZookeeperToCreateRoot(); + RegistrySecurity.clearZKSaslProperties(); + registrySecurity.logCurrentHadoopUser(); + CuratorService curatorService = + startCuratorServiceInstance("authed with no credentials", true); + LOG.info("Started curator client {}", curatorService); + // read only operations MUST work + curatorService.zkStat(""); + curatorService.zkStat(""); + try { + curatorService.zkMkPath("", CreateMode.PERSISTENT, false, + RegistrySecurity.WorldReadWriteACL); + fail("expected to be unauthenticated, but was allowed write access" + + " with binding " + curatorService); + } catch (AuthenticationFailedException expected) { + // expected + } + } + + /** + * test that ZK can write as itself + * @throws Throwable + */ + @Test + public void testZookeeperCanWrite() throws Throwable { + + System.setProperty("curator-log-events", "true"); + startSecureZK(); + CuratorService curator = null; + LoginContext login = login(ZOOKEEPER_LOCALHOST, ZOOKEEPER, keytab_zk); + try { + logLoginDetails(ZOOKEEPER, login); + RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER, ZOOKEEPER); + curator = startCuratorServiceInstance("ZK", true); + LOG.info(curator.toString()); + + addToTeardown(curator); + curator.zkMkPath("/", CreateMode.PERSISTENT, false, + RegistrySecurity.WorldReadWriteACL); + curator.zkList("/"); + curator.zkMkPath("/zookeeper", CreateMode.PERSISTENT, false, + RegistrySecurity.WorldReadWriteACL); + } finally { + logout(login); + ServiceOperations.stop(curator); + } + } + + + /** + * give the 
client credentials
+   * @throws Throwable
+   */
+// @Test
+  public void testAliceCanWrite() throws Throwable {
+    System.setProperty("curator-log-events", "true");
+    startSecureZK();
+    userZookeeperToCreateRoot();
+    RegistrySecurity.clearZKSaslProperties();
+    LoginContext aliceLogin = login(ALICE_LOCALHOST, ALICE, keytab_alice);
+    try {
+      logLoginDetails(ALICE, aliceLogin);
+      ktList(keytab_alice);
+      RegistrySecurity.setZKSaslClientProperties(ALICE, ALICE);
+      describe(LOG, "Starting Alice Curator");
+      CuratorService alice =
+          startCuratorServiceInstance("alice's", true);
+      LOG.info(alice.toString());
+
+      addToTeardown(alice);
+
+      // stat must work
+      alice.zkStat("");
+
+      alice.zkList("/");
+      alice.zkMkPath("/alice", CreateMode.PERSISTENT, false,
+          RegistrySecurity.WorldReadWriteACL);
+    } finally {
+      logout(aliceLogin);
+    }
+  }
+
+// @Test
+  public void testAliceCanWriteButNotBob() throws Throwable {
+    startSecureZK();
+    // alice
+    CuratorService alice = null;
+    LoginContext aliceLogin =
+        login(ALICE_LOCALHOST, ALICE, keytab_alice);
+    try {
+      alice = startCuratorServiceInstance("alice's", true);
+      alice.zkList("/");
+      alice.zkMkPath("/alice", CreateMode.PERSISTENT, false,
+          RegistrySecurity.WorldReadWriteACL);
+      Stat stat = alice.zkStat("/alice");
+      LOG.info("stat /alice = {}", stat);
+      List<ACL> acls = alice.zkGetACLS("/alice");
+      LOG.info(RegistrySecurity.aclsToString(acls));
+    } finally {
+      ServiceOperations.stop(alice);
+      aliceLogin.logout();
+    }
+    CuratorService bobCurator = null;
+    LoginContext bobLogin =
+        login(BOB_LOCALHOST, BOB, keytab_bob);
+
+    try {
+      bobCurator = startCuratorServiceInstance("bob's", true);
+      bobCurator.zkMkPath("/alice/bob", CreateMode.PERSISTENT, false,
+          RegistrySecurity.WorldReadWriteACL);
+      fail("Expected a failure, but bob could create a path under /alice");
+    } catch (PathAccessDeniedException expected) {
+      // expected
+    } finally {
+      ServiceOperations.stop(bobCurator);
+      bobLogin.logout();
+    }
+  }
+
+  protected CuratorService startCuratorServiceInstance(String name,
+      boolean secure) {
+    Configuration clientConf = new Configuration();
+    clientConf.set(KEY_REGISTRY_ZK_ROOT, "/");
+    clientConf.setBoolean(KEY_REGISTRY_SECURE, secure);
+    describe(LOG, "Starting Curator service");
+    CuratorService curatorService = new CuratorService(name, secureZK);
+    curatorService.init(clientConf);
+    curatorService.start();
+    LOG.info("Curator Binding {}",
+        curatorService.bindingDiagnosticDetails());
+    return curatorService;
+  }
+
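+  /**
+   * Illustrative helper, not part of the original patch: dump the ACLs on
+   * a path through a given curator instance, reusing only calls already
+   * exercised in the tests above.
+   */
+  protected void logACLs(CuratorService curator, String path)
+      throws Exception {
+    List<ACL> acls = curator.zkGetACLS(path);
+    LOG.info("ACLs on {}: {}", path, RegistrySecurity.aclsToString(acls));
+  }
+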
+  /**
+   * Have the ZK user create the root dir; afterwards, log out the ZK user
+   * and stop its curator instance, to avoid contaminating later tests
+   * @throws Throwable on any failure
+   */
+  public void userZookeeperToCreateRoot() throws Throwable {
+    System.setProperty("curator-log-events", "true");
+    CuratorService curator = null;
+    LoginContext login = login(ZOOKEEPER_LOCALHOST, ZOOKEEPER, keytab_zk);
+    try {
+      logLoginDetails(ZOOKEEPER, login);
+      RegistrySecurity.setZKSaslClientProperties(ZOOKEEPER, ZOOKEEPER);
+      curator = startCuratorServiceInstance("ZK", true);
+      LOG.info(curator.toString());
+
+      addToTeardown(curator);
+      curator.zkMkPath("/", CreateMode.PERSISTENT, false,
+          RegistrySecurity.WorldReadWriteACL);
+    } finally {
+      logout(login);
+      ServiceOperations.stop(curator);
+    }
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties
new file mode 100644
index 0000000..ffee0d1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties
@@ -0,0 +1,63 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. 
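+
+# (Illustrative addition, not part of the original file: when chasing SASL
+# handshake failures in the secure tests, the ZooKeeper client's SASL layer
+# can be made verbose as well.)
+# log4j.logger.org.apache.zookeeper.client.ZooKeeperSaslClient=DEBUG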
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties new file mode 100644 index 0000000..ffee0d1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/log4j.properties @@ -0,0 +1,63 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# log4j configuration used during build and unit tests + +log4j.rootLogger=INFO,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n + +log4j.appender.subprocess=org.apache.log4j.ConsoleAppender +log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout +log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n +#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess + +# packages under test +log4j.logger.org.apache.hadoop.yarn.registry=DEBUG +log4j.logger.org.apache.hadoop.service=DEBUG + + + +# crank back on some noise +log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +log4j.logger.org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner=WARN +log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement=WARN +log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN +log4j.logger.org.apache.hadoop.hdfs=WARN + + +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN +log4j.logger.org.apache.zookeeper=INFO +log4j.logger.org.apache.zookeeper.ClientCnxn=DEBUG + +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.security=WARN +log4j.logger.org.apache.hadoop.metrics2=ERROR +log4j.logger.org.apache.hadoop.util.HostsFileReader=WARN +log4j.logger.org.apache.hadoop.yarn.event.AsyncDispatcher=WARN +log4j.logger.org.apache.hadoop.security.token.delegation=WARN +log4j.logger.org.apache.hadoop.yarn.util.AbstractLivelinessMonitor=WARN +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.security=WARN +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo=WARN + +# curator noise +log4j.logger.org.apache.curator.framework.imps=WARN +log4j.logger.org.apache.curator.framework.state.ConnectionStateManager=ERROR + +log4j.logger.org.apache.directory.api.ldap=ERROR +log4j.logger.org.apache.directory.server=ERROR \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml index acf330f..6c04f17 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml @@ -59,6 +59,11 @@ <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-yarn-registry</artifactId> + </dependency> + + <dependency> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index 60f88f6..a1b57c1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import
org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; +import org.apache.hadoop.yarn.server.resourcemanager.registry.RMRegistryService; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; @@ -110,4 +111,6 @@ void setRMApplicationHistoryWriter( long getEpoch(); boolean isSchedulerReadyForAllocatingContainers(); + + RMRegistryService getRegistry(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index 36eec04..b164c19 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; +import org.apache.hadoop.yarn.server.resourcemanager.registry.RMRegistryService; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; @@ -95,6 +96,7 @@ private long schedulerRecoveryWaitTime = 0; private boolean printLog = true; private boolean isSchedulerReady = false; + private RMRegistryService registry; private static final Log LOG = LogFactory.getLog(RMContextImpl.class); @@ -117,7 +119,8 @@ public RMContextImpl(Dispatcher rmDispatcher, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager, ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager, - RMApplicationHistoryWriter rmApplicationHistoryWriter) { + RMApplicationHistoryWriter rmApplicationHistoryWriter, + RMRegistryService registry) { this(); this.setDispatcher(rmDispatcher); this.setContainerAllocationExpirer(containerAllocationExpirer); @@ -129,6 +132,7 @@ public RMContextImpl(Dispatcher rmDispatcher, this.setNMTokenSecretManager(nmTokenSecretManager); this.setClientToAMTokenSecretManager(clientToAMTokenSecretManager); this.setRMApplicationHistoryWriter(rmApplicationHistoryWriter); + this.setRegistry(registry); RMStateStore nullStore = new NullRMStateStore(); nullStore.setRMDispatcher(rmDispatcher); @@ -421,4 +425,13 @@ public boolean isSchedulerReadyForAllocatingContainers() { public void setSystemClock(Clock clock) { this.systemClock = clock; } + + @Override + public RMRegistryService getRegistry() { + return registry; + } + + void setRegistry(RMRegistryService registry) { + this.registry = registry; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 79af7a6..624793c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; @@ -72,6 +73,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreFactory; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; +import org.apache.hadoop.yarn.server.resourcemanager.registry.RMRegistryService; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; @@ -158,6 +160,11 @@ private AppReportFetcher fetcher = null; protected ResourceTrackerService resourceTracker; + /** + * Registry service + */ + protected RMRegistryService registry; + @VisibleForTesting protected String webAppAddress; private ConfigurationProvider configurationProvider = null; @@ -232,6 +239,15 @@ protected void serviceInit(Configuration conf) throws Exception { if (this.rmContext.isHAEnabled()) { HAUtil.verifyAndSetConfiguration(this.conf); } + + boolean registryEnabled = conf.getBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, + RegistryConstants.DEFAULT_REGISTRY_ENABLED); + if (registryEnabled) { + registry = new RMRegistryService(rmContext); + addService(registry); + rmContext.setRegistry(registry); + } + createAndInitActiveServices(); webAppAddress = WebAppUtils.getWebAppBindURL(this.conf, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/registry/RMRegistryService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/registry/RMRegistryService.java new file mode 100644 index 0000000..7240fd8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/registry/RMRegistryService.java @@ -0,0 +1,304 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.registry; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.io.DataInputByteBuffer; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.registry.server.services.RMRegistryOperationsService; +import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreAppEvent; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreEvent; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * This is the RM service which translates from RM events + * to registry actions + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class RMRegistryService extends CompositeService { + private static final Logger LOG = + LoggerFactory.getLogger(RMRegistryService.class); + + private final RMContext rmContext; + + /** + * Registry service + */ + private final RMRegistryOperationsService registryOperations; + + public RMRegistryService(RMContext rmContext) { + super(RMRegistryService.class.getName()); + this.rmContext = rmContext; + + registryOperations = + new RMRegistryOperationsService("Registry"); + addService(registryOperations); + } + + + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + + LOG.info("RM registry service started : {}", + registryOperations.bindingDiagnosticDetails()); + // Register self as event handler for RM Events + register(RMAppAttemptEventType.class, new AppEventHandler()); + register(RMAppManagerEventType.class, new AppManagerEventHandler()); 
+ register(RMStateStoreEventType.class, new StateStoreEventHandler()); + register(RMContainerEventType.class, new ContainerEventHandler()); + } + + /** + * Register a handler for an event type. + * @param eventType event type + * @param handler handler + */ + private void register(Class<? extends Enum> eventType, + EventHandler handler) { + rmContext.getDispatcher().register(eventType, handler); + } + + @SuppressWarnings( + {"EnumSwitchStatementWhichMissesCases", "UnnecessaryDefault"}) + protected void handleAppManagerEvent(RMAppManagerEvent event) throws + IOException { + RMAppManagerEventType eventType = event.getType(); + ApplicationId appId = + event.getApplicationId(); + switch (eventType) { + case APP_COMPLETED: + registryOperations.onApplicationCompleted(appId); + break; + default: + // this isn't in the enum today... just making sure for the + // future + break; + } + } + + @SuppressWarnings("EnumSwitchStatementWhichMissesCases") + private void handleStateStoreEvent(RMStateStoreEvent event) + throws IOException { + RMStateStoreEventType eventType = event.getType(); + switch (eventType) { + case STORE_APP: + RMStateStoreAppEvent storeAppEvent = (RMStateStoreAppEvent) event; + RMStateStore.ApplicationState appState = storeAppEvent.getAppState(); + ApplicationId appId = appState.getAppId(); + registryOperations.onStateStoreEvent(appId, appState.getUser()); + break; + + default: + break; + } + } + + + @SuppressWarnings("EnumSwitchStatementWhichMissesCases") + protected void handleAppAttemptEvent(RMAppAttemptEvent event) throws + IOException { + RMAppAttemptEventType eventType = event.getType(); + ApplicationAttemptId appAttemptId = + event.getApplicationAttemptId(); + + ApplicationId appId = appAttemptId.getApplicationId(); + switch (eventType) { + + case LAUNCHED: + registryOperations.onApplicationLaunched(appId); + break; + + case REGISTERED: + RMAppAttemptRegistrationEvent evt = + (RMAppAttemptRegistrationEvent) event; + registryOperations.onApplicationAttemptRegistered(appAttemptId, + evt.getHost(), + evt.getRpcport(), + evt.getTrackingurl()); + break; + + case UNREGISTERED: + registryOperations.onApplicationAttemptUnregistered(appAttemptId); + break; + + case ATTEMPT_ADDED: + registryOperations.onApplicationAttemptAdded(appAttemptId); + break; + + // container has finished + case CONTAINER_FINISHED: + RMAppAttemptContainerFinishedEvent cfe = + (RMAppAttemptContainerFinishedEvent) event; + ContainerId containerId = cfe.getContainerStatus().getContainerId(); + registryOperations.onAMContainerFinished(containerId); + break; + + + default: + // do nothing + } + } + + /** + * Lifted from RMAppManager + * @param application app submission + * @return credentials + * @throws IOException + */ + private Credentials parseCredentials(ApplicationSubmissionContext application) + throws IOException { + Credentials credentials = new Credentials(); + DataInputByteBuffer dibb = new DataInputByteBuffer(); + ByteBuffer tokens = application.getAMContainerSpec().getTokens(); + if (tokens != null) { + dibb.reset(tokens); + credentials.readTokenStorageStream(dibb); + tokens.rewind(); + } + return credentials; + } + + /** + * Extract the information from the submission to set up the + * registry permissions for a user + * @param applicationId app in question + * @return the credentials in the submission + * @throws IOException problems parsing the credential + */ + private Credentials extractCredentials(ApplicationId applicationId) throws + IOException { + RMApp rmApp = rmContext.getRMApps().get(applicationId); +
ApplicationSubmissionContext applicationSubmissionContext = + rmApp.getApplicationSubmissionContext(); + + if (UserGroupInformation.isSecurityEnabled()) { + return parseCredentials(applicationSubmissionContext); + } else { + return null; + } + } + + @SuppressWarnings("EnumSwitchStatementWhichMissesCases") + private void handleContainerEvent(RMContainerEvent event) + throws IOException { + RMContainerEventType eventType = event.getType(); + switch (eventType) { + case FINISHED: + ContainerId containerId = event.getContainerId(); + registryOperations.onContainerFinished(containerId); + break; + + default: + break; + } + } + + + /** + * Handler for app attempt events + */ + private class AppEventHandler implements + EventHandler<RMAppAttemptEvent> { + + @Override + public void handle(RMAppAttemptEvent event) { + try { + handleAppAttemptEvent(event); + } catch (IOException e) { + LOG.warn("handling {}: {}", event, e, e); + } + } + } + + /** + * Handler for RM-side App manager events + */ + + private class AppManagerEventHandler + implements EventHandler<RMAppManagerEvent> { + @Override + public void handle(RMAppManagerEvent event) { + try { + handleAppManagerEvent(event); + } catch (IOException e) { + LOG.warn("handling {}: {}", event, e, e); + } + } + } + + /** + * Handler for RM-side state store events. + * This happens early on, and as the data contains the user details, + * it is where paths can be set up in advance of being used. + */ + + private class StateStoreEventHandler implements EventHandler<RMStateStoreEvent> { + @Override + public void handle(RMStateStoreEvent event) { + try { + handleStateStoreEvent(event); + } catch (IOException e) { + LOG.warn("handling {}: {}", event, e, e); + } + } + } + + /** + * Handler for RM-side container events + */ + private class ContainerEventHandler implements EventHandler<RMContainerEvent> { + + @Override + public void handle(RMContainerEvent event) { + try { + handleContainerEvent(event); + } catch (IOException e) { + LOG.warn("handling {}: {}", event, e, e); + } + } + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 333d0cf..f460bf6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -115,7 +115,7 @@ public RMContext mockRMContext(int n, long time) { RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext context = new RMContextImpl(rmDispatcher, containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, - null, null, null, null, null, writer) { + null, null, null, null, null, writer, null) { @Override public ConcurrentMap getRMApps() { return map;
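Every RMContextImpl construction in the test updates that follow simply passes null for the new trailing registry argument, so the registry is strictly optional and RM-side consumers must guard against its absence. A minimal illustrative sketch (not code from this patch):

    // Illustrative only: getRegistry() returns null whenever
    // KEY_REGISTRY_ENABLED is false or a test handed null to RMContextImpl.
    RMRegistryService registry = rmContext.getRegistry();
    if (registry != null) {
      LOG.info("registry service state: {}", registry.getServiceState());
    }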
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index d877e25..e831a7a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -105,7 +105,8 @@ public void setUp() throws Exception { rmContext = new RMContextImpl(rmDispatcher, null, null, null, - mock(DelegationTokenRenewer.class), null, null, null, null, null); + mock(DelegationTokenRenewer.class), null, null, null, null, null, + null); NodesListManager nodesListManager = mock(NodesListManager.class); HostsFileReader reader = mock(HostsFileReader.class); when(nodesListManager.getHostsReader()).thenReturn(reader); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index c837450..3f68990 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -72,7 +72,7 @@ public void setUp() { // Dispatcher that processes events inline Dispatcher dispatcher = new InlineDispatcher(); RMContext context = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null, null); + null, null, null, null, null, null, null, null, null); dispatcher.register(SchedulerEventType.class, new InlineDispatcher.EmptyEventHandler()); dispatcher.register(RMNodeEventType.class, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java index d16d551..1b03565 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java @@ -74,7 +74,7 @@ public void setUp() { new TestRMNodeEventDispatcher()); RMContext context = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null, null); + null, null, null, null, null, null, null, null, null); dispatcher.register(SchedulerEventType.class, new InlineDispatcher.EmptyEventHandler()); dispatcher.register(RMNodeEventType.class, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java index 4f94695..584a2e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java @@ -71,7 +71,7 @@ public void handle(Event event) { RMContext context = new RMContextImpl(dispatcher, null, null, null, null, null, new RMContainerTokenSecretManager(conf), - new NMTokenSecretManagerInRM(conf), null, null); + new NMTokenSecretManagerInRM(conf), null, null, null); dispatcher.register(RMNodeEventType.class, new ResourceManager.NodeEventDispatcher(context)); NodesListManager nodesListManager = new NodesListManager(context); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index 457f21e..ed2597b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -203,7 +203,7 @@ public void setUp() throws Exception { new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), new ClientToAMTokenSecretManagerInRM(), - writer); + writer, null); ((RMContextImpl)realRMContext).setStateStore(store); publisher = mock(SystemMetricsPublisher.class); ((RMContextImpl)realRMContext).setSystemMetricsPublisher(publisher); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index b8e6f43..546287f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -245,7 +245,7 @@ public void setUp() throws Exception { new RMContainerTokenSecretManager(conf), nmTokenManager, clientToAMTokenManager, - writer); + writer, null); store = mock(RMStateStore.class); ((RMContextImpl) rmContext).setStateStore(store); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index e029749..14d250a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -392,7 +392,7 @@ public void testRefreshQueues() throws Exception { RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM(), null, null); setupQueueConfiguration(conf); cs.setConf(new YarnConfiguration()); cs.setRMContext(resourceManager.getRMContext()); @@ -498,7 +498,7 @@ public void testParseQueue() throws IOException { cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM(), null, null)); } @Test @@ -514,7 +514,7 @@ public void testReconnectedNode() throws Exception { cs.reinitialize(csConf, new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM(), null, null)); RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1); RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2); @@ -545,7 +545,7 @@ public void testRefreshQueuesWithNewQueue() throws Exception { cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM(), null, null)); checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY); // Add a new queue b4 @@ -963,7 +963,7 @@ public void testNumClusterNodes() throws Exception { RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM(), null, null); cs.setRMContext(rmContext); CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java index f573f43..12ab5c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java @@ 
-82,7 +82,7 @@ public void testQueueMapping() throws Exception { RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM(), null, null); cs.setConf(conf); cs.setRMContext(rmContext); cs.init(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java index a3b990c..a0e67ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java @@ -46,7 +46,7 @@ public void testQueueParsing() throws Exception { RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM(), null, null); capacityScheduler.setConf(conf); capacityScheduler.setRMContext(rmContext); capacityScheduler.init(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 9cb902d..101f0f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -92,7 +92,7 @@ public EventHandler getEventHandler() { new AMRMTokenSecretManager(conf, null), new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), writer); + new ClientToAMTokenSecretManagerInRM(), writer, null); rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class)); return rmContext; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index b4c4c10..0f1e504 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -145,7 +145,7 @@ public void testAppAttemptMetrics() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null, writer); + null, null, null, null, null, null, null, writer, null); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); @@ -190,7 +190,7 @@ public void testNodeLocalAssignment() throws Exception { nmTokenSecretManager.rollMasterKey(); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, - null, containerTokenSecretManager, nmTokenSecretManager, null, writer); + null, containerTokenSecretManager, nmTokenSecretManager, null, writer, null); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); @@ -261,7 +261,8 @@ public void testUpdateResourceOnNode() throws Exception { nmTokenSecretManager.rollMasterKey(); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, - null, containerTokenSecretManager, nmTokenSecretManager, null, writer); + null, containerTokenSecretManager, nmTokenSecretManager, null, writer, + null); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index 0df7c0d..4e0c810 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -163,7 +163,7 @@ public static RMContext mockRMContext(int numApps, int racks, int numNodes, deactivatedNodesMap.put(node.getHostName(), node); } return new RMContextImpl(null, null, null, null, - null, null, null, null, null, null) { + null, null, null, null, null, null, null) { @Override public ConcurrentMap getRMApps() { return applicationsMaps; @@ -206,7 +206,7 @@ public static CapacityScheduler mockCapacityScheduler() throws IOException { cs.setRMContext(new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM(), null, null)); cs.init(conf); return cs; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java index 
f07cb8d..741f808 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java @@ -150,7 +150,7 @@ public YarnApplicationState createApplicationState() { } return new RMContextImpl(null, null, null, null, - null, null, null, null, null, null) { + null, null, null, null, null, null, null) { @Override public ConcurrentMap getRMApps() { return applicationsMaps; @@ -173,7 +173,7 @@ private static FairScheduler mockFairScheduler() throws IOException { fs.setRMContext(new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM(), null, null)); fs.init(conf); return fs; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java index b5b7a43..7facc14 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java @@ -49,6 +49,9 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.registry.server.services.MicroZookeeperService; +import org.apache.hadoop.yarn.registry.server.services.MicroZookeeperServiceKeys; import org.apache.hadoop.yarn.server.api.ResourceTracker; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; @@ -106,7 +109,7 @@ private String[] rmIds; private ApplicationHistoryServer appHistoryServer; - + private MicroZookeeperService zookeeper; private boolean useFixedPorts; private boolean useRpc = false; private int failoverTimeout; @@ -121,6 +124,7 @@ // Number of nm-log-dirs per nodemanager private int numLogDirs; private boolean enableAHS; + private final boolean enableRegistry; /** * @param testName name of the test @@ -129,14 +133,17 @@ * @param numLocalDirs the number of nm-local-dirs per nodemanager * @param numLogDirs the number of nm-log-dirs per nodemanager * @param enableAHS enable ApplicationHistoryServer or not + * @param enableRegistry enable the registry or not */ public MiniYARNCluster( String testName, int numResourceManagers, int numNodeManagers, - int numLocalDirs, int numLogDirs, boolean enableAHS) { + int numLocalDirs, int numLogDirs, boolean enableAHS, + boolean enableRegistry) { super(testName.replace("$", "")); this.numLocalDirs = numLocalDirs; this.numLogDirs = numLogDirs; this.enableAHS = enableAHS; + this.enableRegistry = enableRegistry; String testSubDir = testName.replace("$", ""); File targetWorkDir = new File("target", testSubDir); try { @@ -192,6 +199,21 @@ public 
MiniYARNCluster( * @param numNodeManagers the number of node managers in the cluster * @param numLocalDirs the number of nm-local-dirs per nodemanager * @param numLogDirs the number of nm-log-dirs per nodemanager + * @param enableAHS enable ApplicationHistoryServer or not + */ + public MiniYARNCluster( + String testName, int numResourceManagers, int numNodeManagers, + int numLocalDirs, int numLogDirs, boolean enableAHS) { + this(testName, numResourceManagers, numNodeManagers, numLocalDirs, + numLogDirs, enableAHS, false); + } + + /** + * @param testName name of the test + * @param numResourceManagers the number of resource managers in the cluster + * @param numNodeManagers the number of node managers in the cluster + * @param numLocalDirs the number of nm-local-dirs per nodemanager + * @param numLogDirs the number of nm-log-dirs per nodemanager */ public MiniYARNCluster( String testName, int numResourceManagers, int numNodeManagers, @@ -243,6 +265,15 @@ public void serviceInit(Configuration conf) throws Exception { rmIds = rmIdsCollection.toArray(new String[rmIdsCollection.size()]); } + // enable the in-memory ZK cluster AHEAD of RMs to ensure it starts first + if (enableRegistry) { + zookeeper = new MicroZookeeperService("Local ZK service"); + addService(zookeeper); + conf.setBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, true); + conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, + new File(testWorkDir, "zookeeper").getAbsolutePath()); + } + for (int i = 0; i < resourceManagers.length; i++) { resourceManagers[i] = createResourceManager(); if (!useFixedPorts) { @@ -742,4 +773,8 @@ protected void doSecureLogin() throws IOException { public int getNumOfResourceManager() { return this.resourceManagers.length; } + + public MicroZookeeperService getZookeeper() { + return zookeeper; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterRegistry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterRegistry.java new file mode 100644 index 0000000..b6bf1cc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYARNClusterRegistry.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.registry.client.services.RegistryOperationsService; +import org.apache.hadoop.yarn.registry.server.services.MicroZookeeperService; +import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.registry.RMRegistryService; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.rules.Timeout; + +import java.io.IOException; +import java.net.InetSocketAddress; + +/** + * Test registry support in the cluster + */ +public class TestMiniYARNClusterRegistry extends Assert { + + + MiniYARNCluster cluster; + + @Rule + public final Timeout testTimeout = new Timeout(10000); + + @Rule + public TestName methodName = new TestName(); + private Configuration conf; + + @Before + public void setup() throws IOException, InterruptedException { + conf = new YarnConfiguration(); + + cluster = new MiniYARNCluster(methodName.getMethodName(), + 1, 1, 1, 1, false, true); + cluster.init(conf); + cluster.start(); + } + + @Test + public void testZKInstance() throws Exception { + assertNotNull("zookeeper", cluster.getZookeeper()); + } + + @Test + public void testZKConnectionAddress() throws Exception { + MicroZookeeperService zookeeper = cluster.getZookeeper(); + InetSocketAddress address = zookeeper.getConnectionAddress(); + assertTrue("Unconfigured address", address.getPort() != 0); + } + + @Test + public void testZKConfigPatchPropagation() throws Exception { + MicroZookeeperService zookeeper = cluster.getZookeeper(); + String connectionString = zookeeper.getConnectionString(); + String confConnection = conf.get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM); + assertNotNull(confConnection); + assertEquals(connectionString, confConnection); + } + + @Test + public void testRegistryCreated() throws Exception { + assertTrue("registry not enabled", + cluster.getConfig().getBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, + false)); + MicroZookeeperService zookeeper = cluster.getZookeeper(); + String connectionString = zookeeper.getConnectionString(); + String confConnection = conf.get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM); + ResourceManager rm = cluster.getResourceManager(0); + RMRegistryService registry = rm.getRMContext().getRegistry(); + assertNotNull("null registry", registry); + } + + @Test + public void testPathsExist() throws Throwable { + MicroZookeeperService zookeeper = cluster.getZookeeper(); + // service to directly hook in to the ZK server + RegistryOperationsService operations = + new RegistryOperationsService("operations", zookeeper); + operations.init(new YarnConfiguration()); + operations.start(); + + operations.stat("/"); + // verifies that the RM startup has created the system services path + operations.stat(RegistryConstants.PATH_SYSTEM_SERVICES); + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index f6479bc..e258f21 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -100,5 +100,6 @@ <module>hadoop-yarn-applications</module> <module>hadoop-yarn-site</module> <module>hadoop-yarn-client</module> + <module>hadoop-yarn-registry</module>
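Putting the pieces together, a downstream test that wants a registry-enabled mini cluster follows the same steps as TestMiniYARNClusterRegistry: build the cluster with the new trailing enableRegistry flag, then talk to the embedded ZK instance through a RegistryOperationsService. A condensed sketch, assuming one RM and one NM start cleanly:

    Configuration conf = new YarnConfiguration();
    MiniYARNCluster cluster =
        new MiniYARNCluster("RegistryDemo", 1, 1, 1, 1, false, true);
    cluster.init(conf);   // serviceInit brings up the MicroZookeeperService first
    cluster.start();      // the RM then starts with KEY_REGISTRY_ENABLED=true
    try {
      RegistryOperationsService ops =
          new RegistryOperationsService("demo", cluster.getZookeeper());
      ops.init(new YarnConfiguration());
      ops.start();
      // the RM's RMRegistryService should already have created this path
      ops.stat(RegistryConstants.PATH_SYSTEM_SERVICES);
      ops.stop();
    } finally {
      cluster.stop();
    }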