diff --git a/LICENSE.txt b/LICENSE.txt index 67472b6..b0cef03 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1777,6 +1777,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The binary distribution of this product bundles these dependencies under the following license: FindBugs-jsr305 3.0.0 +dnsjava 2.1.7, Copyright (c) 1998-2011, Brian Wellington. All rights reserved. -------------------------------------------------------------------------------- (2-clause BSD) Redistribution and use in source and binary forms, with or without diff --git a/NOTICE.txt b/NOTICE.txt index 0718909..bc7a26f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -581,3 +581,13 @@ The binary distribution of this product bundles binaries of Ehcache 3.3.1, which has the following notices: * Ehcache V3 Copyright 2014-2016 Terracotta, Inc. + +The binary distribution of this product bundles binaries of +snakeyaml (https://bitbucket.org/asomov/snakeyaml), +which has the following notices: + * Copyright (c) 2008, http://www.snakeyaml.org + +The binary distribution of this product bundles binaries of +swagger-annotations (https://github.com/swagger-api/swagger-core), +which has the following notices: + * Copyright 2016 SmartBear Software diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml index 289061f..83633ac 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml @@ -87,6 +87,31 @@ + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/target + /share/hadoop/${hadoop.component}/sources + + *-sources.jar + + + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf + etc/hadoop + + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples + 
/share/hadoop/${hadoop.component}/yarn-service-examples + + **/* + + + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/target + /share/hadoop/${hadoop.component}/sources + + *-sources.jar + + + hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/target /share/hadoop/${hadoop.component}/sources diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 7490748..1e0aa95 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -89,6 +89,7 @@ 2.12.0 3.0.0 3.1.0-RC1 + 2.1.7 11.0.2 4.0 @@ -142,6 +143,9 @@ ${project.version} + + 1.5.4 + 1.16 @@ -424,6 +428,12 @@ org.apache.hadoop + hadoop-yarn-services-core + ${project.version} + + + + org.apache.hadoop hadoop-mapreduce-client-jobclient ${project.version} test-jar @@ -598,6 +608,11 @@ 3.1.0 + javax.ws.rs + jsr311-api + 1.1.1 + + org.eclipse.jetty jetty-server ${jetty.version} @@ -957,11 +972,6 @@ ${jackson2.version} - com.fasterxml.jackson.jaxrs - jackson-jaxrs-json-provider - ${jackson2.version} - - com.fasterxml.jackson.dataformat jackson-dataformat-cbor ${jackson2.version} @@ -1213,6 +1223,13 @@ + + + dnsjava + dnsjava + ${dnsjava.version} + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml new file mode 100644 index 0000000..ddea2a1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml @@ -0,0 +1,130 @@ + + + 4.0.0 + + org.apache.hadoop + hadoop-yarn-applications + 3.1.0-SNAPSHOT + + hadoop-yarn-services-api + Apache Hadoop YARN Services API + jar + Hadoop YARN REST APIs for services + + + + + + + src/main/resources + true + + + src/main/scripts/ + true + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + + + development + ${project.url} + + + + + + + + + + test-jar + + + + + + + + + + + + + + org.apache.hadoop + 
hadoop-yarn-services-core + + + org.apache.hadoop + hadoop-yarn-api + + + org.apache.hadoop + hadoop-yarn-common + + + org.apache.hadoop + hadoop-common + + + org.slf4j + slf4j-api + + + org.eclipse.jetty + jetty-webapp + + + com.google.inject + guice + + + javax.ws.rs + jsr311-api + + + org.mockito + mockito-all + test + + + + + + + + org.apache.hadoop + hadoop-common + test-jar + + + junit + junit + test + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java new file mode 100644 index 0000000..1bb6c93 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java @@ -0,0 +1,298 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.webapp; + +import com.google.inject.Inject; +import com.google.inject.Singleton; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.api.records.ServiceStatus; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +import static org.apache.hadoop.yarn.service.api.records.ServiceState.ACCEPTED; +import static org.apache.hadoop.yarn.service.conf.RestApiConstants.*; + +/** + * The rest API endpoints for users to manage services on YARN. 
+ */ +@Singleton +@Path(CONTEXT_ROOT) +public class ApiServer { + + public ApiServer() { + super(); + } + + @Inject + public ApiServer(Configuration conf) { + super(); + } + + private static final Logger LOG = + LoggerFactory.getLogger(ApiServer.class); + private static Configuration YARN_CONFIG = new YarnConfiguration(); + private static ServiceClient SERVICE_CLIENT; + + static { + init(); + } + + // initialize all the common resources - order is important + private static void init() { + SERVICE_CLIENT = new ServiceClient(); + SERVICE_CLIENT.init(YARN_CONFIG); + SERVICE_CLIENT.start(); + } + + @GET + @Path(VERSION) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response getVersion() { + String version = VersionInfo.getBuildVersion(); + LOG.info(version); + return Response.ok("{ \"hadoop_version\": \"" + version + "\"}").build(); + } + + @POST + @Path(SERVICE_ROOT_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response createService(Service service) { + LOG.info("POST: createService = {}", service); + ServiceStatus serviceStatus = new ServiceStatus(); + try { + ApplicationId applicationId = SERVICE_CLIENT.actionCreate(service); + LOG.info("Successfully created service " + service.getName() + + " applicationId = " + applicationId); + serviceStatus.setState(ACCEPTED); + serviceStatus.setUri( + CONTEXT_ROOT + SERVICE_ROOT_PATH + "/" + service + .getName()); + return Response.status(Status.ACCEPTED).entity(serviceStatus).build(); + } catch (IllegalArgumentException e) { + serviceStatus.setDiagnostics(e.getMessage()); + return Response.status(Status.BAD_REQUEST).entity(serviceStatus) + .build(); + } catch (Exception e) { + String message = "Failed to create service " + service.getName(); + LOG.error(message, e); + serviceStatus.setDiagnostics(message + ": " + e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR) + .entity(serviceStatus).build(); + 
} + } + + @GET + @Path(SERVICE_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response getService(@PathParam(SERVICE_NAME) String appName) { + LOG.info("GET: getService for appName = {}", appName); + ServiceStatus serviceStatus = new ServiceStatus(); + try { + Service app = SERVICE_CLIENT.getStatus(appName); + return Response.ok(app).build(); + } catch (IllegalArgumentException e) { + serviceStatus.setDiagnostics(e.getMessage()); + serviceStatus.setCode(ERROR_CODE_APP_NAME_INVALID); + return Response.status(Status.NOT_FOUND).entity(serviceStatus) + .build(); + } catch (Exception e) { + LOG.error("Get service failed", e); + serviceStatus + .setDiagnostics("Failed to retrieve service: " + e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR) + .entity(serviceStatus).build(); + } + } + + @DELETE + @Path(SERVICE_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response deleteService(@PathParam(SERVICE_NAME) String appName) { + LOG.info("DELETE: deleteService for appName = {}", appName); + return stopService(appName, true); + } + + private Response stopService(String appName, boolean destroy) { + try { + SERVICE_CLIENT.actionStop(appName, destroy); + if (destroy) { + SERVICE_CLIENT.actionDestroy(appName); + LOG.info("Successfully deleted service {}", appName); + } else { + LOG.info("Successfully stopped service {}", appName); + } + return Response.status(Status.OK).build(); + } catch (ApplicationNotFoundException e) { + ServiceStatus serviceStatus = new ServiceStatus(); + serviceStatus.setDiagnostics( + "Service " + appName + " is not found in YARN: " + e.getMessage()); + return Response.status(Status.BAD_REQUEST).entity(serviceStatus) + .build(); + } catch (Exception e) { + ServiceStatus serviceStatus = new ServiceStatus(); + serviceStatus.setDiagnostics(e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR) + 
.entity(serviceStatus).build(); + } + } + + @PUT + @Path(COMPONENT_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN }) + public Response updateComponent(@PathParam(SERVICE_NAME) String appName, + @PathParam(COMPONENT_NAME) String componentName, Component component) { + + if (component.getNumberOfContainers() < 0) { + return Response.status(Status.BAD_REQUEST).entity( + "Service = " + appName + ", Component = " + component.getName() + + ": Invalid number of containers specified " + component + .getNumberOfContainers()).build(); + } + ServiceStatus status = new ServiceStatus(); + try { + Map original = SERVICE_CLIENT.flexByRestService(appName, + Collections.singletonMap(component.getName(), + component.getNumberOfContainers())); + status.setDiagnostics( + "Updating component (" + componentName + ") size from " + original + .get(componentName) + " to " + component.getNumberOfContainers()); + return Response.ok().entity(status).build(); + } catch (YarnException | IOException e) { + status.setDiagnostics(e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR).entity(status) + .build(); + } + } + + @PUT + @Path(SERVICE_PATH) + @Consumes({ MediaType.APPLICATION_JSON }) + @Produces({ MediaType.APPLICATION_JSON }) + public Response updateService(@PathParam(SERVICE_NAME) String appName, + Service updateServiceData) { + LOG.info("PUT: updateService for app = {} with data = {}", appName, + updateServiceData); + + // Ignore the app name provided in updateServiceData and always use appName + // path param + updateServiceData.setName(appName); + + // For STOP the app should be running. If already stopped then this + // operation will be a no-op. For START it should be in stopped state. + // If already running then this operation will be a no-op. 
+ if (updateServiceData.getState() != null + && updateServiceData.getState() == ServiceState.STOPPED) { + return stopService(appName, false); + } + + // If a START is requested + if (updateServiceData.getState() != null + && updateServiceData.getState() == ServiceState.STARTED) { + return startService(appName); + } + + // If new lifetime value specified then update it + if (updateServiceData.getLifetime() != null + && updateServiceData.getLifetime() > 0) { + return updateLifetime(appName, updateServiceData); + } + + // If nothing happens consider it a no-op + return Response.status(Status.NO_CONTENT).build(); + } + + private Response updateLifetime(String appName, Service updateAppData) { + ServiceStatus status = new ServiceStatus(); + try { + String newLifeTime = + SERVICE_CLIENT.updateLifetime(appName, updateAppData.getLifetime()); + status.setDiagnostics( + "Service (" + appName + ")'s lifeTime is updated to " + newLifeTime + + ", " + updateAppData.getLifetime() + + " seconds remaining"); + return Response.ok(status).build(); + } catch (Exception e) { + String message = + "Failed to update service (" + appName + ")'s lifetime to " + + updateAppData.getLifetime(); + LOG.error(message, e); + status.setDiagnostics(message + ": " + e.getMessage()); + return Response.status(Status.INTERNAL_SERVER_ERROR).entity(status) + .build(); + } + } + + private Response startService(String appName) { + ServiceStatus status = new ServiceStatus(); + try { + SERVICE_CLIENT.actionStart(appName); + LOG.info("Successfully started service " + appName); + status.setDiagnostics("Service " + appName + " is successfully started."); + status.setState(ServiceState.ACCEPTED); + return Response.ok(status).build(); + } catch (Exception e) { + String message = "Failed to start service " + appName; + status.setDiagnostics(message + ": " + e.getMessage()); + LOG.info(message, e); + return Response.status(Status.INTERNAL_SERVER_ERROR) + .entity(status).build(); + } + } + + /** + * Used by negative 
test case. + * + * @param mockServerClient - A mocked version of ServiceClient + */ + public static void setServiceClient(ServiceClient mockServerClient) { + SERVICE_CLIENT = mockServerClient; + SERVICE_CLIENT.init(YARN_CONFIG); + SERVICE_CLIENT.start(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java new file mode 100644 index 0000000..f4acd94 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServerWebApp.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.webapp; + +import org.apache.hadoop.http.HttpServer2; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AuthenticationFilterInitializer; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; +import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider; +import org.eclipse.jetty.webapp.Configuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.*; + +/** + * This class launches the web service using Hadoop HttpServer2 (which uses + * an embedded Jetty container). This is the entry point to your service. + * The Java command used to launch this app should call the main method. 
+ */ +public class ApiServerWebApp extends AbstractService { + private static final Logger logger = LoggerFactory + .getLogger(ApiServerWebApp.class); + private static final String SEP = ";"; + + // REST API server for YARN native services + private HttpServer2 apiServer; + private InetSocketAddress bindAddress; + + public static void main(String[] args) throws IOException { + ApiServerWebApp apiWebApp = new ApiServerWebApp(); + try { + apiWebApp.init(new YarnConfiguration()); + apiWebApp.serviceStart(); + } catch (Exception e) { + logger.error("Got exception starting", e); + apiWebApp.close(); + } + } + + public ApiServerWebApp() { + super(ApiServerWebApp.class.getName()); + } + + @Override + protected void serviceStart() throws Exception { + bindAddress = getConfig().getSocketAddr(API_SERVER_ADDRESS, + DEFAULT_API_SERVER_ADDRESS, DEFAULT_API_SERVER_PORT); + logger.info("YARN API server running on " + bindAddress); + if (UserGroupInformation.isSecurityEnabled()) { + doSecureLogin(getConfig()); + } + startWebApp(); + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + if (apiServer != null) { + apiServer.stop(); + } + super.serviceStop(); + } + + private void doSecureLogin(org.apache.hadoop.conf.Configuration conf) + throws IOException { + SecurityUtil.login(conf, YarnConfiguration.RM_KEYTAB, + YarnConfiguration.RM_PRINCIPAL, bindAddress.getHostName()); + addFilters(conf); + } + + private void addFilters(org.apache.hadoop.conf.Configuration conf) { + // Always load pseudo authentication filter to parse "user.name" in an URL + // to identify a HTTP request's user. 
+ boolean hasHadoopAuthFilterInitializer = false; + String filterInitializerConfKey = "hadoop.http.filter.initializers"; + Class[] initializersClasses = + conf.getClasses(filterInitializerConfKey); + List targets = new ArrayList(); + if (initializersClasses != null) { + for (Class initializer : initializersClasses) { + if (initializer.getName().equals( + AuthenticationFilterInitializer.class.getName())) { + hasHadoopAuthFilterInitializer = true; + break; + } + targets.add(initializer.getName()); + } + } + if (!hasHadoopAuthFilterInitializer) { + targets.add(AuthenticationFilterInitializer.class.getName()); + conf.set(filterInitializerConfKey, StringUtils.join(",", targets)); + } + } + + private void startWebApp() throws IOException { + URI uri = URI.create("http://" + NetUtils.getHostPortString(bindAddress)); + + apiServer = new HttpServer2.Builder() + .setName("api-server") + .setConf(getConfig()) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) + .setUsernameConfKey(RM_WEBAPP_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) + .addEndpoint(uri).build(); + + String apiPackages = + ApiServer.class.getPackage().getName() + SEP + + GenericExceptionHandler.class.getPackage().getName() + SEP + + YarnJacksonJaxbJsonProvider.class.getPackage().getName(); + apiServer.addJerseyResourcePackage(apiPackages, "/*"); + + try { + logger.info("Service starting up. 
Logging start..."); + apiServer.start(); + logger.info("Server status = {}", apiServer.toString()); + for (Configuration conf : apiServer.getWebAppContext() + .getConfigurations()) { + logger.info("Configurations = {}", conf); + } + logger.info("Context Path = {}", Collections.singletonList( + apiServer.getWebAppContext().getContextPath())); + logger.info("ResourceBase = {}", Collections.singletonList( + apiServer.getWebAppContext().getResourceBase())); + logger.info("War = {}", Collections + .singletonList(apiServer.getWebAppContext().getWar())); + } catch (Exception ex) { + logger.error("Hadoop HttpServer2 App **failed**", ex); + throw ex; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md new file mode 100644 index 0000000..3cd3d48 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md @@ -0,0 +1,245 @@ + + +## Examples + +### Create a simple single-component service with most attribute values as defaults +POST URL - http://localhost:9191/ws/v1/services + +##### POST Request JSON +```json +{ + "name": "hello-world", + "components" : + [ + { + "name": "hello", + "number_of_containers": 1, + "artifact": { + "id": "nginx:latest", + "type": "DOCKER" + }, + "launch_command": "./start_nginx.sh", + "resource": { + "cpus": 1, + "memory": "256" + } + } + ] +} +``` + +##### GET Response JSON +GET URL - http://localhost:9191/ws/v1/services/hello-world + +Note, lifetime value of -1 means unlimited lifetime. 
+ +```json +{ + "name": "hello-world", + "id": "application_1503963985568_0002", + "lifetime": -1, + "components": [ + { + "name": "hello", + "dependencies": [], + "resource": { + "cpus": 1, + "memory": "256" + }, + "configuration": { + "properties": {}, + "env": {}, + "files": [] + }, + "quicklinks": [], + "containers": [ + { + "id": "container_e03_1503963985568_0002_01_000001", + "ip": "10.22.8.143", + "hostname": "myhost.local", + "state": "READY", + "launch_time": 1504051512412, + "bare_host": "10.22.8.143", + "component_name": "hello-0" + }, + { + "id": "container_e03_1503963985568_0002_01_000002", + "ip": "10.22.8.143", + "hostname": "myhost.local", + "state": "READY", + "launch_time": 1504051536450, + "bare_host": "10.22.8.143", + "component_name": "hello-1" + } + ], + "launch_command": "./start_nginx.sh", + "number_of_containers": 1, + "run_privileged_container": false + } + ], + "configuration": { + "properties": {}, + "env": {}, + "files": [] + }, + "quicklinks": {} +} + +``` +### Update to modify the lifetime of a service +PUT URL - http://localhost:9191/ws/v1/services/hello-world + +##### PUT Request JSON + +Note, irrespective of what the current lifetime value is, this update request will set the lifetime of the service to be 3600 seconds (1 hour) from the time the request is submitted. Hence, if a a service has remaining lifetime of 5 mins (say) and would like to extend it to an hour OR if an application has remaining lifetime of 5 hours (say) and would like to reduce it down to an hour, then for both scenarios you need to submit the same request below. 
+ +```json +{ + "lifetime": 3600 +} +``` +### Stop a service +PUT URL - http://localhost:9191/ws/v1/services/hello-world + +##### PUT Request JSON +```json +{ + "state": "STOPPED" +} +``` + +### Start a service +PUT URL - http://localhost:9191/ws/v1/services/hello-world + +##### PUT Request JSON +```json +{ + "state": "STARTED" +} +``` + +### Update to flex up/down the no of containers (instances) of a component of a service +PUT URL - http://localhost:9191/ws/v1/services/hello-world/components/hello + +##### PUT Request JSON +```json +{ + "name": "hello", + "number_of_containers": 3 +} +``` + +### Destroy a service +DELETE URL - http://localhost:9191/ws/v1/services/hello-world + +*** + +### Create a complicated service - HBase +POST URL - http://localhost:9191:/ws/v1/services/hbase-app-1 + +##### POST Request JSON + +```json +{ + "name": "hbase-app-1", + "lifetime": "3600", + "components": [ + { + "name": "hbasemaster", + "number_of_containers": 1, + "artifact": { + "id": "hbase:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/hdp/current/hbase-master/bin/hbase master start", + "resource": { + "cpus": 1, + "memory": "2048" + }, + "configuration": { + "env": { + "HBASE_LOG_DIR": "" + }, + "files": [ + { + "type": "XML", + "dest_file": "/etc/hadoop/conf/core-site.xml", + "props": { + "fs.defaultFS": "${CLUSTER_FS_URI}" + } + }, + { + "type": "XML", + "dest_file": "/etc/hbase/conf/hbase-site.xml", + "props": { + "hbase.cluster.distributed": "true", + "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}", + "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase", + "zookeeper.znode.parent": "${SERVICE_ZK_PATH}", + "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}", + "hbase.master.info.port": "16010" + } + } + ] + } + }, + { + "name": "regionserver", + "number_of_containers": 3, + "unique_component_support": "true", + "artifact": { + "id": "hbase:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/hdp/current/hbase-regionserver/bin/hbase 
regionserver start", + "resource": { + "cpus": 1, + "memory": "2048" + }, + "configuration": { + "env": { + "HBASE_LOG_DIR": "" + }, + "files": [ + { + "type": "XML", + "dest_file": "/etc/hadoop/conf/core-site.xml", + "props": { + "fs.defaultFS": "${CLUSTER_FS_URI}" + } + }, + { + "type": "XML", + "dest_file": "/etc/hbase/conf/hbase-site.xml", + "props": { + "hbase.cluster.distributed": "true", + "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}", + "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase", + "zookeeper.znode.parent": "${SERVICE_ZK_PATH}", + "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}", + "hbase.master.info.port": "16010", + "hbase.regionserver.hostname": "${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}" + } + } + ] + } + } + ], + "quicklinks": { + "HBase Master Status UI": "http://hbasemaster0.${SERVICE_NAME}.${USER}.${DOMAIN}:16010/master-status", + "Proxied HBase Master Status UI": "http://app-proxy/${DOMAIN}/${USER}/${SERVICE_NAME}/hbasemaster/16010/" + } +} +``` diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml new file mode 100644 index 0000000..cc76259 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml @@ -0,0 +1,471 @@ +# Hadoop YARN REST APIs for services v1 spec in YAML + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +swagger: '2.0' +info: + title: "YARN Simplified API layer for services" + description: | + Bringing a new service on YARN today is not a simple experience. The APIs of existing + frameworks are either too low level (native YARN), require writing new code (for frameworks with programmatic APIs) + or writing a complex spec (for declarative frameworks). + + This simplified REST API can be used to create and manage the lifecycle of YARN services. + In most cases, the application owner will not be forced to make any changes to their applications. + This is primarily true if the application is packaged with containerization technologies like Docker. + + This document describes the API specifications (aka. YarnFile) for deploying/managing + containerized services on YARN. The same JSON spec can be used for both REST API + and CLI to manage the services. + + version: "1.0.0" + license: + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html +# the domain of the service +host: host.mycompany.com +port: 9191(default) +# array of all schemes that your API supports +schemes: + - http +consumes: + - application/json +produces: + - application/json +paths: + /ws/v1/services/version: + get: + summary: Get current version of the API server. + description: Get current version of the API server. 
+ responses: + 200: + description: Successful request + + /ws/v1/services: + get: + summary: (TBD) List of services running in the cluster. + description: Get a list of all currently running services (response includes a minimal projection of the service info). For more details do a GET on a specific service name. + responses: + 200: + description: An array of services + schema: + type: array + items: + $ref: '#/definitions/Service' + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + post: + summary: Create a service + description: Create a service. The request JSON is a service object with details required for creation. If the request is successful it returns 202 Accepted. A success of this API only confirms success in submission of the service creation request. There is no guarantee that the service will actually reach a RUNNING state. Resource availability and several other factors determines if the service will be deployed in the cluster. It is expected that clients would subsequently call the GET API to get details of the service and determine its state. + parameters: + - name: Service + in: body + description: Service request object + required: true + schema: + $ref: '#/definitions/Service' + responses: + 202: + description: The request to create a service is accepted + 400: + description: Invalid service definition provided in the request body + 500: + description: Failed to create a service + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + + /ws/v1/services/{service_name}: + put: + summary: Update a service or upgrade the binary version of the components of a running service + description: Update the runtime properties of a service. Currently the following operations are supported - update lifetime, stop/start a service. + The PUT operation is also used to orchestrate an upgrade of the service containers to a newer version of their artifacts (TBD). 
+ parameters: + - name: service_name + in: path + description: Service name + required: true + type: string + - name: Service + in: body + description: The updated service definition. It can contain the updated lifetime of a service or the desired state (STOPPED/STARTED) of a service to initiate a start/stop operation against the specified service + required: true + schema: + $ref: '#/definitions/Service' + responses: + 204: + description: Update or upgrade was successful + 404: + description: Service does not exist + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + delete: + summary: Destroy a service + description: Destroy a service and release all resources. This API might have to return JSON data providing location of logs (TBD), etc. + parameters: + - name: service_name + in: path + description: Service name + required: true + type: string + responses: + 204: + description: Destroy was successful + 404: + description: Service does not exist + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + get: + summary: Get details of a service. + description: Return the details (including containers) of a running service + parameters: + - name: service_name + in: path + description: Service name + required: true + type: string + responses: + 200: + description: a service object + schema: + type: object + items: + $ref: '#/definitions/Service' + examples: + service_name: logsearch + artifact: + id: logsearch:latest + type: docker + 404: + description: Service does not exist + default: + description: Unexpected error + schema: + $ref: '#/definitions/ServiceStatus' + /ws/v1/services/{service_name}/components/{component_name}: + put: + summary: Flex a component's number of instances. 
+      description: Set a component's desired number of instances
+      parameters:
+        - name: service_name
+          in: path
+          description: Service name
+          required: true
+          type: string
+        - name: component_name
+          in: path
+          description: Component name
+          required: true
+          type: string
+        - name: Component
+          in: body
+          description: The definition of a component which contains the updated number of instances.
+          required: true
+          schema:
+            $ref: '#/definitions/Component'
+      responses:
+        200:
+          description: Flex was successful
+        404:
+          description: Service does not exist
+        default:
+          description: Unexpected error
+          schema:
+            $ref: '#/definitions/ServiceStatus'
+definitions:
+  Service:
+    description: a service resource has the following attributes.
+    required:
+      - name
+    properties:
+      name:
+        type: string
+        description: A unique service name. If Registry DNS is enabled, the max length is 63 characters.
+      id:
+        type: string
+        description: A unique service id.
+      artifact:
+        description: The default artifact for all components of the service except the components which has Artifact type set to SERVICE (optional).
+        $ref: '#/definitions/Artifact'
+      resource:
+        description: The default resource for all components of the service (optional).
+        $ref: '#/definitions/Resource'
+      launch_time:
+        type: string
+        format: date
+        description: The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.
+      number_of_running_containers:
+        type: integer
+        format: int64
+        description: In get response this provides the total number of running containers for this service (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests.
+      lifetime:
+        type: integer
+        format: int64
+        description: Life time (in seconds) of the service from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.
+      placement_policy:
+        description: (TBD) Advanced scheduling and placement policies. If not specified, it defaults to the default placement policy of the service owner. The design of placement policies are in the works. It is not very clear at this point, how policies in conjunction with labels be exposed to service owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902.
+        $ref: '#/definitions/PlacementPolicy'
+      components:
+        description: Components of a service.
+        type: array
+        items:
+          $ref: '#/definitions/Component'
+      configuration:
+        description: Config properties of a service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level.
+        $ref: '#/definitions/Configuration'
+      state:
+        description: State of the service. Specifying a value for this attribute for the PUT payload means update the service to this desired state.
+        $ref: '#/definitions/ServiceState'
+      quicklinks:
+        type: object
+        description: A blob of key-value pairs of quicklinks to be exported for a service.
+        additionalProperties:
+          type: string
+      queue:
+        type: string
+        description: The YARN queue that this service should be submitted to.
+  Resource:
+    description:
+      Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overridden at the component level. Only one of profile OR cpu & memory are expected. It raises a validation exception otherwise.
+ properties: + profile: + type: string + description: Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc. + cpus: + type: integer + format: int32 + description: Amount of vcores allocated to each container (optional but overrides cpus in profile if specified). + memory: + type: string + description: Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB. + PlacementPolicy: + description: Placement policy of an instance of a service. This feature is in the works in YARN-6592. + properties: + label: + type: string + description: Assigns a service to a named partition of the cluster where the service desires to run (optional). If not specified all services are submitted to a default label of the service owner. One or more labels can be setup for each service owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc. + Artifact: + description: Artifact of a service component. If not specified, component will just run the bare launch command and no artifact will be localized. + required: + - id + properties: + id: + type: string + description: Artifact id. Examples are package location uri for tarball based services, image name for docker, name of service, etc. + type: + type: string + description: Artifact type, like docker, tarball, etc. (optional). For TARBALL type, the specified tarball will be localized to the container local working directory under a folder named lib. For SERVICE type, the service specified will be read and its components will be added into this service. The original component with artifact type SERVICE will be removed (any properties specified in the original component will be ignored). + enum: + - DOCKER + - TARBALL + - SERVICE + default: DOCKER + uri: + type: string + description: Artifact location to support multiple artifact stores (optional). 
+ Component: + description: One or more components of the service. If the service is HBase say, then the component can be a simple role like master or regionserver. If the service is a complex business webapp then a component can be other services say Kafka or Storm. Thereby it opens up the support for complex and nested services. + required: + - name + properties: + name: + type: string + description: Name of the service component (mandatory). If Registry DNS is enabled, the max length is 63 characters. If unique component support is enabled, the max length is lowered to 44 characters. + state: + description: The state of the component + $ref: "#/definitions/ComponentState" + dependencies: + type: array + items: + type: string + description: An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG. + readiness_check: + description: Readiness check for this component. + $ref: '#/definitions/ReadinessCheck' + artifact: + description: Artifact of the component (optional). If not specified, the service level global artifact takes effect. + $ref: '#/definitions/Artifact' + launch_command: + type: string + description: The custom launch command of this component (optional for DOCKER component, required otherwise). When specified at the component level, it overrides the value specified at the global level (if any). + resource: + description: Resource of this component (optional). If not specified, the service level global resource takes effect. + $ref: '#/definitions/Resource' + number_of_containers: + type: integer + format: int64 + description: Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect. + run_privileged_container: + type: boolean + description: Run all containers of this component in privileged mode (YARN-4262). 
+ placement_policy: + description: Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the service level placement_policy takes effect. Refer to the description at the global level for more details. + $ref: '#/definitions/PlacementPolicy' + configuration: + description: Config properties for this component. + $ref: '#/definitions/Configuration' + quicklinks: + type: array + items: + type: string + description: A list of quicklink keys defined at the service level, and to be resolved by this component. + ReadinessCheck: + description: A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. Hence the need for a simple interface, with scope to support advanced usecases. + required: + - type + properties: + type: + type: string + description: E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content). + enum: + - HTTP + - PORT + props: + type: object + description: A blob of key value pairs that will be used to configure the check. + additionalProperties: + type: string + artifact: + description: Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note, artifacts of type docker only is supported for now. NOT IMPLEMENTED YET + $ref: '#/definitions/Artifact' + Configuration: + description: Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. 
Files of several standard formats like xml, properties, json, yaml and templates will be supported. + properties: + properties: + type: object + description: A blob of key-value pairs of common service properties. + additionalProperties: + type: string + env: + type: object + description: A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection. + additionalProperties: + type: string + files: + description: Array of list of files that needs to be created and made available as volumes in the service component containers. + type: array + items: + $ref: '#/definitions/ConfigFile' + ConfigFile: + description: A config file that needs to be created and made available as a volume in a service component container. + properties: + type: + type: string + description: Config file in the standard format like xml, properties, json, yaml, template. + enum: + - XML + - PROPERTIES + - JSON + - YAML + - TEMPLATE + - ENV + - HADOOP_XML + dest_file: + type: string + description: The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf. + src_file: + type: string + description: This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported. + props: + type: object + description: A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. 
If src_file is specified, src_file contents are dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file.
+  Container:
+    description: An instance of a running service container.
+    properties:
+      id:
+        type: string
+        description: Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002.
+      launch_time:
+        type: string
+        format: date
+        description: The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.
+      ip:
+        type: string
+        description: IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.
+      hostname:
+        type: string
+        description: Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.
+      bare_host:
+        type: string
+        description: The bare node or host in which the container is running, e.g. cn008.example.com.
+      state:
+        description: State of the container of a service.
+        $ref: '#/definitions/ContainerState'
+      component_instance_name:
+        type: string
+        description: Name of the component instance that this container instance belongs to. Component instance name is named as $COMPONENT_NAME-i, where i is a
+          monotonically increasing integer. E.g. A component called nginx can have multiple component instances named as nginx-0, nginx-1 etc.
+          Each component instance is backed by a container instance.
+      resource:
+        description: Resource used for this container.
+        $ref: '#/definitions/Resource'
+      artifact:
+        description: Artifact used for this container.
+        $ref: '#/definitions/Artifact'
+      privileged_container:
+        type: boolean
+        description: Container running in privileged mode or not.
+ ServiceState: + description: The current state of a service. + properties: + state: + type: string + description: enum of the state of the service + enum: + - ACCEPTED + - STARTED + - STABLE + - STOPPED + - FAILED + ContainerState: + description: The current state of the container of a service. + properties: + state: + type: string + description: enum of the state of the container + enum: + - INIT + - STARTED + - READY + ComponentState: + description: The state of the component + properties: + state: + type: string + description: enum of the state of the component + enum: + - FLEXING + - STABLE + ServiceStatus: + description: The current status of a submitted service, returned as a response to the GET API. + properties: + diagnostics: + type: string + description: Diagnostic information (if any) for the reason of the current state of the service. It typically has a non-null value, if the service is in a non-running state. + state: + description: Service state. + $ref: '#/definitions/ServiceState' + code: + type: integer + format: int32 + description: An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties new file mode 100644 index 0000000..8c679b9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/log4j-server.properties @@ -0,0 +1,76 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This is the log4j configuration for YARN Services REST API Server + +# Log rotation based on size (100KB) with a max of 10 backup files +log4j.rootLogger=INFO, restservicelog +log4j.threshhold=ALL + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n + +log4j.appender.restservicelog=org.apache.log4j.RollingFileAppender +log4j.appender.restservicelog.layout=org.apache.log4j.PatternLayout +log4j.appender.restservicelog.File=${REST_SERVICE_LOG_DIR}/restservice.log +log4j.appender.restservicelog.MaxFileSize=1GB +log4j.appender.restservicelog.MaxBackupIndex=10 + +# log layout skips stack-trace creation operations by avoiding line numbers and method +log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n + +# debug edition is much more expensive +#log4j.appender.restservicelog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n + +# configure stderr +# set the conversion pattern of stderr +# Print the date in ISO 8601 format +log4j.appender.stderr=org.apache.log4j.ConsoleAppender +log4j.appender.stderr.Target=System.err +log4j.appender.stderr.layout=org.apache.log4j.PatternLayout +log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n + 
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender +log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout +log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n + +# for debugging REST API Service +#log4j.logger.org.apache.hadoop.yarn.services=DEBUG + +# uncomment to debug service lifecycle issues +#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG +#log4j.logger.org.apache.hadoop.yarn.service=DEBUG + +# uncomment for YARN operations +#log4j.logger.org.apache.hadoop.yarn.client=DEBUG + +# uncomment this to debug security problems +#log4j.logger.org.apache.hadoop.security=DEBUG + +#crank back on some noise +log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +log4j.logger.org.apache.hadoop.hdfs=WARN +log4j.logger.org.apache.hadoop.hdfs.shortcircuit=ERROR + +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN +log4j.logger.org.apache.zookeeper=WARN +log4j.logger.org.apache.curator.framework.state=ERROR +log4j.logger.org.apache.curator.framework.imps=WARN + +log4j.logger.org.mortbay.log=DEBUG diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app new file mode 100644 index 0000000..6a077b1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/webapps/api-server/app @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DON'T DELETE. REST WEBAPP RUN SCRIPT WILL STOP WORKING. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml new file mode 100644 index 0000000..1282c9f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/webapp/WEB-INF/web.xml @@ -0,0 +1,36 @@ + + + + + + Jersey REST API + com.sun.jersey.spi.container.servlet.ServletContainer + + com.sun.jersey.config.property.packages + org.apache.hadoop.yarn.service.webapp,org.apache.hadoop.yarn.service.api,org.apache.hadoop.yarn.service.api.records + + + com.sun.jersey.api.json.POJOMappingFeature + true + + 1 + + + Jersey REST API + /* + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java new file mode 100644 index 0000000..3e08c3a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/ServiceClientTest.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; + +/** + * A mock version of ServiceClient - This class is design + * to simulate various error conditions that will happen + * when a consumer class calls ServiceClient. 
+ */ +public class ServiceClientTest extends ServiceClient { + + private Configuration conf = new Configuration(); + + protected static void init() { + } + + public ServiceClientTest() { + super(); + } + + @Override + public Configuration getConfig() { + return conf; + } + + @Override + public ApplicationId actionCreate(Service service) { + String serviceName = service.getName(); + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + return ApplicationId.newInstance(System.currentTimeMillis(), 1); + } + + @Override + public Service getStatus(String appName) { + if (appName == null) { + throw new NullPointerException(); + } + if (appName.equals("jenkins")) { + return new Service(); + } else { + throw new IllegalArgumentException(); + } + } + + @Override + public int actionStart(String serviceName) + throws YarnException, IOException { + if (serviceName == null) { + throw new NullPointerException(); + } + if (serviceName.equals("jenkins")) { + return EXIT_SUCCESS; + } else { + throw new ApplicationNotFoundException(""); + } + } + + @Override + public int actionStop(String serviceName, boolean waitForAppStopped) + throws YarnException, IOException { + if (serviceName == null) { + throw new NullPointerException(); + } + if (serviceName.equals("jenkins")) { + return EXIT_SUCCESS; + } else { + throw new ApplicationNotFoundException(""); + } + } + + @Override + public int actionDestroy(String serviceName) { + if (serviceName == null) { + throw new NullPointerException(); + } + if (serviceName.equals("jenkins")) { + return EXIT_SUCCESS; + } else { + throw new IllegalArgumentException(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java new file mode 100644 index 0000000..2b22474 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java @@ -0,0 +1,366 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.webapp.ApiServer; +import javax.ws.rs.Path; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; + +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.*; + +/** + * Test case for ApiServer REST API. 
+ * + */ +public class TestApiServer { + private ApiServer apiServer; + + @Before + public void setup() throws Exception { + ServiceClient mockServerClient = new ServiceClientTest(); + Configuration conf = new Configuration(); + conf.set("yarn.api-service.service.client.class", + ServiceClientTest.class.getName()); + ApiServer.setServiceClient(mockServerClient); + this.apiServer = new ApiServer(conf); + } + + @Test + public void testPathAnnotation() { + assertNotNull(this.apiServer.getClass().getAnnotation(Path.class)); + assertTrue("The controller has the annotation Path", + this.apiServer.getClass().isAnnotationPresent(Path.class)); + final Path path = this.apiServer.getClass() + .getAnnotation(Path.class); + assertEquals("The path has /ws/v1 annotation", path.value(), + "/ws/v1"); + } + + @Test + public void testGetVersion() { + final Response actual = apiServer.getVersion(); + assertEquals("Version number is", actual.getStatus(), + Response.ok().build().getStatus()); + } + + @Test + public void testBadCreateService() { + Service service = new Service(); + // Test for invalid argument + final Response actual = apiServer.createService(service); + assertEquals("Create service is ", actual.getStatus(), + Response.status(Status.BAD_REQUEST).build().getStatus()); + } + + @Test + public void testGoodCreateService() { + Service service = new Service(); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.createService(service); + assertEquals("Create service is ", actual.getStatus(), + 
Response.status(Status.ACCEPTED).build().getStatus()); + } + + @Test + public void testBadGetService() { + final Response actual = apiServer.getService("no-jenkins"); + assertEquals("Get service is ", actual.getStatus(), + Response.status(Status.NOT_FOUND).build().getStatus()); + } + + @Test + public void testBadGetService2() { + final Response actual = apiServer.getService(null); + assertEquals("Get service is ", actual.getStatus(), + Response.status(Status.INTERNAL_SERVER_ERROR) + .build().getStatus()); + } + + @Test + public void testGoodGetService() { + final Response actual = apiServer.getService("jenkins"); + assertEquals("Get service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testBadDeleteService() { + final Response actual = apiServer.deleteService("no-jenkins"); + assertEquals("Delete service is ", actual.getStatus(), + Response.status(Status.BAD_REQUEST).build().getStatus()); + } + + @Test + public void testBadDeleteService2() { + final Response actual = apiServer.deleteService(null); + assertEquals("Delete service is ", actual.getStatus(), + Response.status(Status.INTERNAL_SERVER_ERROR) + .build().getStatus()); + } + + @Test + public void testGoodDeleteService() { + final Response actual = apiServer.deleteService("jenkins"); + assertEquals("Delete service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testDecreaseContainerAndStop() { + Service service = new Service(); + service.setState(ServiceState.STOPPED); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(0L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + 
c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.updateService("jenkins", + service); + assertEquals("update service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testBadDecreaseContainerAndStop() { + Service service = new Service(); + service.setState(ServiceState.STOPPED); + service.setName("no-jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("no-jenkins"); + c.setNumberOfContainers(-1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + System.out.println("before stop"); + final Response actual = apiServer.updateService("no-jenkins", + service); + assertEquals("flex service is ", actual.getStatus(), + Response.status(Status.BAD_REQUEST).build().getStatus()); + } + + @Test + public void testIncreaseContainersAndStart() { + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(2L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.updateService("jenkins", + service); + assertEquals("flex service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testBadStartServices() 
{ + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("no-jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(2L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.updateService("no-jenkins", + service); + assertEquals("start service is ", actual.getStatus(), + Response.status(Status.INTERNAL_SERVER_ERROR).build() + .getStatus()); + } + + @Test + public void testGoodStartServices() { + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(2L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + final Response actual = apiServer.updateService("jenkins", + service); + assertEquals("start service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testBadStopServices() { + Service service = new Service(); + service.setState(ServiceState.STOPPED); + service.setName("no-jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + 
Component c = new Component(); + c.setName("no-jenkins"); + c.setNumberOfContainers(-1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + System.out.println("before stop"); + final Response actual = apiServer.updateService("no-jenkins", + service); + assertEquals("stop service is ", actual.getStatus(), + Response.status(Status.BAD_REQUEST).build().getStatus()); + } + + @Test + public void testGoodStopServices() { + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("jenkins"); + c.setNumberOfContainers(-1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + System.out.println("before stop"); + final Response actual = apiServer.updateService("jenkins", + service); + assertEquals("stop service is ", actual.getStatus(), + Response.status(Status.OK).build().getStatus()); + } + + @Test + public void testUpdateService() { + Service service = new Service(); + service.setState(ServiceState.STARTED); + service.setName("no-jenkins"); + Artifact artifact = new Artifact(); + artifact.setType(TypeEnum.DOCKER); + artifact.setId("jenkins:latest"); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory("2048"); + List components = new ArrayList(); + Component c = new Component(); + c.setName("no-jenkins"); + c.setNumberOfContainers(-1L); + c.setArtifact(artifact); + c.setLaunchCommand(""); + c.setResource(resource); + components.add(c); + service.setComponents(components); + System.out.println("before stop"); + final Response actual = 
apiServer.updateService("no-jenkins", + service); + assertEquals("update service is ", actual.getStatus(), + Response.status(Status.INTERNAL_SERVER_ERROR) + .build().getStatus()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml new file mode 100644 index 0000000..2814cca --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/dev-support/findbugs-exclude.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf/yarnservice-log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf/yarnservice-log4j.properties new file mode 100644 index 0000000..58c8e27 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/conf/yarnservice-log4j.properties @@ -0,0 +1,62 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# This is the log4j configuration for Slider Application Master + +# Log rotation based on size (256MB) with a max of 20 backup files +log4j.rootLogger=INFO, amlog +log4j.threshhold=ALL +log4j.appender.amlog=org.apache.log4j.RollingFileAppender +log4j.appender.amlog.layout=org.apache.log4j.PatternLayout +log4j.appender.amlog.File=${LOG_DIR}/serviceam.log +log4j.appender.amlog.MaxFileSize=256MB +log4j.appender.amlog.MaxBackupIndex=20 + +# log layout skips stack-trace creation operations by avoiding line numbers and method +log4j.appender.amlog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n + +# debug edition is much more expensive +#log4j.appender.amlog.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n + +# configure stderr +# set the conversion pattern of stderr +# Print the date in ISO 8601 format +log4j.appender.stderr=org.apache.log4j.ConsoleAppender +log4j.appender.stderr.Target=System.err +log4j.appender.stderr.layout=org.apache.log4j.PatternLayout +log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n + +log4j.appender.subprocess=org.apache.log4j.ConsoleAppender +log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout +log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n + +# for debugging yarn-service framework +#log4j.logger.org.apache.hadoop.yarn.service=DEBUG + +# uncomment for YARN operations +#log4j.logger.org.apache.hadoop.yarn.client=DEBUG + +# uncomment this to debug security problems +#log4j.logger.org.apache.hadoop.security=DEBUG + +#crank back on some noise +log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +log4j.logger.org.apache.hadoop.hdfs=WARN + +log4j.logger.org.apache.zookeeper=WARN +log4j.logger.org.apache.curator.framework.state=ERROR +log4j.logger.org.apache.curator.framework.imps=WARN diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-no-dns.json 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-no-dns.json new file mode 100644 index 0000000..6b35538 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-no-dns.json @@ -0,0 +1,62 @@ +{ + "name": "httpd-service-no-dns", + "lifetime": "3600", + "components": [ + { + "name": "httpd", + "number_of_containers": 2, + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "readiness_check": { + "type": "HTTP", + "props": { + "url": "http://${THIS_HOST}:8080" + } + }, + "configuration": { + "files": [ + { + "type": "ENV", + "dest_file": "/var/www/html/index.html", + "props": { + "content": "
Title
Hello from ${COMPONENT_INSTANCE_NAME}!" + } + } + ] + } + }, + { + "name": "httpd-proxy", + "number_of_containers": 1, + "dependencies": [ "httpd" ], + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "files": [ + { + "type": "TEMPLATE", + "dest_file": "/etc/httpd/conf.d/httpd-proxy.conf", + "src_file": "httpd-proxy-no-dns.conf" + } + ] + } + } + ], + "quicklinks": { + "Apache HTTP Server": "http://httpd-proxy-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080" + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-proxy-no-dns.conf b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-proxy-no-dns.conf new file mode 100644 index 0000000..9894e64 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd-no-dns/httpd-proxy-no-dns.conf @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + + BalancerMember http://${HTTPD-0_IP}:8080 + BalancerMember http://${HTTPD-1_IP}:8080 + ProxySet lbmethod=bytraffic + + +ProxyPass "/" "balancer://test/" +ProxyPassReverse "/" "balancer://test/" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd-proxy.conf b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd-proxy.conf new file mode 100644 index 0000000..e8651a5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd-proxy.conf @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + + BalancerMember http://httpd-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080 + BalancerMember http://httpd-1.${SERVICE_NAME}.${USER}.${DOMAIN}:8080 + ProxySet lbmethod=bytraffic + + +ProxyPass "/" "balancer://test/" +ProxyPassReverse "/" "balancer://test/" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd.json new file mode 100644 index 0000000..e63376d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/httpd/httpd.json @@ -0,0 +1,55 @@ +{ + "name": "httpd-service", + "lifetime": "3600", + "components": [ + { + "name": "httpd", + "number_of_containers": 2, + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "files": [ + { + "type": "ENV", + "dest_file": "/var/www/html/index.html", + "props": { + "content": "
Title
Hello from ${COMPONENT_INSTANCE_NAME}!" + } + } + ] + } + }, + { + "name": "httpd-proxy", + "number_of_containers": 1, + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "files": [ + { + "type": "TEMPLATE", + "dest_file": "/etc/httpd/conf.d/httpd-proxy.conf", + "src_file": "httpd-proxy.conf" + } + ] + } + } + ], + "quicklinks": { + "Apache HTTP Server": "http://httpd-proxy-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080" + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/sleeper/sleeper.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/sleeper/sleeper.json new file mode 100644 index 0000000..89ce527 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/examples/sleeper/sleeper.json @@ -0,0 +1,15 @@ +{ + "name": "sleeper-service", + "components" : + [ + { + "name": "sleeper", + "number_of_containers": 2, + "launch_command": "sleep 900000", + "resource": { + "cpus": 1, + "memory": "256" + } + } + ] +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml new file mode 100644 index 0000000..6c9a875 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/pom.xml @@ -0,0 +1,255 @@ + + + 4.0.0 + + org.apache.hadoop + hadoop-yarn-services + 3.1.0-SNAPSHOT + + hadoop-yarn-services-core + jar + Apache Hadoop YARN Services Core + + + + ${project.parent.basedir} + + + + + + + src/main/resources + true + + + + + + org.apache.hadoop + 
hadoop-maven-plugins + + + compile-protoc + + protoc + + + ${protobuf.version} + ${protoc.path} + + ${basedir}/src/main/proto + + + ${basedir}/src/main/proto + + ClientAMProtocol.proto + + + + + + + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + ${java.home} + + + + + + org.apache.rat + apache-rat-plugin + + + **/*.json + + + + + + + + + org.slf4j + slf4j-api + + + + log4j + log4j + runtime + + + + com.google.guava + guava + + + + org.codehaus.jackson + jackson-core-asl + + + + org.codehaus.jackson + jackson-mapper-asl + + + + com.fasterxml.jackson.core + jackson-annotations + + + + org.apache.hadoop + hadoop-hdfs-client + + + + org.apache.hadoop + hadoop-yarn-client + + + + org.apache.hadoop + hadoop-yarn-registry + + + + org.apache.hadoop + hadoop-yarn-common + + + + org.apache.hadoop + hadoop-yarn-server-common + + + + org.apache.hadoop + hadoop-common + + + + org.apache.hadoop + hadoop-annotations + + + + org.apache.hadoop + hadoop-yarn-api + + + + com.google.protobuf + protobuf-java + + + + org.apache.commons + commons-configuration2 + + + + org.apache.commons + commons-compress + + + + commons-io + commons-io + + + + commons-lang + commons-lang + + + + org.apache.curator + curator-client + + + + org.apache.curator + curator-framework + + + + javax.xml.bind + jaxb-api + + + + org.yaml + snakeyaml + + + + io.swagger + swagger-annotations + + + + + + + + junit + junit + test + + + + org.mockito + mockito-all + test + + + + org.apache.hadoop + hadoop-minicluster + test + + + + org.apache.curator + curator-test + test + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMProtocol.java 
new file mode 100644 index 0000000..516d23d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMProtocol.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto; + +import java.io.IOException; + +public interface ClientAMProtocol { + FlexComponentsResponseProto flexComponents(FlexComponentsRequestProto request) + throws IOException, YarnException; + + GetStatusResponseProto getStatus(GetStatusRequestProto requestProto) + throws IOException, YarnException; + + StopResponseProto stop(StopRequestProto requestProto) + throws IOException, YarnException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java new file mode 100644 index 0000000..8e4c34d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ClientAMService.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto; +import org.apache.hadoop.yarn.service.component.ComponentEvent; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; + +import static org.apache.hadoop.yarn.service.component.ComponentEventType.FLEX; + +public class ClientAMService extends AbstractService + implements ClientAMProtocol { + + private static final Logger LOG = + LoggerFactory.getLogger(ClientAMService.class); + + private ServiceContext context; + private Server server; + + private InetSocketAddress bindAddress; 
+ + public ClientAMService(ServiceContext context) { + super("Client AM Service"); + this.context = context; + } + + @Override protected void serviceStart() throws Exception { + Configuration conf = getConfig(); + YarnRPC rpc = YarnRPC.create(conf); + InetSocketAddress address = new InetSocketAddress(0); + server = rpc.getServer(ClientAMProtocol.class, this, address, conf, + context.secretManager, 1); + server.start(); + + String nodeHostString = + System.getenv(ApplicationConstants.Environment.NM_HOST.name()); + + bindAddress = NetUtils.createSocketAddrForHost(nodeHostString, + server.getListenerAddress().getPort()); + + LOG.info("Instantiated ClientAMService at " + bindAddress); + super.serviceStart(); + } + + @Override protected void serviceStop() throws Exception { + if (server != null) { + server.stop(); + } + super.serviceStop(); + } + + @Override public FlexComponentsResponseProto flexComponents( + FlexComponentsRequestProto request) throws IOException { + if (!request.getComponentsList().isEmpty()) { + for (ComponentCountProto component : request.getComponentsList()) { + ComponentEvent event = new ComponentEvent(component.getName(), FLEX) + .setDesired(component.getNumberOfContainers()); + context.scheduler.getDispatcher().getEventHandler().handle(event); + LOG.info("Flexing component {} to {}", component.getName(), + component.getNumberOfContainers()); + } + } + return FlexComponentsResponseProto.newBuilder().build(); + } + + @Override + public GetStatusResponseProto getStatus(GetStatusRequestProto request) + throws IOException, YarnException { + String stat = ServiceApiUtil.jsonSerDeser.toJson(context.service); + return GetStatusResponseProto.newBuilder().setStatus(stat).build(); + } + + @Override + public StopResponseProto stop(StopRequestProto requestProto) + throws IOException, YarnException { + LOG.info("Stop the service."); + // Stop the service in 2 seconds delay to make sure this rpc call is completed. 
+ // shutdown hook will be executed which will stop AM gracefully. + Thread thread = new Thread() { + @Override + public void run() { + try { + Thread.sleep(2000); + ExitUtil.terminate(0); + } catch (InterruptedException e) { + LOG.error("Interrupted while stopping", e); + } + } + }; + thread.start(); + return StopResponseProto.newBuilder().build(); + } + + public InetSocketAddress getBindAddress() { + return bindAddress; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java new file mode 100644 index 0000000..4743f28 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.yarn.service.component.Component; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.NODE_BLACKLIST_THRESHOLD; + +/** + * This tracks the container failures per node. If the failure counter exceeds + * the maxFailurePerNode limit, it'll blacklist that node. + * + */ +public class ContainerFailureTracker { + + private static final Logger LOG = + LoggerFactory.getLogger(ContainerFailureTracker.class); + + // Host -> num container failures + private Map failureCountPerNode = new HashMap<>(); + private Set blackListedNodes = new HashSet<>(); + private ServiceContext context; + private int maxFailurePerNode; + private Component component; + + public ContainerFailureTracker(ServiceContext context, Component component) { + this.context = context; + this.component = component; + maxFailurePerNode = component.getComponentSpec().getConfiguration() + .getPropertyInt(NODE_BLACKLIST_THRESHOLD, 3); + } + + + public synchronized void incNodeFailure(String host) { + int num = 0; + if (failureCountPerNode.containsKey(host)) { + num = failureCountPerNode.get(host); + } + num++; + failureCountPerNode.put(host, num); + + // black list the node if exceed max failure + if (num > maxFailurePerNode && !blackListedNodes.contains(host)) { + List blacklists = new ArrayList<>(); + blacklists.add(host); + blackListedNodes.add(host); + context.scheduler.getAmRMClient().updateBlacklist(blacklists, null); + LOG.info("[COMPONENT {}]: Failed {} times on this host, blacklisted {}." 
+ + " Current list of blacklisted nodes: {}", + component.getName(), num, host, blackListedNodes); + } + } + + public synchronized void resetContainerFailures() { + // reset container failure counter per node + failureCountPerNode.clear(); + context.scheduler.getAmRMClient() + .updateBlacklist(null, new ArrayList<>(blackListedNodes)); + LOG.info("[COMPONENT {}]: Clearing blacklisted nodes {} ", + component.getName(), blackListedNodes); + blackListedNodes.clear(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java new file mode 100644 index 0000000..94dbc6e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import com.google.common.cache.LoadingCache; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; + +public class ServiceContext { + public Service service = null; + public SliderFileSystem fs; + public String serviceHdfsDir = ""; + public ApplicationAttemptId attemptId; + public LoadingCache configCache; + public ServiceScheduler scheduler; + public ClientToAMTokenSecretManager secretManager; + public ClientAMService clientAMService; + + public ServiceContext() { + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java new file mode 100644 index 0000000..d58e32b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.Options; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; +import org.apache.hadoop.yarn.service.monitor.ServiceMonitor; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +public class ServiceMaster extends CompositeService { + + private static final Logger LOG = + 
LoggerFactory.getLogger(ServiceMaster.class); + + public static final String YARNFILE_OPTION = "yarnfile"; + + private static String serviceDefPath; + protected ServiceContext context; + + public ServiceMaster(String name) { + super(name); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + //TODO Deprecate slider conf, make sure works with yarn conf + printSystemEnv(); + if (UserGroupInformation.isSecurityEnabled()) { + UserGroupInformation.setConfiguration(conf); + } + LOG.info("Login user is {}", UserGroupInformation.getLoginUser()); + + context = new ServiceContext(); + Path appDir = getAppDir(); + context.serviceHdfsDir = appDir.toString(); + SliderFileSystem fs = new SliderFileSystem(conf); + context.fs = fs; + fs.setAppDir(appDir); + loadApplicationJson(context, fs); + + ContainerId amContainerId = getAMContainerId(); + + ApplicationAttemptId attemptId = amContainerId.getApplicationAttemptId(); + LOG.info("Service AppAttemptId: " + attemptId); + context.attemptId = attemptId; + + // configure AM to wait forever for RM + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1); + conf.unset(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS); + + DefaultMetricsSystem.initialize("ServiceAppMaster"); + + context.secretManager = new ClientToAMTokenSecretManager(attemptId, null); + ClientAMService clientAMService = new ClientAMService(context); + context.clientAMService = clientAMService; + addService(clientAMService); + + ServiceScheduler scheduler = createServiceScheduler(context); + addService(scheduler); + context.scheduler = scheduler; + + ServiceMonitor monitor = new ServiceMonitor("Service Monitor", context); + addService(monitor); + + super.serviceInit(conf); + } + + protected ContainerId getAMContainerId() throws BadClusterStateException { + return ContainerId.fromString(SliderUtils.mandatoryEnvVariable( + ApplicationConstants.Environment.CONTAINER_ID.name())); + } + + protected Path getAppDir() { + return 
new Path(serviceDefPath).getParent(); + } + + protected ServiceScheduler createServiceScheduler(ServiceContext context) + throws IOException, YarnException { + return new ServiceScheduler(context); + } + + protected void loadApplicationJson(ServiceContext context, + SliderFileSystem fs) throws IOException { + context.service = ServiceApiUtil + .loadServiceFrom(fs, new Path(serviceDefPath)); + LOG.info(context.service.toString()); + } + + @Override + protected void serviceStop() throws Exception { + LOG.info("Stopping app master"); + super.serviceStop(); + } + + private void printSystemEnv() { + for (Map.Entry envs : System.getenv().entrySet()) { + LOG.info("{} = {}", envs.getKey(), envs.getValue()); + } + } + + public static void main(String[] args) throws Exception { + Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); + StringUtils.startupShutdownMessage(ServiceMaster.class, args, LOG); + try { + ServiceMaster serviceMaster = new ServiceMaster("Service Master"); + ShutdownHookManager.get() + .addShutdownHook(new CompositeServiceShutdownHook(serviceMaster), 30); + YarnConfiguration conf = new YarnConfiguration(); + Options opts = new Options(); + opts.addOption(YARNFILE_OPTION, true, "HDFS path to JSON service " + + "specification"); + opts.getOption(YARNFILE_OPTION).setRequired(true); + GenericOptionsParser parser = new GenericOptionsParser(conf, opts, args); + CommandLine cmdLine = parser.getCommandLine(); + serviceMaster.serviceDefPath = cmdLine.getOptionValue(YARNFILE_OPTION); + serviceMaster.init(conf); + serviceMaster.start(); + } catch (Throwable t) { + LOG.error("Error starting service master", t); + ExitUtil.terminate(1, "Error starting service master"); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMetrics.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMetrics.java new file mode 100644 index 0000000..9fc886e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMetrics.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; + +import static org.apache.hadoop.metrics2.lib.Interns.info; + +@Metrics(context = "yarn-native-service") +public class ServiceMetrics implements MetricsSource { + + @Metric("containers requested") + public MutableGaugeInt containersRequested; + + @Metric("anti-affinity containers pending") + public MutableGaugeInt pendingAAContainers; + + @Metric("containers running") + public MutableGaugeInt containersRunning; + + @Metric("containers ready") + public MutableGaugeInt containersReady; + + @Metric("containers desired") + public MutableGaugeInt containersDesired; + + @Metric("containers succeeded") + public MutableGaugeInt containersSucceeded; + + @Metric("containers failed") + public MutableGaugeInt containersFailed; + + @Metric("containers preempted") + public MutableGaugeInt containersPreempted; + + @Metric("containers surplus") + public MutableGaugeInt surplusContainers; + + @Metric("containers failed due to disk failure") + public MutableGaugeInt containersDiskFailure; + + protected final MetricsRegistry registry; + + public ServiceMetrics(MetricsInfo metricsInfo) { + registry = new MetricsRegistry(metricsInfo); + } + + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + registry.snapshot(collector.addRecord(registry.info()), all); + } + + public static ServiceMetrics register(String name, String description) { + ServiceMetrics metrics = new ServiceMetrics(info(name, description)); + DefaultMetricsSystem.instance().register(name, description, metrics); + 
return metrics; + } + + public void tag(String name, String description, String value) { + registry.tag(name, description, value); + } + + @Override public String toString() { + return "ServiceMetrics{" + + "containersRequested=" + containersRequested.value() + + ", pendingAAContainers=" + pendingAAContainers.value() + + ", containersRunning=" + containersRunning.value() + + ", containersDesired=" + containersDesired.value() + + ", containersSucceeded=" + containersSucceeded.value() + + ", containersFailed=" + containersFailed.value() + + ", containersPreempted=" + containersPreempted.value() + + ", surplusContainers=" + surplusContainers.value() + '}'; + } +} + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java new file mode 100644 index 0000000..631f89e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java @@ -0,0 +1,706 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.api.RegistryOperationsFactory; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies; +import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.UpdatedContainer; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.client.api.TimelineV2Client; +import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; +import org.apache.hadoop.yarn.client.api.async.NMClientAsync; +import 
org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.service.api.ServiceApiConstants; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType; +import org.apache.hadoop.yarn.service.component.Component; +import org.apache.hadoop.yarn.service.component.ComponentEvent; +import org.apache.hadoop.yarn.service.component.ComponentEventType; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.containerlaunch.ContainerLaunchService; +import org.apache.hadoop.yarn.service.provider.ProviderUtils; +import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders; +import org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink; +import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils; +import org.apache.hadoop.yarn.util.BoundedAppender; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.nio.ByteBuffer; +import java.text.MessageFormat; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import 
java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.registry.client.api.RegistryConstants.*; +import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*; +import static org.apache.hadoop.yarn.service.component.ComponentEventType.*; + +/** + * + */ +public class ServiceScheduler extends CompositeService { + + private static final Logger LOG = + LoggerFactory.getLogger(ServiceScheduler.class); + private Service app; + + // component_name -> component + private final Map componentsByName = + new ConcurrentHashMap<>(); + + // id - > component + protected final Map componentsById = + new ConcurrentHashMap<>(); + + private final Map liveInstances = + new ConcurrentHashMap<>(); + + private ServiceMetrics serviceMetrics; + + private ServiceTimelinePublisher serviceTimelinePublisher; + + // Global diagnostics that will be reported to RM on eRxit. + // The unit the number of characters. This will be limited to 64 * 1024 + // characters. 
+ private BoundedAppender diagnostics = new BoundedAppender(64 * 1024); + + // A cache for loading config files from remote such as hdfs + public LoadingCache configFileCache = null; + + public ScheduledExecutorService executorService; + public Map globalTokens = new HashMap<>(); + + private AMRMClientAsync amRMClient; + private NMClientAsync nmClient; + private AsyncDispatcher dispatcher; + AsyncDispatcher compInstanceDispatcher; + private YarnRegistryViewForProviders yarnRegistryOperations; + private ServiceContext context; + private ContainerLaunchService containerLaunchService; + + public ServiceScheduler(ServiceContext context) { + super(context.service.getName()); + this.context = context; + } + + public void buildInstance(ServiceContext context, Configuration configuration) + throws YarnException { + app = context.service; + executorService = Executors.newScheduledThreadPool(10); + RegistryOperations registryClient = RegistryOperationsFactory + .createInstance("ServiceScheduler", configuration); + addIfService(registryClient); + yarnRegistryOperations = + createYarnRegistryOperations(context, registryClient); + + // register metrics + serviceMetrics = ServiceMetrics + .register(app.getName(), "Metrics for service"); + serviceMetrics.tag("type", "Metrics type [component or service]", "service"); + serviceMetrics.tag("appId", "Service id for service", app.getId()); + + amRMClient = createAMRMClient(); + addIfService(amRMClient); + + nmClient = createNMClient(); + addIfService(nmClient); + + dispatcher = new AsyncDispatcher("Component dispatcher"); + dispatcher.register(ComponentEventType.class, + new ComponentEventHandler()); + dispatcher.setDrainEventsOnStop(); + addIfService(dispatcher); + + compInstanceDispatcher = + new AsyncDispatcher("CompInstance dispatcher"); + compInstanceDispatcher.register(ComponentInstanceEventType.class, + new ComponentInstanceEventHandler()); + addIfService(compInstanceDispatcher); + containerLaunchService = new 
ContainerLaunchService(context.fs); + addService(containerLaunchService); + + if (YarnConfiguration.timelineServiceV2Enabled(configuration)) { + TimelineV2Client timelineClient = TimelineV2Client + .createTimelineClient(context.attemptId.getApplicationId()); + amRMClient.registerTimelineV2Client(timelineClient); + serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient); + addService(serviceTimelinePublisher); + DefaultMetricsSystem.instance().register("ServiceMetricsSink", + "For processing metrics to ATS", + new ServiceMetricsSink(serviceTimelinePublisher)); + LOG.info("Timeline v2 is enabled."); + } + + initGlobalTokensForSubstitute(context); + //substitute quicklinks + ProviderUtils.substituteMapWithTokens(app.getQuicklinks(), globalTokens); + createConfigFileCache(context.fs.getFileSystem()); + + createAllComponents(); + } + + protected YarnRegistryViewForProviders createYarnRegistryOperations( + ServiceContext context, RegistryOperations registryClient) { + return new YarnRegistryViewForProviders(registryClient, + RegistryUtils.currentUser(), YarnServiceConstants.APP_TYPE, app.getName(), + context.attemptId); + } + + protected NMClientAsync createNMClient() { + return NMClientAsync.createNMClientAsync(new NMClientCallback()); + } + + protected AMRMClientAsync createAMRMClient() { + return AMRMClientAsync + .createAMRMClientAsync(1000, new AMRMClientCallback()); + } + + @Override + public void serviceInit(Configuration conf) throws Exception { + try { + buildInstance(context, conf); + } catch (YarnException e) { + throw new YarnRuntimeException(e); + } + super.serviceInit(conf); + } + + @Override + public void serviceStop() throws Exception { + LOG.info("Stopping service scheduler"); + + if (executorService != null) { + executorService.shutdownNow(); + } + + DefaultMetricsSystem.shutdown(); + if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) { + serviceTimelinePublisher + .serviceAttemptUnregistered(context, diagnostics.toString()); + 
} + String msg = diagnostics.toString() + + "Navigate to the failed component for more details."; + amRMClient + .unregisterApplicationMaster(FinalApplicationStatus.ENDED, msg, ""); + LOG.info("Service " + app.getName() + + " unregistered with RM, with attemptId = " + context.attemptId + + ", diagnostics = " + diagnostics); + super.serviceStop(); + } + + @Override + public void serviceStart() throws Exception { + super.serviceStart(); + InetSocketAddress bindAddress = context.clientAMService.getBindAddress(); + RegisterApplicationMasterResponse response = amRMClient + .registerApplicationMaster(bindAddress.getHostName(), + bindAddress.getPort(), "N/A"); + if (response.getClientToAMTokenMasterKey() != null + && response.getClientToAMTokenMasterKey().remaining() != 0) { + context.secretManager + .setMasterKey(response.getClientToAMTokenMasterKey().array()); + } + registerServiceInstance(context.attemptId, app); + + // recover components based on containers sent from RM + recoverComponents(response); + + for (Component component : componentsById.values()) { + // Trigger initial evaluation of components + if (component.areDependenciesReady()) { + LOG.info("Triggering initial evaluation of component {}", + component.getName()); + ComponentEvent event = new ComponentEvent(component.getName(), FLEX) + .setDesired(component.getComponentSpec().getNumberOfContainers()); + component.handle(event); + } + } + } + + private void recoverComponents(RegisterApplicationMasterResponse response) { + List recoveredContainers = response + .getContainersFromPreviousAttempts(); + LOG.info("Received {} containers from previous attempt.", + recoveredContainers.size()); + Map existingRecords = new HashMap<>(); + List existingComps = null; + try { + existingComps = yarnRegistryOperations.listComponents(); + LOG.info("Found {} containers from ZK registry: {}", existingComps.size(), + existingComps); + } catch (Exception e) { + LOG.info("Could not read component paths: {}", e.getMessage()); + } 
+ if (existingComps != null) { + for (String existingComp : existingComps) { + try { + ServiceRecord record = + yarnRegistryOperations.getComponent(existingComp); + existingRecords.put(existingComp, record); + } catch (Exception e) { + LOG.warn("Could not resolve record for component {}: {}", + existingComp, e); + } + } + } + for (Container container : recoveredContainers) { + LOG.info("Handling container {} from previous attempt", + container.getId()); + ServiceRecord record = existingRecords.get(RegistryPathUtils + .encodeYarnID(container.getId().toString())); + if (record != null) { + Component comp = componentsById.get(container.getAllocationRequestId()); + ComponentEvent event = + new ComponentEvent(comp.getName(), CONTAINER_RECOVERED) + .setContainer(container) + .setInstance(comp.getComponentInstance(record.description)); + comp.handle(event); + // do not remove requests in this case because we do not know if they + // have already been removed + } else { + LOG.info("Record not found in registry for container {} from previous" + + " attempt, releasing", container.getId()); + amRMClient.releaseAssignedContainer(container.getId()); + } + } + } + + private void initGlobalTokensForSubstitute(ServiceContext context) { + // ZK + globalTokens.put(ServiceApiConstants.CLUSTER_ZK_QUORUM, getConfig() + .getTrimmed(KEY_REGISTRY_ZK_QUORUM, DEFAULT_REGISTRY_ZK_QUORUM)); + String user = null; + try { + user = UserGroupInformation.getCurrentUser().getShortUserName(); + } catch (IOException e) { + LOG.error("Failed to get user.", e); + } + globalTokens + .put(SERVICE_ZK_PATH, ServiceRegistryUtils.mkClusterPath(user, app.getName())); + + globalTokens.put(ServiceApiConstants.USER, user); + String dnsDomain = getConfig().getTrimmed(KEY_DNS_DOMAIN); + if (dnsDomain != null && !dnsDomain.isEmpty()) { + globalTokens.put(ServiceApiConstants.DOMAIN, dnsDomain); + } + // HDFS + String clusterFs = getConfig().getTrimmed(FS_DEFAULT_NAME_KEY); + if (clusterFs != null && 
!clusterFs.isEmpty()) { + globalTokens.put(ServiceApiConstants.CLUSTER_FS_URI, clusterFs); + globalTokens.put(ServiceApiConstants.CLUSTER_FS_HOST, + URI.create(clusterFs).getHost()); + } + globalTokens.put(SERVICE_HDFS_DIR, context.serviceHdfsDir); + // service name + globalTokens.put(SERVICE_NAME_LC, app.getName().toLowerCase()); + globalTokens.put(SERVICE_NAME, app.getName()); + } + + private void createConfigFileCache(final FileSystem fileSystem) { + this.configFileCache = + CacheBuilder.newBuilder().expireAfterAccess(10, TimeUnit.MINUTES) + .build(new CacheLoader() { + @Override public Object load(ConfigFile key) throws Exception { + switch (key.getType()) { + case HADOOP_XML: + try (FSDataInputStream input = fileSystem + .open(new Path(key.getSrcFile()))) { + org.apache.hadoop.conf.Configuration confRead = + new org.apache.hadoop.conf.Configuration(false); + confRead.addResource(input); + Map map = new HashMap<>(confRead.size()); + for (Map.Entry entry : confRead) { + map.put(entry.getKey(), entry.getValue()); + } + return map; + } + case TEMPLATE: + try (FSDataInputStream fileInput = fileSystem + .open(new Path(key.getSrcFile()))) { + return IOUtils.toString(fileInput); + } + default: + return null; + } + } + }); + context.configCache = configFileCache; + } + + private void registerServiceInstance(ApplicationAttemptId attemptId, + Service service) throws IOException { + LOG.info("Registering " + attemptId + ", " + service.getName() + + " into registry"); + ServiceRecord serviceRecord = new ServiceRecord(); + serviceRecord.set(YarnRegistryAttributes.YARN_ID, + attemptId.getApplicationId().toString()); + serviceRecord.set(YarnRegistryAttributes.YARN_PERSISTENCE, + PersistencePolicies.APPLICATION); + serviceRecord.description = "YarnServiceMaster"; + + // set any provided attributes + setUserProvidedServiceRecordAttributes(service.getConfiguration(), + serviceRecord); + + executorService.submit(new Runnable() { + @Override public void run() { + try { + 
yarnRegistryOperations.registerSelf(serviceRecord, false); + LOG.info("Registered service under {}; absolute path {}", + yarnRegistryOperations.getSelfRegistrationPath(), + yarnRegistryOperations.getAbsoluteSelfRegistrationPath()); + boolean isFirstAttempt = 1 == attemptId.getAttemptId(); + // delete the children in case there are any and this is an AM startup. + // just to make sure everything underneath is purged + if (isFirstAttempt) { + yarnRegistryOperations.deleteChildren( + yarnRegistryOperations.getSelfRegistrationPath(), true); + } + } catch (IOException e) { + LOG.error( + "Failed to register app " + app.getName() + " in registry"); + } + } + }); + if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) { + serviceTimelinePublisher.serviceAttemptRegistered(app, getConfig()); + } + } + + private void setUserProvidedServiceRecordAttributes( + org.apache.hadoop.yarn.service.api.records.Configuration conf, ServiceRecord record) { + String prefix = "service.record.attribute"; + for (Map.Entry entry : conf.getProperties().entrySet()) { + if (entry.getKey().startsWith(prefix)) { + String key = entry.getKey().substring(prefix.length() + 1); + record.set(key, entry.getValue().trim()); + } + } + } + + private void createAllComponents() { + long allocateId = 0; + + // sort components by dependencies + Collection sortedComponents = + ServiceApiUtil.sortByDependencies(app.getComponents()); + + for (org.apache.hadoop.yarn.service.api.records.Component compSpec : sortedComponents) { + Component component = new Component(compSpec, allocateId, context); + componentsById.put(allocateId, component); + componentsByName.put(component.getName(), component); + allocateId++; + } + } + + private final class ComponentEventHandler + implements EventHandler { + @Override + public void handle(ComponentEvent event) { + Component component = componentsByName.get(event.getName()); + + if (component == null) { + LOG.error("No component exists for " + event.getName()); + return; + } 
+ try { + component.handle(event); + } catch (Throwable t) { + LOG.error(MessageFormat + .format("[COMPONENT {0}]: Error in handling event type {1}", + component.getName(), event.getType()), t); + } + } + } + + private final class ComponentInstanceEventHandler + implements EventHandler { + @Override + public void handle(ComponentInstanceEvent event) { + ComponentInstance instance = + liveInstances.get(event.getContainerId()); + if (instance == null) { + LOG.error("No component instance exists for " + event.getContainerId()); + return; + } + try { + instance.handle(event); + } catch (Throwable t) { + LOG.error(instance.getCompInstanceId() + + ": Error in handling event type " + event.getType(), t); + } + } + } + + class AMRMClientCallback extends AMRMClientAsync.AbstractCallbackHandler { + + @Override + public void onContainersAllocated(List containers) { + LOG.info(containers.size() + " containers allocated. "); + for (Container container : containers) { + Component comp = componentsById.get(container.getAllocationRequestId()); + ComponentEvent event = + new ComponentEvent(comp.getName(), CONTAINER_ALLOCATED) + .setContainer(container); + dispatcher.getEventHandler().handle(event); + Collection requests = amRMClient + .getMatchingRequests(container.getAllocationRequestId()); + LOG.info("[COMPONENT {}]: {} outstanding container requests.", + comp.getName(), requests.size()); + // remove the corresponding request + if (requests.iterator().hasNext()) { + LOG.info("[COMPONENT {}]: removing one container request.", comp + .getName()); + AMRMClient.ContainerRequest request = requests.iterator().next(); + amRMClient.removeContainerRequest(request); + } + } + } + + @Override + public void onContainersCompleted(List statuses) { + for (ContainerStatus status : statuses) { + ContainerId containerId = status.getContainerId(); + ComponentInstance instance = liveInstances.get(status.getContainerId()); + if (instance == null) { + LOG.warn( + "Container {} Completed. 
No component instance exists. exitStatus={}. diagnostics={} ", + containerId, status.getExitStatus(), status.getDiagnostics()); + return; + } + ComponentEvent event = + new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED) + .setStatus(status).setInstance(instance); + dispatcher.getEventHandler().handle(event); + } + } + + @Override + public void onContainersUpdated(List containers) { + } + + @Override public void onShutdownRequest() { + //Was used for non-work-preserving restart in YARN, should be deprecated. + } + + @Override public void onNodesUpdated(List updatedNodes) { + StringBuilder str = new StringBuilder(); + str.append("Nodes updated info: ").append(System.lineSeparator()); + for (NodeReport report : updatedNodes) { + str.append(report.getNodeId()).append(", state = ") + .append(report.getNodeState()).append(", healthDiagnostics = ") + .append(report.getHealthReport()).append(System.lineSeparator()); + } + LOG.warn(str.toString()); + } + + @Override public float getProgress() { + // get running containers over desired containers + long total = 0; + for (org.apache.hadoop.yarn.service.api.records.Component component : app + .getComponents()) { + total += component.getNumberOfContainers(); + } + // Probably due to user flexed down to 0 + if (total == 0) { + return 100; + } + return Math.max((float) liveInstances.size() / total * 100, 100); + } + + @Override public void onError(Throwable e) { + LOG.error("Error in AMRMClient callback handler ", e); + } + } + + + private class NMClientCallback extends NMClientAsync.AbstractCallbackHandler { + + @Override public void onContainerStarted(ContainerId containerId, + Map allServiceResponse) { + ComponentInstance instance = liveInstances.get(containerId); + if (instance == null) { + LOG.error("No component instance exists for " + containerId); + return; + } + ComponentEvent event = + new ComponentEvent(instance.getCompName(), CONTAINER_STARTED) + .setInstance(instance); + 
dispatcher.getEventHandler().handle(event); + } + + @Override public void onContainerStatusReceived(ContainerId containerId, + ContainerStatus containerStatus) { + + } + + @Override public void onContainerStopped(ContainerId containerId) { + + } + + @Override + public void onStartContainerError(ContainerId containerId, Throwable t) { + ComponentInstance instance = liveInstances.get(containerId); + if (instance == null) { + LOG.error("No component instance exists for " + containerId); + return; + } + LOG.error("Failed to start " + containerId, t); + amRMClient.releaseAssignedContainer(containerId); + // After container released, it'll get CONTAINER_COMPLETED event from RM + // automatically which will trigger stopping COMPONENT INSTANCE + } + + @Override public void onContainerResourceIncreased(ContainerId containerId, + Resource resource) { + + } + + @Override public void onContainerResourceUpdated(ContainerId containerId, + Resource resource) { + + } + + @Override public void onGetContainerStatusError(ContainerId containerId, + Throwable t) { + + } + + @Override + public void onIncreaseContainerResourceError(ContainerId containerId, + Throwable t) { + + } + + @Override + public void onUpdateContainerResourceError(ContainerId containerId, + Throwable t) { + + } + + @Override + public void onStopContainerError(ContainerId containerId, Throwable t) { + + } + } + + public ServiceMetrics getServiceMetrics() { + return serviceMetrics; + } + + public AMRMClientAsync getAmRMClient() { + return amRMClient; + } + + public NMClientAsync getNmClient() { + return nmClient; + } + + public void addLiveCompInstance(ContainerId containerId, + ComponentInstance instance) { + liveInstances.put(containerId, instance); + } + + public void removeLiveCompInstance(ContainerId containerId) { + liveInstances.remove(containerId); + } + + public AsyncDispatcher getCompInstanceDispatcher() { + return compInstanceDispatcher; + } + + public YarnRegistryViewForProviders 
getYarnRegistryOperations() { + return yarnRegistryOperations; + } + + public ServiceTimelinePublisher getServiceTimelinePublisher() { + return serviceTimelinePublisher; + } + + public Map getLiveInstances() { + return liveInstances; + } + + public ContainerLaunchService getContainerLaunchService() { + return containerLaunchService; + } + + public ServiceContext getContext() { + return context; + } + + public Map getAllComponents() { + return componentsByName; + } + + public Service getApp() { + return app; + } + + public AsyncDispatcher getDispatcher() { + return dispatcher; + } + + public BoundedAppender getDiagnostics() { + return diagnostics; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/ServiceApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/ServiceApiConstants.java new file mode 100644 index 0000000..a85191c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/ServiceApiConstants.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$; + +/** + * This class defines constants that can be used in input spec for + * variable substitutions + */ +@InterfaceAudience.Public +@InterfaceStability.Unstable +public interface ServiceApiConstants { + + // Constants for service + String SERVICE_NAME = $("SERVICE_NAME"); + + String SERVICE_NAME_LC = $("SERVICE_NAME.lc"); + + String USER = $("USER"); + + String DOMAIN = $("DOMAIN"); + + // Constants for component + String COMPONENT_NAME = $("COMPONENT_NAME"); + + String COMPONENT_NAME_LC = $("COMPONENT_NAME.lc"); + + String COMPONENT_INSTANCE_NAME = $("COMPONENT_INSTANCE_NAME"); + + // Constants for component instance + String COMPONENT_ID = $("COMPONENT_ID"); + + String CONTAINER_ID = $("CONTAINER_ID"); + + // Templates for component instance host/IP + String COMPONENT_HOST = $("%s_HOST"); + + String COMPONENT_IP = $("%s_IP"); + + // Constants for default cluster ZK + String CLUSTER_ZK_QUORUM = $("CLUSTER_ZK_QUORUM"); + + // URI for the default cluster fs + String CLUSTER_FS_URI = $("CLUSTER_FS_URI"); + + // the host component of the cluster fs UI + String CLUSTER_FS_HOST = $("CLUSTER_FS_HOST"); + + // Path in zookeeper for a specific service + String SERVICE_ZK_PATH = $("SERVICE_ZK_PATH"); + + // Constants for service specific hdfs dir + String SERVICE_HDFS_DIR = $("SERVICE_HDFS_DIR"); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java new file mode 100644 index 0000000..ce062cc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Artifact.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.io.Serializable; +import java.util.Objects; + +import javax.xml.bind.annotation.XmlEnum; +import javax.xml.bind.annotation.XmlType; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Artifact of an service component. 
+ **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "Artifact of an service component") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Artifact implements Serializable { + private static final long serialVersionUID = 3608929500111099035L; + + private String id = null; + + /** + * Artifact Type. DOCKER, TARBALL or SERVICE + **/ + @XmlType(name = "artifact_type") + @XmlEnum + public enum TypeEnum { + DOCKER("DOCKER"), TARBALL("TARBALL"), SERVICE("SERVICE"); + + private String value; + + TypeEnum(String value) { + this.value = value; + } + + @Override + @JsonValue + public String toString() { + return value; + } + } + + private TypeEnum type = TypeEnum.DOCKER; + private String uri = null; + + /** + * Artifact id. Examples are package location uri for tarball based services, + * image name for docker, etc. + **/ + public Artifact id(String id) { + this.id = id; + return this; + } + + @ApiModelProperty(example = "null", required = true, value = "Artifact id. Examples are package location uri for tarball based services, image name for docker, etc.") + @JsonProperty("id") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + /** + * Artifact type, like docker, tarball, etc. (optional). + **/ + public Artifact type(TypeEnum type) { + this.type = type; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact type, like docker, tarball, etc. (optional).") + @JsonProperty("type") + public TypeEnum getType() { + return type; + } + + public void setType(TypeEnum type) { + this.type = type; + } + + /** + * Artifact location to support multiple artifact stores (optional). 
+ **/ + public Artifact uri(String uri) { + this.uri = uri; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact location to support multiple artifact stores (optional).") + @JsonProperty("uri") + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Artifact artifact = (Artifact) o; + return Objects.equals(this.id, artifact.id) + && Objects.equals(this.type, artifact.type) + && Objects.equals(this.uri, artifact.uri); + } + + @Override + public int hashCode() { + return Objects.hash(id, type, uri); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Artifact {\n"); + + sb.append(" id: ").append(toIndentedString(id)).append("\n"); + sb.append(" type: ").append(toIndentedString(type)).append("\n"); + sb.append(" uri: ").append(toIndentedString(uri)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java new file mode 100644 index 0000000..7ac86d4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/BaseResource.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import java.io.Serializable; +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class BaseResource implements Serializable { + private static final long serialVersionUID = 1492603053176889431L; + + private String uri; + + /** + * Resource location for a service, e.g. + * /ws/v1/services/helloworld + * + **/ + public String getUri() { + return uri; + } + + public void setUri(String uri) { + this.uri = uri; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("BaseResource [uri="); + builder.append(uri); + builder.append("]"); + return builder.toString(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java new file mode 100644 index 0000000..fe9c043 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java @@ -0,0 +1,430 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * One or more components of the service. If the service is HBase say, + * then the component can be a simple role like master or regionserver. If the + * service is a complex business webapp then a component can be other + * services say Kafka or Storm. Thereby it opens up the support for complex + * and nested services. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "One or more components of the service. If the service is HBase say, then the component can be a simple role like master or regionserver. If the service is a complex business webapp then a component can be other services say Kafka or Storm. 
Thereby it opens up the support for complex and nested services.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@XmlRootElement +@JsonInclude(JsonInclude.Include.NON_NULL) +public class Component implements Serializable { + private static final long serialVersionUID = -8430058381509087805L; + + private String name = null; + private List dependencies = new ArrayList(); + private ReadinessCheck readinessCheck = null; + private Artifact artifact = null; + private String launchCommand = null; + private Resource resource = null; + private Long numberOfContainers = null; + private Boolean runPrivilegedContainer = false; + private PlacementPolicy placementPolicy = null; + private ComponentState state = ComponentState.FLEXING; + private Configuration configuration = new Configuration(); + private List quicklinks = new ArrayList(); + private List containers = + Collections.synchronizedList(new ArrayList()); + + /** + * Name of the service component (mandatory). + **/ + public Component name(String name) { + this.name = name; + return this; + } + + @ApiModelProperty(example = "null", required = true, value = "Name of the service component (mandatory).") + @JsonProperty("name") + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + /** + * An array of service components which should be in READY state (as + * defined by readiness check), before this component can be started. The + * dependencies across all components of a service should be represented + * as a DAG. + **/ + public Component dependencies(List dependencies) { + this.dependencies = dependencies; + return this; + } + + @ApiModelProperty(example = "null", value = "An array of service components which should be in READY state (as defined by readiness check), before this component can be started. 
The dependencies across all components of an service should be represented as a DAG.") + @JsonProperty("dependencies") + public List getDependencies() { + return dependencies; + } + + public void setDependencies(List dependencies) { + this.dependencies = dependencies; + } + + /** + * Readiness check for this component. + **/ + public Component readinessCheck(ReadinessCheck readinessCheck) { + this.readinessCheck = readinessCheck; + return this; + } + + @ApiModelProperty(example = "null", value = "Readiness check for this component.") + @JsonProperty("readiness_check") + public ReadinessCheck getReadinessCheck() { + return readinessCheck; + } + + @XmlElement(name = "readiness_check") + public void setReadinessCheck(ReadinessCheck readinessCheck) { + this.readinessCheck = readinessCheck; + } + + /** + * Artifact of the component (optional). If not specified, the service + * level global artifact takes effect. + **/ + public Component artifact(Artifact artifact) { + this.artifact = artifact; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact of the component (optional). If not specified, the service level global artifact takes effect.") + @JsonProperty("artifact") + public Artifact getArtifact() { + return artifact; + } + + public void setArtifact(Artifact artifact) { + this.artifact = artifact; + } + + /** + * The custom launch command of this component (optional). When specified at + * the component level, it overrides the value specified at the global level + * (if any). + **/ + public Component launchCommand(String launchCommand) { + this.launchCommand = launchCommand; + return this; + } + + @ApiModelProperty(example = "null", value = "The custom launch command of this component (optional). 
When specified at the component level, it overrides the value specified at the global level (if any).") + @JsonProperty("launch_command") + public String getLaunchCommand() { + return launchCommand; + } + + @XmlElement(name = "launch_command") + public void setLaunchCommand(String launchCommand) { + this.launchCommand = launchCommand; + } + + /** + * Resource of this component (optional). If not specified, the service + * level global resource takes effect. + **/ + public Component resource(Resource resource) { + this.resource = resource; + return this; + } + + @ApiModelProperty(example = "null", value = "Resource of this component (optional). If not specified, the service level global resource takes effect.") + @JsonProperty("resource") + public Resource getResource() { + return resource; + } + + public void setResource(Resource resource) { + this.resource = resource; + } + + /** + * Number of containers for this component (optional). If not specified, + * the service level global number_of_containers takes effect. + **/ + public Component numberOfContainers(Long numberOfContainers) { + this.numberOfContainers = numberOfContainers; + return this; + } + + @ApiModelProperty(example = "null", value = "Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect.") + @JsonProperty("number_of_containers") + public Long getNumberOfContainers() { + return numberOfContainers; + } + + @XmlElement(name = "number_of_containers") + public void setNumberOfContainers(Long numberOfContainers) { + this.numberOfContainers = numberOfContainers; + } + + @ApiModelProperty(example = "null", value = "Containers of a started component. Specifying a value for this attribute for the POST payload raises a validation error. 
This blob is available only in the GET response of a started service.") + @JsonProperty("containers") + public List getContainers() { + return containers; + } + + public void setContainers(List containers) { + this.containers = containers; + } + + public void addContainer(Container container) { + this.containers.add(container); + } + + public void removeContainer(Container container) { + containers.remove(container); + } + public Container getContainer(String id) { + for (Container container : containers) { + if (container.getId().equals(id)) { + return container; + } + } + return null; + } + + /** + * Run all containers of this component in privileged mode (YARN-4262). + **/ + public Component runPrivilegedContainer(Boolean runPrivilegedContainer) { + this.runPrivilegedContainer = runPrivilegedContainer; + return this; + } + + @ApiModelProperty(example = "null", value = "Run all containers of this component in privileged mode (YARN-4262).") + @JsonProperty("run_privileged_container") + public Boolean getRunPrivilegedContainer() { + return runPrivilegedContainer; + } + + @XmlElement(name = "run_privileged_container") + public void setRunPrivilegedContainer(Boolean runPrivilegedContainer) { + this.runPrivilegedContainer = runPrivilegedContainer; + } + + /** + * Advanced scheduling and placement policies for all containers of this + * component (optional). If not specified, the service level placement_policy + * takes effect. Refer to the description at the global level for more + * details. + **/ + public Component placementPolicy(PlacementPolicy placementPolicy) { + this.placementPolicy = placementPolicy; + return this; + } + + @ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the service level placement_policy takes effect. 
Refer to the description at the global level for more details.") + @JsonProperty("placement_policy") + public PlacementPolicy getPlacementPolicy() { + return placementPolicy; + } + + @XmlElement(name = "placement_policy") + public void setPlacementPolicy(PlacementPolicy placementPolicy) { + this.placementPolicy = placementPolicy; + } + + /** + * Config properties for this component. + **/ + public Component configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } + + @ApiModelProperty(example = "null", value = "Config properties for this component.") + @JsonProperty("configuration") + public Configuration getConfiguration() { + return configuration; + } + + public void setConfiguration(Configuration configuration) { + this.configuration = configuration; + } + + /** + * A list of quicklink keys defined at the service level, and to be + * resolved by this component. + **/ + public Component quicklinks(List quicklinks) { + this.quicklinks = quicklinks; + return this; + } + + @ApiModelProperty(example = "null", value = "A list of quicklink keys defined at the service level, and to be resolved by this component.") + @JsonProperty("quicklinks") + public List getQuicklinks() { + return quicklinks; + } + + public void setQuicklinks(List quicklinks) { + this.quicklinks = quicklinks; + } + + public Component state(ComponentState state) { + this.state = state; + return this; + } + + @ApiModelProperty(example = "null", value = "State of the component.") + @JsonProperty("state") + public ComponentState getState() { + return state; + } + + public void setState(ComponentState state) { + this.state = state; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Component component = (Component) o; + return Objects.equals(this.name, component.name) + && Objects.equals(this.dependencies, component.dependencies) + && 
Objects.equals(this.readinessCheck, component.readinessCheck) + && Objects.equals(this.artifact, component.artifact) + && Objects.equals(this.launchCommand, component.launchCommand) + && Objects.equals(this.resource, component.resource) + && Objects.equals(this.numberOfContainers, component.numberOfContainers) + && Objects.equals(this.runPrivilegedContainer, + component.runPrivilegedContainer) + && Objects.equals(this.placementPolicy, component.placementPolicy) + && Objects.equals(this.configuration, component.configuration) + && Objects.equals(this.quicklinks, component.quicklinks) + && Objects.equals(this.state, component.state); + } + + @Override + public int hashCode() { + return Objects.hash(name, dependencies, readinessCheck, artifact, + launchCommand, resource, numberOfContainers, + runPrivilegedContainer, placementPolicy, configuration, quicklinks, state); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Component {\n"); + + sb.append(" name: ").append(toIndentedString(name)).append("\n"); + sb.append(" state: ").append(toIndentedString(state)).append("\n"); + sb.append(" dependencies: ").append(toIndentedString(dependencies)) + .append("\n"); + sb.append(" readinessCheck: ").append(toIndentedString(readinessCheck)) + .append("\n"); + sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n"); + sb.append(" launchCommand: ").append(toIndentedString(launchCommand)) + .append("\n"); + sb.append(" resource: ").append(toIndentedString(resource)).append("\n"); + sb.append(" numberOfContainers: ") + .append(toIndentedString(numberOfContainers)).append("\n"); + sb.append(" containers: ").append(toIndentedString(containers)) + .append("\n"); + sb.append(" runPrivilegedContainer: ") + .append(toIndentedString(runPrivilegedContainer)).append("\n"); + sb.append(" placementPolicy: ").append(toIndentedString(placementPolicy)) + .append("\n"); + sb.append(" configuration: 
").append(toIndentedString(configuration)) + .append("\n"); + sb.append(" quicklinks: ").append(toIndentedString(quicklinks)) + .append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } + + /** + * Merge from another component into this component without overwriting. + */ + public void mergeFrom(Component that) { + if (this.getArtifact() == null) { + this.setArtifact(that.getArtifact()); + } + if (this.getResource() == null) { + this.setResource(that.getResource()); + } + if (this.getNumberOfContainers() == null) { + this.setNumberOfContainers(that.getNumberOfContainers()); + } + if (this.getLaunchCommand() == null) { + this.setLaunchCommand(that.getLaunchCommand()); + } + this.getConfiguration().mergeFrom(that.getConfiguration()); + if (this.getQuicklinks() == null) { + this.setQuicklinks(that.getQuicklinks()); + } + if (this.getRunPrivilegedContainer() == null) { + this.setRunPrivilegedContainer(that.getRunPrivilegedContainer()); + } + if (this.getDependencies() == null) { + this.setDependencies(that.getDependencies()); + } + if (this.getPlacementPolicy() == null) { + this.setPlacementPolicy(that.getPlacementPolicy()); + } + if (this.getReadinessCheck() == null) { + this.setReadinessCheck(that.getReadinessCheck()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ComponentState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ComponentState.java new file mode 100644 index 0000000..702a9ae --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ComponentState.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "The current state of a component.") +public enum ComponentState { + FLEXING, STABLE +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java new file mode 100644 index 0000000..fb088e2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ConfigFile.java @@ -0,0 +1,233 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package org.apache.hadoop.yarn.service.api.records;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonValue;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlEnum;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;

import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

/**
 * A config file that needs to be created and made available as a volume in a
 * service component container.
 **/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "A config file that needs to be created and made available as a volume in an service component container.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@XmlRootElement
@JsonInclude(JsonInclude.Include.NON_NULL)
public class ConfigFile implements Serializable {
  private static final long serialVersionUID = -7009402089417704612L;

  /**
   * Config Type. XML, JSON, YAML, TEMPLATE, ENV and HADOOP_XML are supported.
   **/
  @XmlType(name = "config_type")
  @XmlEnum
  public enum TypeEnum {
    XML("XML"), PROPERTIES("PROPERTIES"), JSON("JSON"), YAML("YAML"),
    TEMPLATE("TEMPLATE"), ENV("ENV"), HADOOP_XML("HADOOP_XML");

    private final String value;

    TypeEnum(String value) {
      this.value = value;
    }

    @Override
    @JsonValue
    public String toString() {
      return value;
    }
  }

  private TypeEnum type = null;
  private String destFile = null;
  private String srcFile = null;
  private Map<String, String> props = new HashMap<>();

  /**
   * Deep copy: scalar fields are copied and the props map is duplicated so
   * that mutating the copy does not affect this instance.
   */
  public ConfigFile copy() {
    ConfigFile copy = new ConfigFile();
    copy.setType(this.getType());
    copy.setSrcFile(this.getSrcFile());
    copy.setDestFile(this.getDestFile());
    if (this.getProps() != null && !this.getProps().isEmpty()) {
      copy.getProps().putAll(this.getProps());
    }
    return copy;
  }

  /**
   * Config file in the standard format like xml, properties, json, yaml,
   * template.
   **/
  public ConfigFile type(TypeEnum type) {
    this.type = type;
    return this;
  }

  @ApiModelProperty(example = "null", value = "Config file in the standard format like xml, properties, json, yaml, template.")
  @JsonProperty("type")
  public TypeEnum getType() {
    return type;
  }

  public void setType(TypeEnum type) {
    this.type = type;
  }

  /**
   * The absolute path that this configuration file should be mounted as, in
   * the service container.
   **/
  public ConfigFile destFile(String destFile) {
    this.destFile = destFile;
    return this;
  }

  @ApiModelProperty(example = "null", value = "The absolute path that this configuration file should be mounted as, in the service container.")
  @JsonProperty("dest_file")
  public String getDestFile() {
    return destFile;
  }

  @XmlElement(name = "dest_file")
  public void setDestFile(String destFile) {
    this.destFile = destFile;
  }

  /**
   * This provides the source location of the configuration file, the content
   * of which is dumped to dest_file post property substitutions, in the format
   * as specified in type. Typically the src_file would point to a source
   * controlled network accessible file maintained by tools like puppet, chef,
   * or hdfs etc. Currently, only hdfs is supported.
   **/
  public ConfigFile srcFile(String srcFile) {
    this.srcFile = srcFile;
    return this;
  }

  @ApiModelProperty(example = "null", value = "This provides the source location of the configuration file, "
      + "the content of which is dumped to dest_file post property substitutions, in the format as specified in type. "
      + "Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.")
  @JsonProperty("src_file")
  public String getSrcFile() {
    return srcFile;
  }

  @XmlElement(name = "src_file")
  public void setSrcFile(String srcFile) {
    this.srcFile = srcFile;
  }

  /**
   * A blob of key value pairs that will be dumped in the dest_file in the
   * format as specified in type. If src_file is specified, src_file content
   * are dumped in the dest_file and these properties will overwrite, if any,
   * existing properties in src_file or be added as new properties in src_file.
   **/
  public ConfigFile props(Map<String, String> props) {
    this.props = props;
    return this;
  }

  @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be dumped in the dest_file in the format as specified in type."
      + " If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any,"
      + " existing properties in src_file or be added as new properties in src_file.")
  @JsonProperty("props")
  public Map<String, String> getProps() {
    return props;
  }

  public void setProps(Map<String, String> props) {
    this.props = props;
  }

  /**
   * Look up a property as a long.
   * @param name property name; may be null
   * @param defaultValue returned when name is null or the property is absent
   * @return the parsed value, or defaultValue when missing
   * @throws NumberFormatException if the stored value is not a valid long
   */
  public long getLong(String name, long defaultValue) {
    if (name == null) {
      return defaultValue;
    }
    String value = props.get(name.trim());
    if (value == null || value.isEmpty()) {
      // Previously this fell through to Long.parseLong(null) and threw a
      // NumberFormatException instead of honoring defaultValue.
      return defaultValue;
    }
    return Long.parseLong(value);
  }

  /**
   * Look up a property as a boolean.
   * @param name property name; may be null
   * @param defaultValue returned when name is null or the property is absent
   * @return the parsed value, or defaultValue when missing
   */
  public boolean getBoolean(String name, boolean defaultValue) {
    if (name == null) {
      return defaultValue;
    }
    String value = props.get(name.trim());
    if (value == null) {
      // Previously Boolean.valueOf(null) silently returned false and the
      // defaultValue parameter was never used for absent keys.
      return defaultValue;
    }
    return Boolean.valueOf(value);
  }

  @Override
  public boolean equals(java.lang.Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ConfigFile configFile = (ConfigFile) o;
    // props is deliberately excluded from equality (identity of a config
    // file is its type and source/destination paths).
    return Objects.equals(this.type, configFile.type)
        && Objects.equals(this.destFile, configFile.destFile)
        && Objects.equals(this.srcFile, configFile.srcFile);
  }

  @Override
  public int hashCode() {
    // Must only hash the fields compared in equals(); hashing props as well
    // broke the equals/hashCode contract (equal objects, different hashes).
    return Objects.hash(type, destFile, srcFile);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("class ConfigFile {\n");

    sb.append("    type: ").append(toIndentedString(type)).append("\n");
    sb.append("    destFile: ").append(toIndentedString(destFile)).append("\n");
    sb.append("    srcFile: ").append(toIndentedString(srcFile)).append("\n");
    sb.append("    props: ").append(toIndentedString(props)).append("\n");
    sb.append("}");
    return sb.toString();
  }

  /**
   * Convert the given object to string with each line indented by 4 spaces
   * (except the first line).
   */
  private String toIndentedString(java.lang.Object o) {
    if (o == null) {
      return "null";
    }
    return o.toString().replace("\n", "\n    ");
  }
}
package org.apache.hadoop.yarn.service.api.records;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

import java.util.Locale;

/**
 * Supported on-disk formats for config files generated for a service
 * component. Each constant carries the canonical lower-case suffix used to
 * identify it.
 */
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum ConfigFormat {

  JSON("json"),
  PROPERTIES("properties"),
  XML("xml"),
  HADOOP_XML("hadoop_xml"),
  ENV("env"),
  TEMPLATE("template"),
  YAML("yaml");

  // Canonical lower-case identifier for this format.
  private final String suffix;

  ConfigFormat(String suffix) {
    this.suffix = suffix;
  }

  public String getSuffix() {
    return suffix;
  }

  @Override
  public String toString() {
    return suffix;
  }

  /**
   * Get the format whose suffix matches the given name, case-insensitively.
   * @param type format name/suffix; may be null
   * @return the matching format, or null when type is null or unrecognized
   */
  public static ConfigFormat resolve(String type) {
    if (type == null) {
      // Previously this NPE'd on type.toLowerCase(); a null input now simply
      // resolves to "no match".
      return null;
    }
    // Hoisted out of the loop: the lower-cased form is loop-invariant.
    String lower = type.toLowerCase(Locale.ENGLISH);
    for (ConfigFormat format : values()) {
      if (format.getSuffix().equals(lower)) {
        return format;
      }
    }
    return null;
  }
}
package org.apache.hadoop.yarn.service.api.records;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.yarn.service.utils.SliderUtils;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
 * Set of configuration properties that can be injected into the service
 * components via envs, files and custom pluggable helper docker containers.
 * Files of several standard formats like xml, properties, json, yaml and
 * templates will be supported.
 **/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Configuration implements Serializable {
  private static final long serialVersionUID = -4330788704981074466L;

  // Raw types replaced with proper generics; all three collections are
  // mutable and never null.
  private Map<String, String> properties = new HashMap<>();
  private Map<String, String> env = new HashMap<>();
  private List<ConfigFile> files = new ArrayList<>();

  /**
   * A blob of key-value pairs of common service properties.
   **/
  public Configuration properties(Map<String, String> properties) {
    this.properties = properties;
    return this;
  }

  @ApiModelProperty(example = "null", value = "A blob of key-value pairs of common service properties.")
  @JsonProperty("properties")
  public Map<String, String> getProperties() {
    return properties;
  }

  public void setProperties(Map<String, String> properties) {
    this.properties = properties;
  }

  /**
   * A blob of key-value pairs which will be appended to the default system
   * properties and handed off to the service at start time. All placeholder
   * references to properties will be substituted before injection.
   **/
  public Configuration env(Map<String, String> env) {
    this.env = env;
    return this;
  }

  @ApiModelProperty(example = "null", value = "A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.")
  @JsonProperty("env")
  public Map<String, String> getEnv() {
    return env;
  }

  public void setEnv(Map<String, String> env) {
    this.env = env;
  }

  /**
   * Array of list of files that needs to be created and made available as
   * volumes in the service component containers.
   **/
  public Configuration files(List<ConfigFile> files) {
    this.files = files;
    return this;
  }

  @ApiModelProperty(example = "null", value = "Array of list of files that needs to be created and made available as volumes in the service component containers.")
  @JsonProperty("files")
  public List<ConfigFile> getFiles() {
    return files;
  }

  public void setFiles(List<ConfigFile> files) {
    this.files = files;
  }

  /**
   * Look up a property as a long; defaultValue is returned when the property
   * is absent or empty.
   * @throws NumberFormatException if the stored value is not a valid long
   */
  public long getPropertyLong(String name, long defaultValue) {
    String value = getProperty(name);
    if (StringUtils.isEmpty(value)) {
      return defaultValue;
    }
    return Long.parseLong(value);
  }

  /**
   * Look up a property as an int; defaultValue is returned when the property
   * is absent or empty.
   * @throws NumberFormatException if the stored value is not a valid int
   */
  public int getPropertyInt(String name, int defaultValue) {
    String value = getProperty(name);
    if (StringUtils.isEmpty(value)) {
      return defaultValue;
    }
    return Integer.parseInt(value);
  }

  /**
   * Look up a property as a boolean; defaultValue is returned when the
   * property is absent or empty.
   */
  public boolean getPropertyBool(String name, boolean defaultValue) {
    String value = getProperty(name);
    if (StringUtils.isEmpty(value)) {
      return defaultValue;
    }
    return Boolean.parseBoolean(value);
  }

  /**
   * Look up a property; defaultValue is returned when the property is absent
   * or empty.
   */
  public String getProperty(String name, String defaultValue) {
    String value = getProperty(name);
    if (StringUtils.isEmpty(value)) {
      return defaultValue;
    }
    return value;
  }

  public void setProperty(String name, String value) {
    properties.put(name, value);
  }

  /** @return the property for the trimmed name, or null if name is null. */
  public String getProperty(String name) {
    if (name == null) {
      // Previously name.trim() threw NPE on a null key.
      return null;
    }
    return properties.get(name.trim());
  }

  /** @return the env value for the trimmed name, or null if name is null. */
  public String getEnv(String name) {
    if (name == null) {
      // Previously name.trim() threw NPE on a null key.
      return null;
    }
    return env.get(name.trim());
  }

  @Override
  public boolean equals(java.lang.Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    Configuration configuration = (Configuration) o;
    return Objects.equals(this.properties, configuration.properties)
        && Objects.equals(this.env, configuration.env)
        && Objects.equals(this.files, configuration.files);
  }

  @Override
  public int hashCode() {
    return Objects.hash(properties, env, files);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("class Configuration {\n");

    sb.append("    properties: ").append(toIndentedString(properties))
        .append("\n");
    sb.append("    env: ").append(toIndentedString(env)).append("\n");
    sb.append("    files: ").append(toIndentedString(files)).append("\n");
    sb.append("}");
    return sb.toString();
  }

  /**
   * Convert the given object to string with each line indented by 4 spaces
   * (except the first line).
   */
  private String toIndentedString(java.lang.Object o) {
    if (o == null) {
      return "null";
    }
    return o.toString().replace("\n", "\n    ");
  }

  /**
   * Merge all properties and envs from that configuration to this
   * configuration. For ConfigFiles with matching dest_file, the properties of
   * that ConfigFile are merged into this one's; remaining new files from that
   * configuration are appended as copies. Existing keys win (duplicates from
   * `that` are ignored).
   */
  public synchronized void mergeFrom(Configuration that) {
    SliderUtils.mergeMapsIgnoreDuplicateKeys(this.properties, that
        .getProperties());
    SliderUtils.mergeMapsIgnoreDuplicateKeys(this.env, that.getEnv());

    // Index that's files by destination path for O(1) matching.
    Map<String, ConfigFile> thatMap = new HashMap<>();
    for (ConfigFile file : that.getFiles()) {
      thatMap.put(file.getDestFile(), file.copy());
    }
    for (ConfigFile thisFile : files) {
      if (thatMap.containsKey(thisFile.getDestFile())) {
        ConfigFile thatFile = thatMap.get(thisFile.getDestFile());
        SliderUtils.mergeMapsIgnoreDuplicateKeys(thisFile.getProps(),
            thatFile.getProps());
        thatMap.remove(thisFile.getDestFile());
      }
    }
    // add remaining new files from that Configuration
    for (ConfigFile thatFile : thatMap.values()) {
      files.add(thatFile.copy());
    }
  }
}
package org.apache.hadoop.yarn.service.api.records;

import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;

import java.util.Date;
import java.util.Objects;

import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * An instance of a running service container.
 *
 * Serialization model: JAXB (@XmlElement maps snake_case names) plus Jackson
 * (@JsonProperty); null fields are omitted from JSON output. Identity for
 * equals/hashCode is the container id alone.
 **/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@ApiModel(description = "An instance of a running service container")
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
@XmlRootElement
@JsonInclude(JsonInclude.Include.NON_NULL)
public class Container extends BaseResource {
  private static final long serialVersionUID = -8955788064529288L;

  private String id = null;
  private Date launchTime = null;
  private String ip = null;
  private String hostname = null;
  private String bareHost = null;
  private ContainerState state = null;
  private String componentInstanceName = null;
  private Resource resource = null;
  private Artifact artifact = null;
  private Boolean privilegedContainer = null;

  /**
   * Unique container id of a running service, e.g.
   * container_e3751_1458061340047_0008_01_000002.
   **/
  public Container id(String id) {
    this.id = id;
    return this;
  }

  @ApiModelProperty(example = "null", value = "Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002.")
  @JsonProperty("id")
  public String getId() {
    return id;
  }

  public void setId(String id) {
    this.id = id;
  }

  /**
   * The time when the container was created, e.g. 2016-03-16T01:01:49.000Z.
   * This will most likely be different from cluster launch time.
   * Date is mutable, so every accessor defensively clones it.
   **/
  public Container launchTime(Date launchTime) {
    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
    return this;
  }

  @ApiModelProperty(example = "null", value = "The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from cluster launch time.")
  @JsonProperty("launch_time")
  public Date getLaunchTime() {
    // Defensive copy: callers cannot mutate our internal Date.
    return launchTime == null ? null : (Date) launchTime.clone();
  }

  @XmlElement(name = "launch_time")
  public void setLaunchTime(Date launchTime) {
    this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
  }

  /**
   * IP address of a running container, e.g. 172.31.42.141. The IP address and
   * hostname attribute values are dependent on the cluster/docker network
   * setup as per YARN-4007.
   **/
  public Container ip(String ip) {
    this.ip = ip;
    return this;
  }

  @ApiModelProperty(example = "null", value = "IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.")
  @JsonProperty("ip")
  public String getIp() {
    return ip;
  }

  public void setIp(String ip) {
    this.ip = ip;
  }

  /**
   * Fully qualified hostname of a running container, e.g.
   * ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and
   * hostname attribute values are dependent on the cluster/docker network
   * setup as per YARN-4007.
   **/
  public Container hostname(String hostname) {
    this.hostname = hostname;
    return this;
  }

  @ApiModelProperty(example = "null", value = "Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.")
  @JsonProperty("hostname")
  public String getHostname() {
    return hostname;
  }

  public void setHostname(String hostname) {
    this.hostname = hostname;
  }

  /**
   * The bare node or host in which the container is running, e.g.
   * cn008.example.com.
   **/
  public Container bareHost(String bareHost) {
    this.bareHost = bareHost;
    return this;
  }

  @ApiModelProperty(example = "null", value = "The bare node or host in which the container is running, e.g. cn008.example.com.")
  @JsonProperty("bare_host")
  public String getBareHost() {
    return bareHost;
  }

  @XmlElement(name = "bare_host")
  public void setBareHost(String bareHost) {
    this.bareHost = bareHost;
  }

  /**
   * State of the container of an service.
   **/
  public Container state(ContainerState state) {
    this.state = state;
    return this;
  }

  @ApiModelProperty(example = "null", value = "State of the container of an service.")
  @JsonProperty("state")
  public ContainerState getState() {
    return state;
  }

  public void setState(ContainerState state) {
    this.state = state;
  }

  /**
   * Name of the component that this container instance belongs to.
   * NOTE: the builder is named componentName but the field/getter use
   * componentInstanceName; the wire name is "component_name".
   **/
  public Container componentName(String componentName) {
    this.componentInstanceName = componentName;
    return this;
  }

  @ApiModelProperty(example = "null", value = "Name of the component that this container instance belongs to.")
  @JsonProperty("component_name")
  public String getComponentInstanceName() {
    return componentInstanceName;
  }

  @XmlElement(name = "component_name")
  public void setComponentInstanceName(String componentInstanceName) {
    this.componentInstanceName = componentInstanceName;
  }

  /**
   * Resource used for this container.
   **/
  public Container resource(Resource resource) {
    this.resource = resource;
    return this;
  }

  @ApiModelProperty(example = "null", value = "Resource used for this container.")
  @JsonProperty("resource")
  public Resource getResource() {
    return resource;
  }

  public void setResource(Resource resource) {
    this.resource = resource;
  }

  /**
   * Artifact used for this container.
   **/
  public Container artifact(Artifact artifact) {
    this.artifact = artifact;
    return this;
  }

  @ApiModelProperty(example = "null", value = "Artifact used for this container.")
  @JsonProperty("artifact")
  public Artifact getArtifact() {
    return artifact;
  }

  public void setArtifact(Artifact artifact) {
    this.artifact = artifact;
  }

  /**
   * Container running in privileged mode or not.
   **/
  public Container privilegedContainer(Boolean privilegedContainer) {
    this.privilegedContainer = privilegedContainer;
    return this;
  }

  @ApiModelProperty(example = "null", value = "Container running in privileged mode or not.")
  @JsonProperty("privileged_container")
  public Boolean getPrivilegedContainer() {
    return privilegedContainer;
  }

  public void setPrivilegedContainer(Boolean privilegedContainer) {
    this.privilegedContainer = privilegedContainer;
  }

  @Override
  public boolean equals(java.lang.Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    Container container = (Container) o;
    // Identity is the container id only; all other fields are mutable
    // runtime state and are intentionally excluded.
    return Objects.equals(this.id, container.id);
  }

  @Override
  public int hashCode() {
    return Objects.hash(id);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("class Container {\n");

    sb.append("    id: ").append(toIndentedString(id)).append("\n");
    sb.append("    launchTime: ").append(toIndentedString(launchTime))
        .append("\n");
    sb.append("    ip: ").append(toIndentedString(ip)).append("\n");
    sb.append("    hostname: ").append(toIndentedString(hostname)).append("\n");
    sb.append("    bareHost: ").append(toIndentedString(bareHost)).append("\n");
    sb.append("    state: ").append(toIndentedString(state)).append("\n");
    sb.append("    componentInstanceName: ").append(toIndentedString(
        componentInstanceName))
        .append("\n");
    sb.append("    resource: ").append(toIndentedString(resource)).append("\n");
    sb.append("    artifact: ").append(toIndentedString(artifact)).append("\n");
    sb.append("    privilegedContainer: ")
        .append(toIndentedString(privilegedContainer)).append("\n");
    sb.append("}");
    return sb.toString();
  }

  /**
   * Convert the given object to string with each line indented by 4 spaces
   * (except the first line).
   */
  private String toIndentedString(java.lang.Object o) {
    if (o == null) {
      return "null";
    }
    return o.toString().replace("\n", "\n    ");
  }
}
package org.apache.hadoop.yarn.service.api.records;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * The current state of the container of an application.
 */
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum ContainerState {
  // RUNNING_BUT_UNREADY: presumably the container is up but has not yet
  // passed its readiness check — TODO confirm against the readiness-check
  // logic. READY: serving. STOPPED: no longer running.
  RUNNING_BUT_UNREADY, READY, STOPPED
}
package org.apache.hadoop.yarn.service.api.records;

import io.swagger.annotations.ApiModelProperty;

import java.util.Objects;

import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Error payload returned by the service REST API: a numeric code, a
 * human-readable message and the fields involved. All fields are nullable
 * and settable via fluent builders or plain setters.
 */
@InterfaceAudience.Public
@InterfaceStability.Unstable
@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00")
public class Error {

  private Integer code = null;
  private String message = null;
  private String fields = null;

  /** Fluent setter for the error code. */
  public Error code(Integer code) {
    this.code = code;
    return this;
  }

  @ApiModelProperty(example = "null", value = "")
  @JsonProperty("code")
  public Integer getCode() {
    return code;
  }

  public void setCode(Integer code) {
    this.code = code;
  }

  /** Fluent setter for the error message. */
  public Error message(String message) {
    this.message = message;
    return this;
  }

  @ApiModelProperty(example = "null", value = "")
  @JsonProperty("message")
  public String getMessage() {
    return message;
  }

  public void setMessage(String message) {
    this.message = message;
  }

  /** Fluent setter for the offending fields. */
  public Error fields(String fields) {
    this.fields = fields;
    return this;
  }

  @ApiModelProperty(example = "null", value = "")
  @JsonProperty("fields")
  public String getFields() {
    return fields;
  }

  public void setFields(String fields) {
    this.fields = fields;
  }

  @Override
  public boolean equals(java.lang.Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    Error other = (Error) o;
    return Objects.equals(code, other.code)
        && Objects.equals(message, other.message)
        && Objects.equals(fields, other.fields);
  }

  @Override
  public int hashCode() {
    return Objects.hash(code, message, fields);
  }

  @Override
  public String toString() {
    StringBuilder out = new StringBuilder("class Error {\n");
    out.append("    code: ").append(toIndentedString(code)).append("\n");
    out.append("    message: ").append(toIndentedString(message)).append("\n");
    out.append("    fields: ").append(toIndentedString(fields)).append("\n");
    out.append("}");
    return out.toString();
  }

  /**
   * Render the given object, indenting every line after the first by four
   * spaces; null renders as "null".
   */
  private String toIndentedString(java.lang.Object o) {
    return o == null ? "null" : o.toString().replace("\n", "\n    ");
  }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.io.Serializable; +import java.util.Objects; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Placement policy of an instance of an service. This feature is in the + * works in YARN-4902. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "Placement policy of an instance of an service. This feature is in the works in YARN-4902.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public class PlacementPolicy implements Serializable { + private static final long serialVersionUID = 4341110649551172231L; + + private String label = null; + + /** + * Assigns a service to a named partition of the cluster where the service + * desires to run (optional). If not specified all services are submitted to + * a default label of the service owner. One or more labels can be setup for + * each service owner account with required constraints like no-preemption, + * sla-99999, preemption-ok, etc. + **/ + public PlacementPolicy label(String label) { + this.label = label; + return this; + } + + @ApiModelProperty(example = "null", value = "Assigns a service to a named partition of the cluster where the service desires to run (optional). If not specified all services are submitted to a default label of the service owner. 
One or more labels can be setup for each service owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc.") + @JsonProperty("label") + public String getLabel() { + return label; + } + + public void setLabel(String label) { + this.label = label; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PlacementPolicy placementPolicy = (PlacementPolicy) o; + return Objects.equals(this.label, placementPolicy.label); + } + + @Override + public int hashCode() { + return Objects.hash(label); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class PlacementPolicy {\n"); + + sb.append(" label: ").append(toIndentedString(label)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java new file mode 100644 index 0000000..b25828f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import javax.xml.bind.annotation.XmlEnum; +import javax.xml.bind.annotation.XmlType; + +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonValue; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * A custom command or a pluggable helper container to determine the readiness + * of a container of a component. Readiness for every service is different. + * Hence the need for a simple interface, with scope to support advanced + * usecases. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. 
Hence the need for a simple interface, with scope to support advanced usecases.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public class ReadinessCheck implements Serializable { + private static final long serialVersionUID = -3836839816887186801L; + + /** + * Type. HTTP and PORT + **/ + @XmlType(name = "type") + @XmlEnum + public enum TypeEnum { + HTTP("HTTP"), + PORT("PORT"); + + private String value; + + TypeEnum(String value) { + this.value = value; + } + + @Override + @JsonValue + public String toString() { + return value; + } + } + + private TypeEnum type = null; + private Map props = new HashMap(); + private Artifact artifact = null; + + /** + * E.g. HTTP (YARN will perform a simple REST call at a regular interval and + * expect a 204 No content). + **/ + public ReadinessCheck type(TypeEnum type) { + this.type = type; + return this; + } + + @ApiModelProperty(example = "null", value = "E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).") + @JsonProperty("type") + public TypeEnum getType() { + return type; + } + + public void setType(TypeEnum type) { + this.type = type; + } + + public ReadinessCheck props(Map props) { + this.props = props; + return this; + } + + public ReadinessCheck putPropsItem(String key, String propsItem) { + this.props.put(key, propsItem); + return this; + } + + /** + * A blob of key value pairs that will be used to configure the check. + * @return props + **/ + @ApiModelProperty(example = "null", value = "A blob of key value pairs that will be used to configure the check.") + public Map getProps() { + return props; + } + + public void setProps(Map props) { + this.props = props; + } + + /** + * Artifact of the pluggable readiness check helper container (optional). 
If + * specified, this helper container typically hosts the http uri and + * encapsulates the complex scripts required to perform actual container + * readiness check. At the end it is expected to respond a 204 No content just + * like the simplified use case. This pluggable framework benefits service + * owners who can run services without any packaging modifications. Note, + * artifacts of type docker only is supported for now. + **/ + public ReadinessCheck artifact(Artifact artifact) { + this.artifact = artifact; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. 
Note, artifacts of type docker only is supported for now.") + @JsonProperty("artifact") + public Artifact getArtifact() { + return artifact; + } + + public void setArtifact(Artifact artifact) { + this.artifact = artifact; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ReadinessCheck readinessCheck = (ReadinessCheck) o; + return Objects.equals(this.type, readinessCheck.type) && + Objects.equals(this.props, readinessCheck.props) && + Objects.equals(this.artifact, readinessCheck.artifact); + } + + @Override + public int hashCode() { + return Objects.hash(type, props, artifact); + } + + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class ReadinessCheck {\n"); + + sb.append(" type: ").append(toIndentedString(type)).append("\n"); + sb.append(" props: ").append(toIndentedString(props)).append("\n"); + sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java new file mode 100644 index 0000000..dfdf92a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Resource.java @@ -0,0 +1,161 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.util.Objects; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Resource determines the amount of resources (vcores, memory, network, etc.) + * usable by a container. This field determines the resource to be applied for + * all the containers of a component or service. The resource specified at + * the service (or global) level can be overriden at the component level. Only one + * of profile OR cpu & memory are expected. It raises a validation + * exception otherwise. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overriden at the component level. Only one of profile OR cpu & memory are expected. It raises a validation exception otherwise.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public class Resource extends BaseResource implements Cloneable { + private static final long serialVersionUID = -6431667797380250037L; + + private String profile = null; + private Integer cpus = 1; + private String memory = null; + + /** + * Each resource profile has a unique id which is associated with a + * cluster-level predefined memory, cpus, etc. 
+ **/ + public Resource profile(String profile) { + this.profile = profile; + return this; + } + + @ApiModelProperty(example = "null", value = "Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.") + @JsonProperty("profile") + public String getProfile() { + return profile; + } + + public void setProfile(String profile) { + this.profile = profile; + } + + /** + * Amount of vcores allocated to each container (optional but overrides cpus + * in profile if specified). + **/ + public Resource cpus(Integer cpus) { + this.cpus = cpus; + return this; + } + + @ApiModelProperty(example = "null", value = "Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).") + @JsonProperty("cpus") + public Integer getCpus() { + return cpus; + } + + public void setCpus(Integer cpus) { + this.cpus = cpus; + } + + /** + * Amount of memory allocated to each container (optional but overrides memory + * in profile if specified). Currently accepts only an integer value and + * default unit is in MB. + **/ + public Resource memory(String memory) { + this.memory = memory; + return this; + } + + @ApiModelProperty(example = "null", value = "Amount of memory allocated to each container (optional but overrides memory in profile if specified). 
Currently accepts only an integer value and default unit is in MB.") + @JsonProperty("memory") + public String getMemory() { + return memory; + } + + public void setMemory(String memory) { + this.memory = memory; + } + + @JsonIgnore + public long getMemoryMB() { + if (this.memory == null) { + return 0; + } + return Long.parseLong(memory); + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Resource resource = (Resource) o; + return Objects.equals(this.profile, resource.profile) + && Objects.equals(this.cpus, resource.cpus) + && Objects.equals(this.memory, resource.memory); + } + + @Override + public int hashCode() { + return Objects.hash(profile, cpus, memory); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Resource {\n"); + + sb.append(" profile: ").append(toIndentedString(profile)).append("\n"); + sb.append(" cpus: ").append(toIndentedString(cpus)).append("\n"); + sb.append(" memory: ").append(toIndentedString(memory)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } + + @Override + public Object clone() throws CloneNotSupportedException { + return super.clone(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java new file mode 100644 index 0000000..77a2610 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java @@ -0,0 +1,390 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.api.records; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * An Service resource has the following attributes. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "An Service resource has the following attributes.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@XmlRootElement +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonPropertyOrder({ "name", "state", "resource", "number_of_containers", + "lifetime", "containers" }) +public class Service extends BaseResource { + private static final long serialVersionUID = -4491694636566094885L; + + private String name = null; + private String id = null; + private Artifact artifact = null; + private Resource resource = null; + private Date launchTime = null; + private Long numberOfRunningContainers = null; + private Long lifetime = null; + private PlacementPolicy placementPolicy = null; + private List components = new ArrayList<>(); + private Configuration configuration = new Configuration(); + private ServiceState state = null; + private Map quicklinks = new HashMap<>(); + private String queue = null; + + /** + * A unique service name. 
+ **/ + public Service name(String name) { + this.name = name; + return this; + } + + @ApiModelProperty(example = "null", required = true, value = "A unique service name.") + @JsonProperty("name") + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + /** + * A unique service id. + **/ + public Service id(String id) { + this.id = id; + return this; + } + + @ApiModelProperty(example = "null", value = "A unique service id.") + @JsonProperty("id") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + /** + * Artifact of single-component services. Mandatory if components + * attribute is not specified. + **/ + public Service artifact(Artifact artifact) { + this.artifact = artifact; + return this; + } + + @ApiModelProperty(example = "null", value = "Artifact of single-component services. Mandatory if components attribute is not specified.") + @JsonProperty("artifact") + public Artifact getArtifact() { + return artifact; + } + + public void setArtifact(Artifact artifact) { + this.artifact = artifact; + } + + /** + * Resource of single-component services or the global default for + * multi-component services. Mandatory if it is a single-component + * service and if cpus and memory are not specified at the Service + * level. + **/ + public Service resource(Resource resource) { + this.resource = resource; + return this; + } + + @ApiModelProperty(example = "null", value = "Resource of single-component services or the global default for multi-component services. Mandatory if it is a single-component service and if cpus and memory are not specified at the Service level.") + @JsonProperty("resource") + public Resource getResource() { + return resource; + } + + public void setResource(Resource resource) { + this.resource = resource; + } + + /** + * The time when the service was created, e.g. 2016-03-16T01:01:49.000Z. 
+ **/ + public Service launchTime(Date launchTime) { + this.launchTime = launchTime == null ? null : (Date) launchTime.clone(); + return this; + } + + @ApiModelProperty(example = "null", value = "The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.") + @JsonProperty("launch_time") + public Date getLaunchTime() { + return launchTime == null ? null : (Date) launchTime.clone(); + } + + @XmlElement(name = "launch_time") + public void setLaunchTime(Date launchTime) { + this.launchTime = launchTime == null ? null : (Date) launchTime.clone(); + } + + /** + * In get response this provides the total number of running containers for + * this service (across all components) at the time of request. Note, a + * subsequent request can return a different number as and when more + * containers get allocated until it reaches the total number of containers or + * if a flex request has been made between the two requests. + **/ + public Service numberOfRunningContainers(Long numberOfRunningContainers) { + this.numberOfRunningContainers = numberOfRunningContainers; + return this; + } + + @ApiModelProperty(example = "null", value = "In get response this provides the total number of running containers for this service (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests.") + @JsonProperty("number_of_running_containers") + public Long getNumberOfRunningContainers() { + return numberOfRunningContainers; + } + + @XmlElement(name = "number_of_running_containers") + public void setNumberOfRunningContainers(Long numberOfRunningContainers) { + this.numberOfRunningContainers = numberOfRunningContainers; + } + + /** + * Life time (in seconds) of the service from the time it reaches the + * RUNNING_BUT_UNREADY state (after which it is automatically destroyed by YARN). 
For + * unlimited lifetime do not set a lifetime value. + **/ + public Service lifetime(Long lifetime) { + this.lifetime = lifetime; + return this; + } + + @ApiModelProperty(example = "null", value = "Life time (in seconds) of the service from the time it reaches the RUNNING_BUT_UNREADY state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.") + @JsonProperty("lifetime") + public Long getLifetime() { + return lifetime; + } + + public void setLifetime(Long lifetime) { + this.lifetime = lifetime; + } + + /** + * Advanced scheduling and placement policies (optional). If not specified, it + * defaults to the default placement policy of the service owner. The design of + * placement policies are in the works. It is not very clear at this point, + * how policies in conjunction with labels be exposed to service owners. + * This is a placeholder for now. The advanced structure of this attribute + * will be determined by YARN-4902. + **/ + public Service placementPolicy(PlacementPolicy placementPolicy) { + this.placementPolicy = placementPolicy; + return this; + } + + @ApiModelProperty(example = "null", value = "Advanced scheduling and placement policies (optional). If not specified, it defaults to the default placement policy of the service owner. The design of placement policies are in the works. It is not very clear at this point, how policies in conjunction with labels be exposed to service owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902.") + @JsonProperty("placement_policy") + public PlacementPolicy getPlacementPolicy() { + return placementPolicy; + } + + @XmlElement(name = "placement_policy") + public void setPlacementPolicy(PlacementPolicy placementPolicy) { + this.placementPolicy = placementPolicy; + } + + /** + * Components of an service. 
+ **/ + public Service components(List components) { + this.components = components; + return this; + } + + @ApiModelProperty(example = "null", value = "Components of an service.") + @JsonProperty("components") + public List getComponents() { + return components; + } + + public void setComponents(List components) { + this.components = components; + } + + public void addComponent(Component component) { + components.add(component); + } + + public Component getComponent(String name) { + for (Component component : components) { + if (component.getName().equals(name)) { + return component; + } + } + return null; + } + + /** + * Config properties of an service. Configurations provided at the + * service/global level are available to all the components. Specific + * properties can be overridden at the component level. + **/ + public Service configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } + + @ApiModelProperty(example = "null", value = "Config properties of an service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level.") + @JsonProperty("configuration") + public Configuration getConfiguration() { + return configuration; + } + + public void setConfiguration(Configuration configuration) { + this.configuration = configuration; + } + + /** + * State of the service. Specifying a value for this attribute for the + * POST payload raises a validation error. This attribute is available only in + * the GET response of a started service. + **/ + public Service state(ServiceState state) { + this.state = state; + return this; + } + + @ApiModelProperty(example = "null", value = "State of the service. Specifying a value for this attribute for the POST payload raises a validation error. 
This attribute is available only in the GET response of a started service.") + @JsonProperty("state") + public ServiceState getState() { + return state; + } + + public void setState(ServiceState state) { + this.state = state; + } + + /** + * A blob of key-value pairs of quicklinks to be exported for an service. + **/ + public Service quicklinks(Map quicklinks) { + this.quicklinks = quicklinks; + return this; + } + + @ApiModelProperty(example = "null", value = "A blob of key-value pairs of quicklinks to be exported for an service.") + @JsonProperty("quicklinks") + public Map getQuicklinks() { + return quicklinks; + } + + public void setQuicklinks(Map quicklinks) { + this.quicklinks = quicklinks; + } + + /** + * The YARN queue that this service should be submitted to. + **/ + public Service queue(String queue) { + this.queue = queue; + return this; + } + + @ApiModelProperty(example = "null", value = "The YARN queue that this service should be submitted to.") + @JsonProperty("queue") + public String getQueue() { + return queue; + } + + public void setQueue(String queue) { + this.queue = queue; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Service service = (Service) o; + return Objects.equals(this.name, service.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class Service {\n"); + + sb.append(" name: ").append(toIndentedString(name)).append("\n"); + sb.append(" id: ").append(toIndentedString(id)).append("\n"); + sb.append(" artifact: ").append(toIndentedString(artifact)).append("\n"); + sb.append(" resource: ").append(toIndentedString(resource)).append("\n"); + sb.append(" launchTime: ").append(toIndentedString(launchTime)) + .append("\n"); + sb.append(" numberOfRunningContainers: ") + 
.append(toIndentedString(numberOfRunningContainers)).append("\n"); + sb.append(" lifetime: ").append(toIndentedString(lifetime)).append("\n"); + sb.append(" placementPolicy: ").append(toIndentedString(placementPolicy)) + .append("\n"); + sb.append(" components: ").append(toIndentedString(components)) + .append("\n"); + sb.append(" configuration: ").append(toIndentedString(configuration)) + .append("\n"); + sb.append(" state: ").append(toIndentedString(state)).append("\n"); + sb.append(" quicklinks: ").append(toIndentedString(quicklinks)) + .append("\n"); + sb.append(" queue: ").append(toIndentedString(queue)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceState.java new file mode 100644 index 0000000..d2f5d06 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceState.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * The current state of an service. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "The current state of an service.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +public enum ServiceState { + ACCEPTED, STARTED, STABLE, STOPPED, FAILED; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceStatus.java new file mode 100644 index 0000000..2cee23c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ServiceStatus.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.api.records; + +import io.swagger.annotations.ApiModel; +import io.swagger.annotations.ApiModelProperty; + +import java.util.Objects; + +import javax.xml.bind.annotation.XmlRootElement; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * The current status of a submitted service, returned as a response to the + * GET API. + **/ +@InterfaceAudience.Public +@InterfaceStability.Unstable +@ApiModel(description = "The current status of a submitted service, returned as a response to the GET API.") +@javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") +@XmlRootElement +@JsonInclude(JsonInclude.Include.NON_NULL) +public class ServiceStatus extends BaseResource { + private static final long serialVersionUID = -3469885905347851034L; + + private String diagnostics = null; + private ServiceState state = null; + private Integer code = null; + + /** + * Diagnostic information (if any) for the reason of the current state of the + * service. It typically has a non-null value, if the service is in a + * non-running state. 
+ **/ + public ServiceStatus diagnostics(String diagnostics) { + this.diagnostics = diagnostics; + return this; + } + + @ApiModelProperty(example = "null", value = "Diagnostic information (if any) for the reason of the current state of the service. It typically has a non-null value, if the service is in a non-running state.") + @JsonProperty("diagnostics") + public String getDiagnostics() { + return diagnostics; + } + + public void setDiagnostics(String diagnostics) { + this.diagnostics = diagnostics; + } + + /** + * Service state. + **/ + public ServiceStatus state(ServiceState state) { + this.state = state; + return this; + } + + @ApiModelProperty(example = "null", value = "Service state.") + @JsonProperty("state") + public ServiceState getState() { + return state; + } + + public void setState(ServiceState state) { + this.state = state; + } + + /** + * An error code specific to a scenario which service owners should be able to use + * to understand the failure in addition to the diagnostic information. 
+ **/ + public ServiceStatus code(Integer code) { + this.code = code; + return this; + } + + @ApiModelProperty(example = "null", value = "An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information.") + @JsonProperty("code") + public Integer getCode() { + return code; + } + + public void setCode(Integer code) { + this.code = code; + } + + @Override + public boolean equals(java.lang.Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ServiceStatus serviceStatus = (ServiceStatus) o; + return Objects.equals(this.diagnostics, serviceStatus.diagnostics) + && Objects.equals(this.state, serviceStatus.state) + && Objects.equals(this.code, serviceStatus.code); + } + + @Override + public int hashCode() { + return Objects.hash(diagnostics, state, code); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class ServiceStatus {\n"); + + sb.append(" diagnostics: ").append(toIndentedString(diagnostics)) + .append("\n"); + sb.append(" state: ").append(toIndentedString(state)).append("\n"); + sb.append(" code: ").append(toIndentedString(code)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). 
+ */ + private String toIndentedString(java.lang.Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java new file mode 100644 index 0000000..e17c0c4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.client.ServerProxy; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; + +import java.net.InetSocketAddress; + +import static org.apache.hadoop.io.retry.RetryPolicies.TRY_ONCE_THEN_FAIL; + +public class ClientAMProxy extends ServerProxy{ + + public static T createProxy(final Configuration conf, + final Class protocol, final UserGroupInformation ugi, + final YarnRPC rpc, final InetSocketAddress serverAddress) { + Configuration confClone = new Configuration(conf); + confClone.setInt( + CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); + confClone.setInt(CommonConfigurationKeysPublic. + IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 0); + RetryPolicy retryPolicy; + + if (conf.getLong(YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS, 0) == 0) { + // by default no retry + retryPolicy = TRY_ONCE_THEN_FAIL; + } else { + retryPolicy = + createRetryPolicy(conf, YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS, + 15 * 60 * 1000, YarnServiceConf.CLIENT_AM_RETRY_MAX_INTERVAL_MS, + 2 * 1000); + } + return createRetriableProxy(confClone, protocol, ugi, rpc, serverAddress, + retryPolicy); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java new file mode 100644 index 0000000..636b127 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -0,0 +1,943 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.client; + +import org.apache.commons.lang.StringUtils; +import org.apache.curator.framework.CuratorFramework; +import org.apache.curator.framework.CuratorFrameworkFactory; +import org.apache.curator.retry.RetryNTimes; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.api.RegistryOperationsFactory; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ApplicationTimeout; +import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.client.api.AppAdminClient; +import org.apache.hadoop.yarn.client.api.YarnClient; 
+import org.apache.hadoop.yarn.client.api.YarnClientApplication; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.ComponentCountProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto; +import org.apache.hadoop.yarn.service.ClientAMProtocol; +import org.apache.hadoop.yarn.service.ServiceMaster; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.conf.SliderExitCodes; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.containerlaunch.ClasspathConstructor; +import org.apache.hadoop.yarn.service.containerlaunch.JavaCommandLineBuilder; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderUtils; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.apache.hadoop.yarn.service.utils.ZookeeperUtils; +import org.apache.hadoop.yarn.util.Records; +import org.apache.hadoop.yarn.util.Times; 
+import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.text.MessageFormat; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import static org.apache.hadoop.yarn.api.records.YarnApplicationState.*; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_QUEUE; +import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.jsonSerDeser; +import static org.apache.hadoop.yarn.service.utils.SliderUtils.*; + +@InterfaceAudience.Public +@InterfaceStability.Unstable +public class ServiceClient extends AppAdminClient implements SliderExitCodes, + YarnServiceConstants { + private static final Logger LOG = + LoggerFactory.getLogger(ServiceClient.class); + private SliderFileSystem fs; + //TODO disable retry so that client / rest API doesn't block? + protected YarnClient yarnClient; + // Avoid looking up applicationId from fs all the time. 
+ private Map cachedAppIds = new ConcurrentHashMap<>(); + + private RegistryOperations registryClient; + private CuratorFramework curatorClient; + private YarnRPC rpc; + + private static EnumSet terminatedStates = + EnumSet.of(FINISHED, FAILED, KILLED); + private static EnumSet liveStates = + EnumSet.of(NEW, NEW_SAVING, SUBMITTED, ACCEPTED, RUNNING); + private static EnumSet preRunningStates = + EnumSet.of(NEW, NEW_SAVING, SUBMITTED, ACCEPTED); + + @Override protected void serviceInit(Configuration configuration) + throws Exception { + fs = new SliderFileSystem(configuration); + yarnClient = YarnClient.createYarnClient(); + rpc = YarnRPC.create(configuration); + addService(yarnClient); + super.serviceInit(configuration); + } + + @Override + protected void serviceStop() throws Exception { + if (registryClient != null) { + registryClient.stop(); + } + super.serviceStop(); + } + + public Service loadAppJsonFromLocalFS(String fileName, String serviceName, + Long lifetime, String queue) throws IOException, YarnException { + File file = new File(fileName); + if (!file.exists() && fileName.equals(file.getName())) { + String examplesDirStr = System.getenv("YARN_SERVICE_EXAMPLES_DIR"); + String[] examplesDirs; + if (examplesDirStr == null) { + String yarnHome = System + .getenv(ApplicationConstants.Environment.HADOOP_YARN_HOME.key()); + examplesDirs = new String[]{ + yarnHome + "/share/hadoop/yarn/yarn-service-examples", + yarnHome + "/yarn-service-examples" + }; + } else { + examplesDirs = StringUtils.split(examplesDirStr, ":"); + } + for (String dir : examplesDirs) { + file = new File(MessageFormat.format("{0}/{1}/{2}.json", + dir, fileName, fileName)); + if (file.exists()) { + break; + } + // Then look for secondary location. 
+ file = new File(MessageFormat.format("{0}/{1}.json", + dir, fileName)); + if (file.exists()) { + break; + } + } + } + if (!file.exists()) { + throw new YarnException("File or example could not be found: " + + fileName); + } + Path filePath = new Path(file.getAbsolutePath()); + LOG.info("Loading service definition from: " + filePath); + Service service = jsonSerDeser + .load(FileSystem.getLocal(getConfig()), filePath); + if (!StringUtils.isEmpty(serviceName)) { + service.setName(serviceName); + } + if (lifetime != null && lifetime > 0) { + service.setLifetime(lifetime); + } + if (!StringUtils.isEmpty(queue)) { + service.setQueue(queue); + } + return service; + } + + public int actionSave(String fileName, String serviceName, Long lifetime, + String queue) throws IOException, YarnException { + return actionBuild(loadAppJsonFromLocalFS(fileName, serviceName, + lifetime, queue)); + } + + public int actionBuild(Service service) + throws YarnException, IOException { + Path appDir = checkAppNotExistOnHdfs(service); + ServiceApiUtil.validateAndResolveService(service, fs, getConfig()); + createDirAndPersistApp(appDir, service); + return EXIT_SUCCESS; + } + + public int actionLaunch(String fileName, String serviceName, Long lifetime, + String queue) throws IOException, YarnException { + actionCreate(loadAppJsonFromLocalFS(fileName, serviceName, lifetime, + queue)); + return EXIT_SUCCESS; + } + + public ApplicationId actionCreate(Service service) + throws IOException, YarnException { + String serviceName = service.getName(); + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + ServiceApiUtil.validateAndResolveService(service, fs, getConfig()); + verifyNoLiveAppInRM(serviceName, "create"); + Path appDir = checkAppNotExistOnHdfs(service); + + // Write the definition first and then submit - AM will read the definition + createDirAndPersistApp(appDir, service); + ApplicationId appId = submitApp(service); + cachedAppIds.put(serviceName, appId); + 
service.setId(appId.toString()); + // update app definition with appId + persistAppDef(appDir, service); + return appId; + } + + public int actionFlex(String serviceName, Map + componentCountStrings) throws YarnException, IOException { + Map componentCounts = + new HashMap<>(componentCountStrings.size()); + Service persistedService = + ServiceApiUtil.loadService(fs, serviceName); + if (!StringUtils.isEmpty(persistedService.getId())) { + cachedAppIds.put(persistedService.getName(), + ApplicationId.fromString(persistedService.getId())); + } else { + throw new YarnException(persistedService.getName() + + " appId is null, may be not submitted to YARN yet"); + } + + for (Map.Entry entry : componentCountStrings.entrySet()) { + String compName = entry.getKey(); + ServiceApiUtil.validateNameFormat(compName, getConfig()); + Component component = persistedService.getComponent(compName); + if (component == null) { + throw new IllegalArgumentException(entry.getKey() + " does not exist !"); + } + long numberOfContainers = + parseNumberOfContainers(component, entry.getValue()); + componentCounts.put(compName, numberOfContainers); + } + flexComponents(serviceName, componentCounts, persistedService); + return EXIT_SUCCESS; + } + + // Parse the number of containers requested by user, e.g. + // +5 means add 5 additional containers + // -5 means reduce 5 containers, if it goes to negative, sets it to 0 + // 5 means sets it to 5 containers. 
+ private long parseNumberOfContainers(Component component, String newNumber) { + + long orig = component.getNumberOfContainers(); + if (newNumber.startsWith("+")) { + return orig + Long.parseLong(newNumber.substring(1)); + } else if (newNumber.startsWith("-")) { + long ret = orig - Long.parseLong(newNumber.substring(1)); + if (ret < 0) { + LOG.warn(MessageFormat.format( + "[COMPONENT {}]: component count goes to negative ({}{} = {}), reset it to 0.", + component.getName(), orig, newNumber, ret)); + ret = 0; + } + return ret; + } else { + return Long.parseLong(newNumber); + } + } + + // Called by Rest Service + public Map flexByRestService(String serviceName, + Map componentCounts) throws YarnException, IOException { + // load app definition + Service persistedService = ServiceApiUtil.loadService(fs, serviceName); + if (StringUtils.isEmpty(persistedService.getId())) { + throw new YarnException( + serviceName + " appId is null, may be not submitted to YARN yet"); + } + cachedAppIds.put(persistedService.getName(), + ApplicationId.fromString(persistedService.getId())); + return flexComponents(serviceName, componentCounts, persistedService); + } + + private Map flexComponents(String serviceName, + Map componentCounts, Service persistedService) + throws YarnException, IOException { + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + + Map original = new HashMap<>(componentCounts.size()); + + ComponentCountProto.Builder countBuilder = ComponentCountProto.newBuilder(); + FlexComponentsRequestProto.Builder requestBuilder = + FlexComponentsRequestProto.newBuilder(); + + for (Component persistedComp : persistedService.getComponents()) { + String name = persistedComp.getName(); + if (componentCounts.containsKey(persistedComp.getName())) { + original.put(name, persistedComp.getNumberOfContainers()); + persistedComp.setNumberOfContainers(componentCounts.get(name)); + + // build the request + countBuilder.setName(persistedComp.getName()) + 
.setNumberOfContainers(persistedComp.getNumberOfContainers()); + requestBuilder.addComponents(countBuilder.build()); + } + } + if (original.size() < componentCounts.size()) { + componentCounts.keySet().removeAll(original.keySet()); + throw new YarnException("Components " + componentCounts.keySet() + + " do not exist in app definition."); + } + jsonSerDeser + .save(fs.getFileSystem(), ServiceApiUtil.getServiceJsonPath(fs, serviceName), + persistedService, true); + + ApplicationReport appReport = + yarnClient.getApplicationReport(getAppId(serviceName)); + if (appReport.getYarnApplicationState() != RUNNING) { + String message = + serviceName + " is at " + appReport.getYarnApplicationState() + + " state, flex can only be invoked when service is running"; + LOG.error(message); + throw new YarnException(message); + } + if (StringUtils.isEmpty(appReport.getHost())) { + throw new YarnException(serviceName + " AM hostname is empty"); + } + ClientAMProtocol proxy = + createAMProxy(appReport.getHost(), appReport.getRpcPort()); + proxy.flexComponents(requestBuilder.build()); + for (Map.Entry entry : original.entrySet()) { + LOG.info("[COMPONENT {}]: number of containers changed from {} to {}", + entry.getKey(), entry.getValue(), + componentCounts.get(entry.getKey())); + } + return original; + } + + public int actionStop(String serviceName) + throws YarnException, IOException { + return actionStop(serviceName, true); + } + + public int actionStop(String serviceName, boolean waitForAppStopped) + throws YarnException, IOException { + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + ApplicationId currentAppId = getAppId(serviceName); + ApplicationReport report = yarnClient.getApplicationReport(currentAppId); + if (terminatedStates.contains(report.getYarnApplicationState())) { + LOG.info("Service {} is already in a terminated state {}", serviceName, + report.getYarnApplicationState()); + return EXIT_SUCCESS; + } + if 
(preRunningStates.contains(report.getYarnApplicationState())) { + String msg = serviceName + " is at " + report.getYarnApplicationState() + + ", forcefully killed by user!"; + yarnClient.killApplication(currentAppId, msg); + LOG.info(msg); + return EXIT_SUCCESS; + } + if (StringUtils.isEmpty(report.getHost())) { + throw new YarnException(serviceName + " AM hostname is empty"); + } + LOG.info("Stopping service {}, with appId = {}", serviceName, currentAppId); + try { + ClientAMProtocol proxy = + createAMProxy(report.getHost(), report.getRpcPort()); + cachedAppIds.remove(serviceName); + if (proxy != null) { + // try to stop the app gracefully. + StopRequestProto request = StopRequestProto.newBuilder().build(); + proxy.stop(request); + LOG.info("Service " + serviceName + " is being gracefully stopped..."); + } else { + yarnClient.killApplication(currentAppId, + serviceName + " is forcefully killed by user!"); + LOG.info("Forcefully kill the service: " + serviceName); + return EXIT_SUCCESS; + } + + if (!waitForAppStopped) { + return EXIT_SUCCESS; + } + // Wait until the app is killed. + long startTime = System.currentTimeMillis(); + int pollCount = 0; + while (true) { + Thread.sleep(2000); + report = yarnClient.getApplicationReport(currentAppId); + if (terminatedStates.contains(report.getYarnApplicationState())) { + LOG.info("Service " + serviceName + " is stopped."); + break; + } + // Forcefully kill after 10 seconds. 
+ if ((System.currentTimeMillis() - startTime) > 10000) { + LOG.info("Stop operation timeout stopping, forcefully kill the app " + + serviceName); + yarnClient.killApplication(currentAppId, + "Forcefully kill the app by user"); + break; + } + if (++pollCount % 10 == 0) { + LOG.info("Waiting for service " + serviceName + " to be stopped."); + } + } + } catch (IOException | YarnException | InterruptedException e) { + LOG.info("Failed to stop " + serviceName + + " gracefully, forcefully kill the app."); + yarnClient.killApplication(currentAppId, "Forcefully kill the app"); + } + return EXIT_SUCCESS; + } + + public int actionDestroy(String serviceName) throws YarnException, + IOException { + ServiceApiUtil.validateNameFormat(serviceName, getConfig()); + verifyNoLiveAppInRM(serviceName, "destroy"); + + Path appDir = fs.buildClusterDirPath(serviceName); + FileSystem fileSystem = fs.getFileSystem(); + // remove from the appId cache + cachedAppIds.remove(serviceName); + if (fileSystem.exists(appDir)) { + if (fileSystem.delete(appDir, true)) { + LOG.info("Successfully deleted service dir for " + serviceName + ": " + + appDir); + } else { + String message = + "Failed to delete service + " + serviceName + " at: " + appDir; + LOG.info(message); + throw new YarnException(message); + } + } + try { + deleteZKNode(serviceName); + } catch (Exception e) { + throw new IOException("Could not delete zk node for " + serviceName, e); + } + String registryPath = ServiceRegistryUtils.registryPathForInstance(serviceName); + try { + getRegistryClient().delete(registryPath, true); + } catch (IOException e) { + LOG.warn("Error deleting registry entry {}", registryPath, e); + } + LOG.info("Destroyed cluster {}", serviceName); + return EXIT_SUCCESS; + } + + private synchronized RegistryOperations getRegistryClient() + throws SliderException, IOException { + + if (registryClient == null) { + registryClient = + RegistryOperationsFactory.createInstance("ServiceClient", getConfig()); + 
registryClient.init(getConfig()); + registryClient.start(); + } + return registryClient; + } + + private void deleteZKNode(String clusterName) throws Exception { + CuratorFramework curatorFramework = getCuratorClient(); + String user = RegistryUtils.currentUser(); + String zkPath = ServiceRegistryUtils.mkClusterPath(user, clusterName); + if (curatorFramework.checkExists().forPath(zkPath) != null) { + curatorFramework.delete().deletingChildrenIfNeeded().forPath(zkPath); + LOG.info("Deleted zookeeper path: " + zkPath); + } + } + + private synchronized CuratorFramework getCuratorClient() + throws BadConfigException { + String registryQuorum = + getConfig().get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM); + + // though if neither is set: trouble + if (SliderUtils.isUnset(registryQuorum)) { + throw new BadConfigException( + "No Zookeeper quorum provided in the" + " configuration property " + + RegistryConstants.KEY_REGISTRY_ZK_QUORUM); + } + ZookeeperUtils.splitToHostsAndPortsStrictly(registryQuorum); + + if (curatorClient == null) { + curatorClient = + CuratorFrameworkFactory.builder().connectString(registryQuorum) + .sessionTimeoutMs(10000).retryPolicy(new RetryNTimes(5, 2000)) + .build(); + curatorClient.start(); + } + return curatorClient; + } + + private void verifyNoLiveAppInRM(String serviceName, String action) + throws IOException, YarnException { + Set types = new HashSet<>(1); + types.add(YarnServiceConstants.APP_TYPE); + Set tags = null; + if (serviceName != null) { + tags = Collections.singleton(SliderUtils.createNameTag(serviceName)); + } + GetApplicationsRequest request = GetApplicationsRequest.newInstance(); + request.setApplicationTypes(types); + request.setApplicationTags(tags); + request.setApplicationStates(liveStates); + List reports = yarnClient.getApplications(request); + if (!reports.isEmpty()) { + String message = ""; + if (action.equals("destroy")) { + message = "Failed to destroy service " + serviceName + + ", because it is still running."; + } 
// NOTE(review): recovered from a mangled diff chunk; '+' markers dropped and
// generic type parameters (Map/Set/List element types) appear stripped by
// extraction -- raw types below are kept exactly as found. Confirm against the
// original ServiceClient.java before relying on declared types.

// Tail of a method that begins before this chunk: raises YarnException when a
// service already exists for the attempted action.
else {
      message = "Failed to " + action + " service " + serviceName +
          ", because it already exists.";
    }
    throw new YarnException(message);
    }
  }

  /**
   * Builds and submits the AM application for the given service spec.
   * Validates component resource sizes against the cluster maximum, wires up
   * lifetime/retry settings, localizes jars/log4j/keytab, and constructs the
   * AM launch command before handing the context to YarnClient.
   *
   * @param app service definition to launch
   * @return the ApplicationId assigned by the RM
   * @throws IOException on HDFS/localization failures
   * @throws YarnException on submission failures
   */
  private ApplicationId submitApp(Service app)
      throws IOException, YarnException {
    String serviceName = app.getName();
    Configuration conf = getConfig();
    Path appRootDir = fs.buildClusterDirPath(app.getName());

    YarnClientApplication yarnApp = yarnClient.createApplication();
    ApplicationSubmissionContext submissionContext =
        yarnApp.getApplicationSubmissionContext();
    // Reject components whose resource request exceeds the cluster maximum.
    ServiceApiUtil.validateCompResourceSize(
        yarnApp.getNewApplicationResponse().getMaximumResourceCapability(),
        app);

    // Keep containers alive across AM restarts so instances can be recovered.
    submissionContext.setKeepContainersAcrossApplicationAttempts(true);
    if (app.getLifetime() > 0) {
      Map appTimeout = new HashMap<>();
      appTimeout.put(ApplicationTimeoutType.LIFETIME, app.getLifetime());
      submissionContext.setApplicationTimeouts(appTimeout);
    }
    submissionContext.setMaxAppAttempts(conf.getInt(
        YarnServiceConf.AM_RESTART_MAX, 2));

    Map localResources = new HashMap<>();

    // copy local slideram-log4j.properties to hdfs and add to localResources
    boolean hasAMLog4j =
        addAMLog4jResource(serviceName, conf, localResources);
    // copy jars to hdfs and add to localResources
    addJarResource(serviceName, localResources);
    // add keytab if in secure env
    addKeytabResourceIfSecure(fs, localResources, conf, serviceName);
    if (LOG.isDebugEnabled()) {
      printLocalResources(localResources);
    }
    Map env = addAMEnv(conf);

    // create AM CLI
    String cmdStr =
        buildCommandLine(serviceName, conf, appRootDir, hasAMLog4j);
    submissionContext.setResource(Resource.newInstance(YarnServiceConf
        .getLong(YarnServiceConf.AM_RESOURCE_MEM,
            YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM,
            app.getConfiguration(), conf), 1));
    String queue = app.getQueue();
    if (StringUtils.isEmpty(queue)) {
      queue = conf.get(YARN_QUEUE, "default");
    }
    submissionContext.setQueue(queue);
    submissionContext.setApplicationName(serviceName);
    submissionContext.setApplicationType(YarnServiceConstants.APP_TYPE);
    Set appTags =
        AbstractClientProvider.createApplicationTags(serviceName, null, null);
    if (!appTags.isEmpty()) {
      submissionContext.setApplicationTags(appTags);
    }
    ContainerLaunchContext amLaunchContext =
        Records.newRecord(ContainerLaunchContext.class);
    amLaunchContext.setCommands(Collections.singletonList(cmdStr));
    amLaunchContext.setEnvironment(env);
    amLaunchContext.setLocalResources(localResources);
    submissionContext.setAMContainerSpec(amLaunchContext);
    yarnClient.submitApplication(submissionContext);
    return submissionContext.getApplicationId();
  }

  /** Debug-logs every LocalResource key and the file it resolves to. */
  private void printLocalResources(Map map) {
    LOG.debug("Added LocalResource for localization: ");
    StringBuilder builder = new StringBuilder();
    for (Map.Entry entry : map.entrySet()) {
      builder.append(entry.getKey()).append(" -> ")
          .append(entry.getValue().getResource().getFile())
          .append(System.lineSeparator());
    }
    LOG.debug(builder.toString());
  }

  /**
   * Assembles the java command line used to launch the ServiceMaster AM,
   * including log4j sysprops (when localized), the service JSON path, the
   * registry ZK settings, and stdout/stderr redirection.
   *
   * @throws BadConfigException when a mandatory registry option is missing
   */
  private String buildCommandLine(String serviceName, Configuration conf,
      Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException {
    JavaCommandLineBuilder CLI = new JavaCommandLineBuilder();
    CLI.forceIPv4().headless();
    //TODO CLI.setJVMHeap
    //TODO CLI.addJVMOPTS
    if (hasSliderAMLog4j) {
      CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, YARN_SERVICE_LOG4J_FILENAME);
      CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    }
    CLI.add(ServiceMaster.class.getCanonicalName());
    //TODO debugAM CLI.add(Arguments.ARG_DEBUG)
    CLI.add("-" + ServiceMaster.YARNFILE_OPTION, new Path(appRootDir,
        serviceName + ".json"));
    // pass the registry binding
    CLI.addConfOptionToCLI(conf, RegistryConstants.KEY_REGISTRY_ZK_ROOT,
        RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT);
    CLI.addMandatoryConfOption(conf, RegistryConstants.KEY_REGISTRY_ZK_QUORUM);

    // write out the path output
    CLI.addOutAndErrFiles(STDOUT_AM, STDERR_AM);
    String cmdStr = CLI.build();
    LOG.info("AM launch command: {}", cmdStr);
    return cmdStr;
  }

  /**
   * Builds the environment for the AM container: classpath, UTF-8 locale
   * vars, optional JAAS debugging, and (insecure clusters only) the
   * HADOOP_USER_NAME override so the AM runs as the submitting user.
   */
  private Map addAMEnv(Configuration conf) throws IOException {
    Map env = new HashMap<>();
    ClasspathConstructor classpath =
        buildClasspath(YarnServiceConstants.SUBMITTED_CONF_DIR, "lib", fs,
            getConfig()
                .getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false));
    env.put("CLASSPATH", classpath.buildClasspath());
    env.put("LANG", "en_US.UTF-8");
    env.put("LC_ALL", "en_US.UTF-8");
    env.put("LANGUAGE", "en_US.UTF-8");
    String jaas = System.getenv("HADOOP_JAAS_DEBUG");
    if (jaas != null) {
      env.put("HADOOP_JAAS_DEBUG", jaas);
    }
    if (!UserGroupInformation.isSecurityEnabled()) {
      String userName = UserGroupInformation.getCurrentUser().getUserName();
      LOG.info("Run as user " + userName);
      // HADOOP_USER_NAME env is used by UserGroupInformation when log in
      // This env makes AM run as this user
      env.put("HADOOP_USER_NAME", userName);
    }
    LOG.info("AM env: \n{}", stringifyMap(env));
    return env;
  }

  /**
   * Localizes the service-core jar plus either a pre-uploaded dependency
   * tarball (when present on HDFS) or every jar from the configured lib dirs.
   *
   * @return the service's HDFS lib path
   */
  protected Path addJarResource(String serviceName,
      Map localResources)
      throws IOException, SliderException {
    Path libPath = fs.buildClusterDirPath(serviceName);
    ProviderUtils
        .addProviderJar(localResources, ServiceMaster.class, SERVICE_CORE_JAR,
            fs, libPath, "lib", false);
    Path dependencyLibTarGzip = fs.getDependencyTarGzip();
    if (fs.isFile(dependencyLibTarGzip)) {
      LOG.info("Loading lib tar from " + fs.getFileSystem().getScheme() + ":/"
          + dependencyLibTarGzip);
      SliderUtils.putAmTarGzipAndUpdate(localResources, fs);
    } else {
      String[] libs = SliderUtils.getLibDirs();
      for (String libDirProp : libs) {
        ProviderUtils.addAllDependencyJars(localResources, fs, libPath, "lib",
            libDirProp);
      }
    }
    return libPath;
  }

  /**
   * Copies the local yarn-service log4j file (from HADOOP_CONF_DIR, if any)
   * to the service's conf dir on HDFS and registers it as a LocalResource.
   *
   * @return true iff the log4j file existed and was localized
   */
  private boolean addAMLog4jResource(String serviceName, Configuration conf,
      Map localResources)
      throws IOException, BadClusterStateException {
    boolean hasAMLog4j = false;
    String hadoopConfDir =
        System.getenv(ApplicationConstants.Environment.HADOOP_CONF_DIR.name());
    if (hadoopConfDir != null) {
      File localFile = new File(hadoopConfDir,
          YarnServiceConstants.YARN_SERVICE_LOG4J_FILENAME);
      if (localFile.exists()) {
        Path localFilePath = createLocalPath(localFile);
        Path appDirPath = fs.buildClusterDirPath(serviceName);
        Path remoteConfPath =
            new Path(appDirPath, YarnServiceConstants.SUBMITTED_CONF_DIR);
        Path remoteFilePath = new Path(remoteConfPath,
            YarnServiceConstants.YARN_SERVICE_LOG4J_FILENAME);
        copy(conf, localFilePath, remoteFilePath);
        // NOTE(review): the LocalResource is created from remoteConfPath (the
        // directory), not remoteFilePath (the file just copied) -- verify
        // this is intentional and not a localization bug.
        LocalResource localResource =
            fs.createAmResource(remoteConfPath, LocalResourceType.FILE);
        localResources.put(localFilePath.getName(), localResource);
        hasAMLog4j = true;
      } else {
        LOG.warn("AM log4j property file doesn't exist: " + localFile);
      }
    }
    return hasAMLog4j;
  }

  /**
   * Starts (thaws) a previously persisted service: loads and re-validates the
   * spec from HDFS, ensures no live instance exists in the RM, submits a new
   * application, and persists the updated spec (with the new app id).
   *
   * @return 0 on success
   */
  public int actionStart(String serviceName) throws YarnException, IOException {
    ServiceApiUtil.validateNameFormat(serviceName, getConfig());
    Path appDir = checkAppExistOnHdfs(serviceName);
    Service service = ServiceApiUtil.loadService(fs, serviceName);
    ServiceApiUtil.validateAndResolveService(service, fs, getConfig());
    // see if it is actually running and bail out;
    verifyNoLiveAppInRM(serviceName, "thaw");
    ApplicationId appId = submitApp(service);
    service.setId(appId.toString());
    // write app definition on to hdfs
    createDirAndPersistApp(appDir, service);
    return 0;
  }

  /** Fails if the service's JSON definition already exists on HDFS. */
  private Path checkAppNotExistOnHdfs(Service service)
      throws IOException, SliderException {
    Path appDir = fs.buildClusterDirPath(service.getName());
    fs.verifyDirectoryNonexistent(
        new Path(appDir, service.getName() + ".json"));
    return appDir;
  }

  /** Fails unless the service's JSON definition exists on HDFS. */
  private Path checkAppExistOnHdfs(String serviceName)
      throws IOException, SliderException {
    Path appDir = fs.buildClusterDirPath(serviceName);
    fs.verifyPathExists(new Path(appDir, serviceName + ".json"));
    return appDir;
  }

  /** Creates the app dir with 750 permissions and persists the spec JSON. */
  private void createDirAndPersistApp(Path appDir, Service service)
      throws IOException, SliderException {
    FsPermission appDirPermission = new FsPermission("750");
    fs.createWithPermissions(appDir, appDirPermission);
    persistAppDef(appDir, service);
  }

  /** Serializes the service spec as {@code <name>.json} under appDir. */
  private void persistAppDef(Path appDir, Service service)
      throws IOException {
    Path appJson = new Path(appDir, service.getName() + ".json");
    jsonSerDeser
        .save(fs.getFileSystem(), appJson, service, true);
    LOG.info(
        "Persisted service " + service.getName() + " at " + appJson);
  }

  /**
   * In a secure cluster, localizes the AM keytab. A keytab pre-installed on
   * the host takes precedence; otherwise the keytab is looked up on HDFS and,
   * if missing, either fails (when required) or warns that the AM will run
   * without a Kerberos identity.
   */
  private void addKeytabResourceIfSecure(SliderFileSystem fileSystem,
      Map localResource, Configuration conf,
      String serviceName) throws IOException, BadConfigException {
    if (!UserGroupInformation.isSecurityEnabled()) {
      return;
    }
    String keytabPreInstalledOnHost =
        conf.get(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH);
    if (StringUtils.isEmpty(keytabPreInstalledOnHost)) {
      String amKeytabName =
          conf.get(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME);
      String keytabDir = conf.get(YarnServiceConf.KEY_HDFS_KEYTAB_DIR);
      Path keytabPath =
          fileSystem.buildKeytabPath(keytabDir, amKeytabName, serviceName);
      if (fileSystem.getFileSystem().exists(keytabPath)) {
        LocalResource keytabRes =
            fileSystem.createAmResource(keytabPath, LocalResourceType.FILE);
        localResource
            .put(YarnServiceConstants.KEYTAB_DIR + "/" + amKeytabName,
                keytabRes);
        LOG.info("Adding AM keytab on hdfs: " + keytabPath);
      } else {
        LOG.warn("No keytab file was found at {}.", keytabPath);
        if (conf.getBoolean(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_REQUIRED,
            false)) {
          throw new BadConfigException("No keytab file was found at %s.",
              keytabPath);
        } else {
          LOG.warn("The AM will be " +
              "started without a kerberos authenticated identity. " +
              "The service is therefore not guaranteed to remain " +
              "operational beyond 24 hours.");
        }
      }
    }
  }

  /**
   * Extends the service's lifetime by {@code lifetime} seconds from now via
   * an RM application-timeout update.
   *
   * @return the new expiry time in ISO8601 format
   */
  public String updateLifetime(String serviceName, long lifetime)
      throws YarnException, IOException {
    ApplicationId currentAppId = getAppId(serviceName);
    ApplicationReport report = yarnClient.getApplicationReport(currentAppId);
    if (report == null) {
      throw new YarnException("Service not found for " + serviceName);
    }
    ApplicationId appId = report.getApplicationId();
    LOG.info("Updating lifetime of an service: serviceName = " + serviceName
        + ", appId = " + appId + ", lifetime = " + lifetime);
    Map map = new HashMap<>();
    String newTimeout =
        Times.formatISO8601(System.currentTimeMillis() + lifetime * 1000);
    map.put(ApplicationTimeoutType.LIFETIME, newTimeout);
    UpdateApplicationTimeoutsRequest request =
        UpdateApplicationTimeoutsRequest.newInstance(appId, map);
    yarnClient.updateApplicationTimeouts(request);
    LOG.info(
        "Successfully updated lifetime for an service: serviceName = "
            + serviceName + ", appId = " + appId
            + ". New expiry time in ISO8601 format is " + newTimeout);
    return newTimeout;
  }

  /**
   * Maps a YARN final status onto a ServiceState. UNDEFINED (still running)
   * and any unmatched value fall through to ACCEPTED.
   */
  public ServiceState convertState(FinalApplicationStatus status) {
    switch (status) {
    case UNDEFINED:
      return ServiceState.ACCEPTED;
    case FAILED:
    case KILLED:
      return ServiceState.FAILED;
    case ENDED:
    case SUCCEEDED:
      return ServiceState.STOPPED;
    }
    return ServiceState.ACCEPTED;
  }

  /**
   * Fetches the raw status string from the AM over RPC.
   * Returns "" when the app is not RUNNING or the AM host is not yet known.
   */
  public String getStatusString(String appId)
      throws IOException, YarnException {
    ApplicationReport appReport =
        yarnClient.getApplicationReport(ApplicationId.fromString(appId));

    if (appReport.getYarnApplicationState() != RUNNING) {
      return "";
    }
    if (StringUtils.isEmpty(appReport.getHost())) {
      return "";
    }
    ClientAMProtocol amProxy =
        createAMProxy(appReport.getHost(), appReport.getRpcPort());
    GetStatusResponseProto response =
        amProxy.getStatus(GetStatusRequestProto.newBuilder().build());
    return response.getStatus();
  }

  /**
   * Returns the service status. Before the AM is reachable (app not RUNNING
   * or host unknown) only name/state/lifetime from the RM report are filled;
   * otherwise the full spec is fetched from the AM and deserialized.
   */
  public Service getStatus(String serviceName)
      throws IOException, YarnException {
    ServiceApiUtil.validateNameFormat(serviceName, getConfig());
    ApplicationId currentAppId = getAppId(serviceName);
    ApplicationReport appReport = yarnClient.getApplicationReport(currentAppId);
    Service appSpec = new Service();
    appSpec.setName(serviceName);
    appSpec.setState(convertState(appReport.getFinalApplicationStatus()));
    ApplicationTimeout lifetime =
        appReport.getApplicationTimeouts().get(ApplicationTimeoutType.LIFETIME);
    if (lifetime != null) {
      appSpec.setLifetime(lifetime.getRemainingTime());
    }

    if (appReport.getYarnApplicationState() != RUNNING) {
      LOG.info("Service {} is at {} state", serviceName,
          appReport.getYarnApplicationState());
      return appSpec;
    }
    if (StringUtils.isEmpty(appReport.getHost())) {
      LOG.warn(serviceName + " AM hostname is empty");
      return appSpec;
    }
    ClientAMProtocol amProxy =
        createAMProxy(appReport.getHost(), appReport.getRpcPort());
    GetStatusResponseProto response =
        amProxy.getStatus(GetStatusRequestProto.newBuilder().build());
    appSpec = jsonSerDeser.fromJson(response.getStatus());

    return appSpec;
  }

  /** @return the underlying YarnClient */
  public YarnClient getYarnClient() {
    return this.yarnClient;
  }

  /** Uploads the dependency tarball, overwriting any existing one. */
  public int enableFastLaunch() throws IOException, YarnException {
    return actionDependency(true);
  }

  /**
   * Tars and uploads the service's lib-dir jars to HDFS so AM launches can
   * localize a single archive. No-op (success) when the tarball already
   * exists and {@code overwrite} is false.
   *
   * @return EXIT_SUCCESS, or EXIT_FALSE when no lib dirs are configured
   */
  public int actionDependency(boolean overwrite)
      throws IOException, YarnException {
    String currentUser = RegistryUtils.currentUser();
    LOG.info("Running command as user {}", currentUser);

    Path dependencyLibTarGzip = fs.getDependencyTarGzip();

    // Check if dependency has already been uploaded, in which case log
    // appropriately and exit success (unless overwrite has been requested)
    if (fs.isFile(dependencyLibTarGzip) && !overwrite) {
      // NOTE(review): direct System.out use -- CLI-facing output, presumably
      // intentional; confirm it shouldn't go through the logger.
      System.out.println(String.format(
          "Dependency libs are already uploaded to %s.", dependencyLibTarGzip
              .toUri()));
      return EXIT_SUCCESS;
    }

    String[] libDirs = SliderUtils.getLibDirs();
    if (libDirs.length > 0) {
      File tempLibTarGzipFile = File.createTempFile(
          YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME + "_",
          YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT);
      // copy all jars
      tarGzipFolder(libDirs, tempLibTarGzipFile, createJarFilter());

      LOG.info("Version Info: " + VersionInfo.getBuildVersion());
      fs.copyLocalFileToHdfs(tempLibTarGzipFile, dependencyLibTarGzip,
          new FsPermission(YarnServiceConstants.DEPENDENCY_DIR_PERMISSIONS));
      return EXIT_SUCCESS;
    } else {
      return EXIT_FALSE;
    }
  }

  /** Creates an RPC proxy to the AM at the given host/port as current user. */
  protected ClientAMProtocol createAMProxy(String host, int port)
      throws IOException {
    InetSocketAddress address =
        NetUtils.createSocketAddrForHost(host, port);
    return ClientAMProxy.createProxy(getConfig(), ClientAMProtocol.class,
        UserGroupInformation.getCurrentUser(), rpc, address);
  }

  /**
   * Resolves (and caches) the ApplicationId for a service, falling back to
   * the persisted spec on HDFS on a cache miss.
   *
   * @throws YarnException when no persisted definition exists
   */
  private synchronized ApplicationId getAppId(String serviceName)
      throws IOException, YarnException {
    if (cachedAppIds.containsKey(serviceName)) {
      return cachedAppIds.get(serviceName);
    }
    Service persistedService = ServiceApiUtil.loadService(fs, serviceName);
    if (persistedService == null) {
      throw new YarnException("Service " + serviceName
          + " doesn't exist on hdfs. Please check if the app exists in RM");
    }
    ApplicationId currentAppId =
        ApplicationId.fromString(persistedService.getId());
    cachedAppIds.put(serviceName, currentAppId);
    return currentAppId;
  }
}

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.service.component;

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
import org.apache.hadoop.yarn.service.ContainerFailureTracker;
import org.apache.hadoop.yarn.service.ServiceContext;
import org.apache.hadoop.yarn.service.ServiceScheduler;
import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
import org.apache.hadoop.yarn.service.ServiceMetrics;
import org.apache.hadoop.yarn.service.provider.ProviderUtils;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.service.utils.SliderUtils;
import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils;
import org.apache.hadoop.yarn.service.monitor.probe.Probe;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.*;
import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*;
import static org.apache.hadoop.yarn.service.component.ComponentEventType.*;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.START;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.STOP;
import static org.apache.hadoop.yarn.service.component.ComponentState.*;
import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_THRESHOLD;

/**
 * Runtime representation of one component of a YARN service: owns the
 * component's instances, requests/releases containers from the RM, and drives
 * a state machine over INIT/FLEXING/STABLE in response to ComponentEvents.
 *
 * NOTE(review): recovered from a mangled diff chunk -- generic type
 * parameters (on EventHandler, Map, List, StateMachine, StateMachineFactory,
 * MultipleArcTransition, etc.) appear stripped by extraction; raw types are
 * kept as found. Confirm against the original Component.java.
 */
public class Component implements EventHandler {
  private static final Logger LOG = LoggerFactory.getLogger(Component.class);

  private org.apache.hadoop.yarn.service.api.records.Component componentSpec;
  // allocateId doubles as the RM allocation-request id and (cast to int) the
  // request Priority for this component's containers.
  private long allocateId;
  private Priority priority;
  private ServiceMetrics componentMetrics;
  private ServiceScheduler scheduler;
  private ServiceContext context;
  private AMRMClientAsync amrmClient;
  // Monotonic id source for naming new instances of this component.
  private AtomicLong instanceIdCounter = new AtomicLong();
  // All instances, keyed by instance name.
  private Map compInstances =
      new ConcurrentHashMap<>();
  // component instances to be assigned with a container
  private List pendingInstances = new LinkedList<>();
  private ContainerFailureTracker failureTracker;
  // Readiness probe derived from the component spec (may be null per
  // MonitorUtils semantics -- TODO confirm).
  private Probe probe;
  private final ReentrantReadWriteLock.ReadLock readLock;
  private final ReentrantReadWriteLock.WriteLock writeLock;
  public int maxContainerFailurePerComp;
  // The number of containers failed since last reset. This excludes preempted,
  // disk_failed containers etc. This will be reset to 0 periodically.
  public AtomicInteger currentContainerFailure = new AtomicInteger(0);

  private StateMachine stateMachine;
  private AsyncDispatcher compInstanceDispatcher;
  // Shared (static) transition table; per-component state lives in the
  // StateMachine instance built from it in the constructor.
  private static final StateMachineFactory stateMachineFactory =
      new StateMachineFactory(INIT)
          // INIT will only got to FLEXING
          .addTransition(INIT, EnumSet.of(STABLE, FLEXING),
              FLEX, new FlexComponentTransition())
          // container recovered on AM restart
          .addTransition(INIT, INIT, CONTAINER_RECOVERED,
              new ContainerRecoveredTransition())

          // container allocated by RM
          .addTransition(FLEXING, FLEXING, CONTAINER_ALLOCATED,
              new ContainerAllocatedTransition())
          // container launched on NM
          .addTransition(FLEXING, EnumSet.of(STABLE, FLEXING),
              CONTAINER_STARTED, new ContainerStartedTransition())
          // container failed while flexing
          .addTransition(FLEXING, FLEXING, CONTAINER_COMPLETED,
              new ContainerCompletedTransition())
          // Flex while previous flex is still in progress
          .addTransition(FLEXING, EnumSet.of(FLEXING), FLEX,
              new FlexComponentTransition())

          // container failed while stable
          .addTransition(STABLE, FLEXING, CONTAINER_COMPLETED,
              new ContainerCompletedTransition())
          // Ignore surplus container
          .addTransition(STABLE, STABLE, CONTAINER_ALLOCATED,
              new ContainerAllocatedTransition())
          // Flex by user
          // For flex up, go to FLEXING state
          // For flex down, go to STABLE state
          .addTransition(STABLE, EnumSet.of(STABLE, FLEXING),
              FLEX, new FlexComponentTransition())
          .installTopology();

  /**
   * Wires the component to the scheduler, registers its metrics, builds the
   * state machine, and pre-creates one pending instance per requested
   * container.
   */
  public Component(
      org.apache.hadoop.yarn.service.api.records.Component component,
      long allocateId, ServiceContext context) {
    this.allocateId = allocateId;
    this.priority = Priority.newInstance((int) allocateId);
    this.componentSpec = component;
    componentMetrics = ServiceMetrics.register(component.getName(),
        "Metrics for component " + component.getName());
    componentMetrics
        .tag("type", "Metrics type [component or service]", "component");
    this.scheduler = context.scheduler;
    this.context = context;
    amrmClient = scheduler.getAmRMClient();
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();
    this.stateMachine = stateMachineFactory.make(this);
    compInstanceDispatcher = scheduler.getCompInstanceDispatcher();
    failureTracker =
        new ContainerFailureTracker(context, this);
    probe = MonitorUtils.getProbe(componentSpec.getReadinessCheck());
    maxContainerFailurePerComp = componentSpec.getConfiguration()
        .getPropertyInt(CONTAINER_FAILURE_THRESHOLD, 10);
    createNumCompInstances(component.getNumberOfContainers());
  }

  /** Creates {@code count} new pending instances. */
  private void createNumCompInstances(long count) {
    for (int i = 0; i < count; i++) {
      createOneCompInstance();
    }
  }

  /** Allocates the next instance id and queues the instance as pending. */
  private void createOneCompInstance() {
    ComponentInstanceId id =
        new ComponentInstanceId(instanceIdCounter.getAndIncrement(),
            componentSpec.getName());
    ComponentInstance instance = new ComponentInstance(this, id);
    compInstances.put(instance.getCompInstanceName(), instance);
    pendingInstances.add(instance);
  }

  /**
   * Handles FLEX: scale up (request containers, go FLEXING), scale down
   * (destroy newest instances, go STABLE), or no-op. Deferred (state kept)
   * while dependencies are unsatisfied.
   */
  private static class FlexComponentTransition implements
      MultipleArcTransition {
    // For flex up, go to FLEXING state
    // For flex down, go to STABLE state
    @Override
    public ComponentState transition(Component component,
        ComponentEvent event) {
      component.setDesiredContainers((int) event.getDesired());
      if (!component.areDependenciesReady()) {
        LOG.info("[FLEX COMPONENT {}]: Flex deferred because dependencies not"
            + " satisfied.", component.getName());
        return component.getState();
      }
      if (component.getState() == INIT) {
        // This happens on init
        LOG.info("[INIT COMPONENT " + component.getName() + "]: " + event
            .getDesired() + " instances.");
        component.requestContainers(component.pendingInstances.size());
        return checkIfStable(component);
      }
      long before = component.getComponentSpec().getNumberOfContainers();
      long delta = event.getDesired() - before;
      component.getComponentSpec().setNumberOfContainers(event.getDesired());
      if (delta > 0) {
        // Scale up
        LOG.info("[FLEX UP COMPONENT " + component.getName()
            + "]: scaling up from " + before + " to " + event.getDesired());
        component.requestContainers(delta);
        component.createNumCompInstances(delta);
        component.componentSpec.setState(
            org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
        return FLEXING;
      } else if (delta < 0) {
        delta = 0 - delta;
        // scale down
        LOG.info("[FLEX DOWN COMPONENT " + component.getName()
            + "]: scaling down from " + before + " to " + event.getDesired());
        List list =
            new ArrayList<>(component.getAllComponentInstances());

        // sort in Most recent -> oldest order, destroy most recent ones.
        Collections.sort(list, Collections.reverseOrder());
        for (int i = 0; i < delta; i++) {
          ComponentInstance instance = list.get(i);
          // remove the instance
          component.compInstances.remove(instance.getCompInstanceName());
          component.pendingInstances.remove(instance);
          // NOTE(review): scale-down bumps containersFailed and decrements
          // containersRunning for every destroyed instance, even pending
          // ones -- confirm this metric accounting is intended.
          component.componentMetrics.containersFailed.incr();
          component.componentMetrics.containersRunning.decr();
          // decrement id counter
          component.instanceIdCounter.decrementAndGet();
          instance.destroy();
        }
        component.componentSpec.setState(
            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
        return STABLE;
      } else {
        LOG.info("[FLEX COMPONENT " + component.getName() + "]: already has "
            + event.getDesired() + " instances, ignoring");
        component.componentSpec.setState(
            org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
        return STABLE;
      }
    }
  }

  /** RM granted a container: hand it to the next pending instance. */
  private static class ContainerAllocatedTransition extends BaseTransition {
    @Override
    public void transition(Component component, ComponentEvent event) {
      component.assignContainerToCompInstance(event.getContainer());
    }
  }

  /**
   * AM-restart recovery: re-binds a still-running container to its instance,
   * or releases it when the instance is unknown/already bound.
   */
  private static class ContainerRecoveredTransition extends BaseTransition {
    @Override
    public void transition(Component component, ComponentEvent event) {
      ComponentInstance instance = event.getInstance();
      Container container = event.getContainer();
      if (instance == null) {
        LOG.info("[COMPONENT {}]: Trying to recover {} but event did not "
                + "specify component instance",
            component.getName(), container.getId());
        component.releaseContainer(container);
        return;
      }
      if (instance.hasContainer()) {
        LOG.info(
            "[COMPONENT {}]: Instance {} already has container, release "
                + "surplus container {}",
            instance.getCompName(), instance.getCompInstanceId(), container
                .getId());
        component.releaseContainer(container);
        return;
      }
      component.pendingInstances.remove(instance);
      LOG.info("[COMPONENT {}]: Recovered {} for component instance {} on "
              + "host {}, num pending component instances reduced to {} ",
          component.getName(), container.getId(), instance
              .getCompInstanceName(), container.getNodeId(), component
              .pendingInstances.size());
      instance.setContainer(container);
      ProviderUtils.initCompInstanceDir(component.getContext().fs, instance);
      component.getScheduler().addLiveCompInstance(container.getId(), instance);
      LOG.info("[COMPONENT {}]: Marking {} as started for component "
              + "instance {}", component.getName(), event.getContainer().getId(),
          instance.getCompInstanceId());
      component.compInstanceDispatcher.getEventHandler().handle(
          new ComponentInstanceEvent(instance.getContainerId(),
              START));
      component.incRunningContainers();
    }
  }

  /** NM launched a container: forward START to the instance, re-evaluate. */
  private static class ContainerStartedTransition implements
      MultipleArcTransition {

    @Override public ComponentState transition(Component component,
        ComponentEvent event) {
      component.compInstanceDispatcher.getEventHandler().handle(
          new ComponentInstanceEvent(event.getInstance().getContainerId(),
              START));
      component.incRunningContainers();
      return checkIfStable(component);
    }
  }

  /** STABLE iff running-container count equals the spec's desired count. */
  private static ComponentState checkIfStable(Component component) {
    // if desired == running
    if (component.componentMetrics.containersRunning.value() == component
        .getComponentSpec().getNumberOfContainers()) {
      component.componentSpec.setState(
          org.apache.hadoop.yarn.service.api.records.ComponentState.STABLE);
      return STABLE;
    } else {
      component.componentSpec.setState(
          org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
      return FLEXING;
    }
  }

  /**
   * Container exited: update metrics, requeue the instance as pending, and
   * forward STOP (with the final status) to the instance.
   */
  private static class ContainerCompletedTransition extends BaseTransition {
    @Override
    public void transition(Component component, ComponentEvent event) {
      component.updateMetrics(event.getStatus());

      // add back to pending list
      component.pendingInstances.add(event.getInstance());
      LOG.info(
          "[COMPONENT {}]: {} completed, num pending comp instances increased to {}.",
          component.getName(), event.getStatus().getContainerId(),
          component.pendingInstances.size());
      component.compInstanceDispatcher.getEventHandler().handle(
          new ComponentInstanceEvent(event.getStatus().getContainerId(),
              STOP).setStatus(event.getStatus()));
      component.componentSpec.setState(
          org.apache.hadoop.yarn.service.api.records.ComponentState.FLEXING);
    }
  }

  /** @return this component's metrics registry */
  public ServiceMetrics getCompMetrics() {
    return componentMetrics;
  }

  /** Returns an unneeded container to the RM and counts it as surplus. */
  private void releaseContainer(Container container) {
    scheduler.getAmRMClient().releaseAssignedContainer(container.getId());
    componentMetrics.surplusContainers.incr();
    scheduler.getServiceMetrics().surplusContainers.incr();
  }

  /**
   * Binds a freshly allocated container to the oldest pending instance and
   * kicks off its launch; releases the container when nothing is pending.
   */
  private void assignContainerToCompInstance(Container container) {
    if (pendingInstances.size() == 0) {
      LOG.info(
          "[COMPONENT {}]: No pending component instance left, release surplus container {}",
          getName(), container.getId());
      releaseContainer(container);
      return;
    }
    ComponentInstance instance = pendingInstances.remove(0);
    LOG.info(
        "[COMPONENT {}]: {} allocated, num pending component instances reduced to {}",
        getName(), container.getId(), pendingInstances.size());
    instance.setContainer(container);
    scheduler.addLiveCompInstance(container.getId(), instance);
    LOG.info(
        "[COMPONENT {}]: Assigned {} to component instance {} and launch on host {} ",
        getName(), container.getId(), instance.getCompInstanceName(),
        container.getNodeId());
    scheduler.getContainerLaunchService()
        .launchCompInstance(scheduler.getApp(), instance, container);
  }

  /**
   * Issues {@code count} container requests at this component's priority and
   * allocation-request id, sized from the component spec's resource.
   */
  @SuppressWarnings({ "unchecked" })
  public void requestContainers(long count) {
    Resource resource = Resource
        .newInstance(componentSpec.getResource().getMemoryMB(),
            componentSpec.getResource().getCpus());

    for (int i = 0; i < count; i++) {
      //TODO Once YARN-5468 is done, use that for anti-affinity
      ContainerRequest request =
          ContainerRequest.newBuilder().capability(resource).priority(priority)
              .allocationRequestId(allocateId).relaxLocality(true).build();
      amrmClient.addContainerRequest(request);
    }
  }

  /** Syncs desired-container gauges (service-level delta + component abs). */
  private void setDesiredContainers(int n) {
    int delta = n - scheduler.getServiceMetrics().containersDesired.value();
    if (delta > 0) {
      scheduler.getServiceMetrics().containersDesired.incr(delta);
    } else {
      // NOTE(review): delta is <= 0 here, so decr is called with a
      // non-positive argument -- confirm the metrics lib treats
      // decr(negative) as intended (this looks like it may re-increase).
      scheduler.getServiceMetrics().containersDesired.decr(delta);
    }
    componentMetrics.containersDesired.set(n);
  }



  /**
   * Accounts a completed container's exit status in component and service
   * metrics; SUCCESS short-circuits (not a failure), PREEMPTED/DISKS_FAILED
   * get their own counters, and blacklist-worthy exits bump node/container
   * failure trackers.
   */
  private void updateMetrics(ContainerStatus status) {
    switch (status.getExitStatus()) {
    case SUCCESS:
      componentMetrics.containersSucceeded.incr();
      scheduler.getServiceMetrics().containersSucceeded.incr();
      return;
    case PREEMPTED:
      componentMetrics.containersPreempted.incr();
      scheduler.getServiceMetrics().containersPreempted.incr();
      break;
    case DISKS_FAILED:
      componentMetrics.containersDiskFailure.incr();
      scheduler.getServiceMetrics().containersDiskFailure.incr();
      break;
    default:
      break;
    }

    // containersFailed include preempted, disks_failed etc.
    componentMetrics.containersFailed.incr();
    scheduler.getServiceMetrics().containersFailed.incr();

    // dec running container
    decRunningContainers();

    if (Apps.shouldCountTowardsNodeBlacklisting(status.getExitStatus())) {
      String host = scheduler.getLiveInstances().get(status.getContainerId())
          .getNodeId().getHost();
      failureTracker.incNodeFailure(host);
      currentContainerFailure.getAndIncrement();
    }
  }

  /**
   * True when every declared dependency component has at least as many ready
   * instances as desired; unknown dependencies are logged and skipped.
   */
  public boolean areDependenciesReady() {
    List dependencies = componentSpec.getDependencies();
    if (SliderUtils.isEmpty(dependencies)) {
      return true;
    }
    for (String dependency : dependencies) {
      Component dependentComponent =
          scheduler.getAllComponents().get(dependency);
      if (dependentComponent == null) {
        LOG.error("Couldn't find dependency {} for {} (should never happen)",
            dependency, getName());
        continue;
      }
      if (dependentComponent.getNumReadyInstances() < dependentComponent
          .getNumDesiredInstances()) {
        LOG.info("[COMPONENT {}]: Dependency {} not satisfied, only {} of {}"
                + " instances are ready.", getName(), dependency,
            dependentComponent.getNumReadyInstances(),
            dependentComponent.getNumDesiredInstances());
        return false;
      }
    }
    return true;
  }

  /**
   * Builds the COMPONENT_IP / COMPONENT_HOST substitution tokens (keyed by
   * upper-cased instance name) from dependency instances that have reported
   * an IP and host.
   */
  public Map getDependencyHostIpTokens() {
    Map tokens = new HashMap<>();
    List dependencies = componentSpec.getDependencies();
    if (SliderUtils.isEmpty(dependencies)) {
      return tokens;
    }
    for (String dependency : dependencies) {
      Collection instances = scheduler.getAllComponents()
          .get(dependency).getAllComponentInstances();
      for (ComponentInstance instance : instances) {
        if (instance.getContainerStatus() == null) {
          continue;
        }
        if (SliderUtils.isEmpty(instance.getContainerStatus().getIPs()) ||
            SliderUtils.isUnset(instance.getContainerStatus().getHost())) {
          continue;
        }
        String ip = instance.getContainerStatus().getIPs().get(0);
        String host = instance.getContainerStatus().getHost();
        tokens.put(String.format(COMPONENT_IP,
            instance.getCompInstanceName().toUpperCase()), ip);
        tokens.put(String.format(COMPONENT_HOST,
            instance.getCompInstanceName().toUpperCase()), host);
      }
    }
    return tokens;
  }

  /** Increments running-container gauges (component + service). */
  private void incRunningContainers() {
    componentMetrics.containersRunning.incr();
    scheduler.getServiceMetrics().containersRunning.incr();
  }

  /** Marks one more instance as ready (readiness probe passed). */
  public void incContainersReady() {
    componentMetrics.containersReady.incr();
  }

  /** Marks one instance as no longer ready. */
  public void decContainersReady() {
    componentMetrics.containersReady.decr();
  }

  /** Decrements running-container gauges (component + service). */
  private void decRunningContainers() {
    componentMetrics.containersRunning.decr();
    scheduler.getServiceMetrics().containersRunning.decr();
  }

  /** @return number of instances currently passing readiness */
  public int getNumReadyInstances() {
    return componentMetrics.containersReady.value();
  }

  /** @return number of instances with a running container */
  public int getNumRunningInstances() {
    return componentMetrics.containersRunning.value();
  }

  /** @return the desired instance count from metrics */
  public int getNumDesiredInstances() {
    return componentMetrics.containersDesired.value();
  }

  /** Looks up an instance by its name, or null. */
  public ComponentInstance getComponentInstance(String componentInstanceName) {
    return compInstances.get(componentInstanceName);
  }

  /** @return live view of all instances of this component */
  public Collection getAllComponentInstances() {
    return compInstances.values();
  }

  /** @return the API-level component spec backing this runtime component */
  public org.apache.hadoop.yarn.service.api.records.Component getComponentSpec() {
    return this.componentSpec;
  }

  /** Resets the periodic failure counters (container + per-node). */
  public void resetCompFailureCount() {
    LOG.info("[COMPONENT {}]: Reset container failure count from {} to 0.",
        getName(), currentContainerFailure.get());
    currentContainerFailure.set(0);
    failureTracker.resetContainerFailures();
  }

  /** @return the readiness probe, as derived from the spec */
  public Probe getProbe() {
    return probe;
  }

  /** @return the container-request priority for this component */
  public Priority getPriority() {
    return priority;
  }

  /** @return the allocation-request id used for this component's requests */
  public long getAllocateId() {
    return allocateId;
  }

  /** @return the component name from the spec */
  public String getName() {
    return componentSpec.getName();
  }

  /** @return current state-machine state (read-locked) */
  public ComponentState getState() {
    this.readLock.lock();

    try {
      return this.stateMachine.getCurrentState();
    } finally {
      this.readLock.unlock();
    }
  }

  /** @return the owning scheduler */
  public ServiceScheduler getScheduler() {
    return scheduler;
  }

  /**
   * Dispatches a ComponentEvent through the state machine under the write
   * lock; invalid transitions are logged, never thrown to the dispatcher.
   */
  @Override
  public void handle(ComponentEvent event) {
    try {
      writeLock.lock();
      ComponentState oldState = getState();
      try {
        stateMachine.doTransition(event.getType(), event);
      } catch (InvalidStateTransitionException e) {
        LOG.error(MessageFormat.format("[COMPONENT {0}]: Invalid event {1} at {2}",
            componentSpec.getName(), event.getType(), oldState), e);
      }
      if (oldState != getState()) {
        LOG.info("[COMPONENT {}] Transitioned from {} to {} on {} event.",
            componentSpec.getName(), oldState, getState(), event.getType());
      }
    } finally {
      writeLock.unlock();
    }
  }

  /** No-op single-arc transition base used by the simple handlers above. */
  private static class BaseTransition implements
      SingleArcTransition {

    @Override public void transition(Component component,
        ComponentEvent event) {
    }
  }

  /** @return the shared service context */
  public ServiceContext getContext() {
    return context;
  }
}

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.component; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.event.AbstractEvent; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; + +public class ComponentEvent extends AbstractEvent { + private long desired; + private final String name; + private final ComponentEventType type; + private Container container; + private ComponentInstance instance; + private ContainerStatus status; + + public ComponentEvent(String name, ComponentEventType type) { + super(type); + this.name = name; + this.type = type; + } + + public String getName() { + return name; + } + + public ComponentEventType getType() { + return type; + } + + public long getDesired() { + return desired; + } + + public ComponentEvent setDesired(long desired) { + this.desired = desired; + return this; + } + + public Container getContainer() { + return container; + } + + public ComponentEvent setContainer(Container container) { + this.container = container; + return this; + } + + public ComponentInstance getInstance() { + return instance; + } + + public ComponentEvent setInstance(ComponentInstance instance) { + this.instance = instance; + return this; + } + + public ContainerStatus getStatus() { + return status; + } + + public ComponentEvent setStatus(ContainerStatus status) { + this.status = status; + return this; + } +} diff --git 
package org.apache.hadoop.yarn.service.component;

/**
 * Event types consumed by the {@code Component} state machine via
 * {@code Component#handle(ComponentEvent)}.
 */
public enum ComponentEventType {
  FLEX,
  CONTAINER_ALLOCATED,
  CONTAINER_RECOVERED,
  CONTAINER_STARTED,
  CONTAINER_COMPLETED
}
package org.apache.hadoop.yarn.service.component;

/**
 * States of the {@code Component} state machine; the current value is
 * exposed through {@code Component#getState()}.
 */
public enum ComponentState {
  INIT,
  FLEXING,
  STABLE
}
package org.apache.hadoop.yarn.service.component.instance;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
import org.apache.hadoop.registry.client.types.ServiceRecord;
import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.service.ServiceScheduler;
import org.apache.hadoop.yarn.service.api.records.ContainerState;
import org.apache.hadoop.yarn.service.component.Component;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.BoundedAppender;
import org.apache.hadoop.yarn.service.utils.SliderUtils;
import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher;
import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus;
import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.text.MessageFormat;
import java.util.Date;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

import static org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes.*;
import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.KILLED_BY_APPMASTER;
import static org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.*;
import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.*;

/**
 * One running instance of a service {@link Component}, backed by a single
 * YARN container. The lifecycle is driven by a state machine over
 * {@link ComponentInstanceState}: INIT -&gt; STARTED on START,
 * STARTED &lt;-&gt; READY on BECOME_READY / BECOME_NOT_READY, and any running
 * state -&gt; INIT on STOP (the stopped transition re-requests a replacement
 * container).
 */
public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
    Comparable<ComponentInstance> {
  private static final Logger LOG =
      LoggerFactory.getLogger(ComponentInstance.class);

  private StateMachine<ComponentInstanceState, ComponentInstanceEventType,
      ComponentInstanceEvent> stateMachine;
  private Component component;
  private final ReadLock readLock;
  private final WriteLock writeLock;

  private ComponentInstanceId compInstanceId = null;
  private Path compInstanceDir;
  private Container container;
  private YarnRegistryViewForProviders yarnRegistryOperations;
  private FileSystem fs;
  private boolean timelineServiceEnabled = false;
  private ServiceTimelinePublisher serviceTimelinePublisher;
  private ServiceScheduler scheduler;
  // Bounded so a flapping container cannot grow diagnostics without limit.
  private BoundedAppender diagnostics = new BoundedAppender(64 * 1024);
  // Periodic NM poll for the container's IP/host; cancelled once both known.
  private volatile ScheduledFuture<?> containerStatusFuture;
  private volatile ContainerStatus status;
  private long containerStartedTime = 0;
  // This container object is used for rest API query.
  private org.apache.hadoop.yarn.service.api.records.Container containerSpec;

  private static final StateMachineFactory<ComponentInstance,
      ComponentInstanceState, ComponentInstanceEventType,
      ComponentInstanceEvent> stateMachineFactory =
      new StateMachineFactory<ComponentInstance, ComponentInstanceState,
          ComponentInstanceEventType, ComponentInstanceEvent>(INIT)
      .addTransition(INIT, STARTED, START,
          new ContainerStartedTransition())
      // Container failed before launching: nothing to clean up from registry.
      // This could happen if NMClient#startContainerAsync failed -- the
      // container completes while COMP_INSTANCE is still at INIT.
      .addTransition(INIT, INIT, STOP,
          new ContainerStoppedTransition(true))

      // From STARTED (running)
      .addTransition(STARTED, INIT, STOP,
          new ContainerStoppedTransition())
      .addTransition(STARTED, READY, BECOME_READY,
          new ContainerBecomeReadyTransition())

      // From READY
      .addTransition(READY, STARTED, BECOME_NOT_READY,
          new ContainerBecomeNotReadyTransition())
      .addTransition(READY, INIT, STOP, new ContainerStoppedTransition())
      .installTopology();

  public ComponentInstance(Component component,
      ComponentInstanceId compInstanceId) {
    this.stateMachine = stateMachineFactory.make(this);
    this.component = component;
    this.compInstanceId = compInstanceId;
    this.scheduler = component.getScheduler();
    this.yarnRegistryOperations =
        component.getScheduler().getYarnRegistryOperations();
    this.serviceTimelinePublisher =
        component.getScheduler().getServiceTimelinePublisher();
    if (YarnConfiguration
        .timelineServiceV2Enabled(component.getScheduler().getConfig())) {
      this.timelineServiceEnabled = true;
    }
    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    this.readLock = lock.readLock();
    this.writeLock = lock.writeLock();
    this.fs = scheduler.getContext().fs.getFileSystem();
  }

  /**
   * START: begin polling the NM for the container's IP/host, record the
   * launch time (from the container token when available), and publish the
   * new container spec (replacing any previous one) to the REST view and ATS.
   */
  private static class ContainerStartedTransition extends BaseTransition {
    @Override public void transition(ComponentInstance compInstance,
        ComponentInstanceEvent event) {
      // Query container status for ip and host
      compInstance.containerStatusFuture =
          compInstance.scheduler.executorService.scheduleAtFixedRate(
              new ContainerStatusRetriever(compInstance.scheduler,
                  compInstance.getContainerId(), compInstance), 0, 1,
              TimeUnit.SECONDS);

      long containerStartTime = System.currentTimeMillis();
      try {
        ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
            .newContainerTokenIdentifier(compInstance.getContainer()
                .getContainerToken());
        containerStartTime = containerTokenIdentifier.getCreationTime();
      } catch (Exception e) {
        LOG.info("Could not get container creation time, using current time");
      }
      org.apache.hadoop.yarn.service.api.records.Container container =
          new org.apache.hadoop.yarn.service.api.records.Container();
      container.setId(compInstance.getContainerId().toString());
      container.setLaunchTime(new Date(containerStartTime));
      container.setState(ContainerState.RUNNING_BUT_UNREADY);
      container.setBareHost(compInstance.container.getNodeId().getHost());
      container.setComponentInstanceName(compInstance.getCompInstanceName());
      if (compInstance.containerSpec != null) {
        // remove the previous container.
        compInstance.getCompSpec().removeContainer(compInstance.containerSpec);
      }
      compInstance.containerSpec = container;
      compInstance.getCompSpec().addContainer(container);
      compInstance.containerStartedTime = containerStartTime;

      if (compInstance.timelineServiceEnabled) {
        compInstance.serviceTimelinePublisher
            .componentInstanceStarted(container, compInstance);
      }
    }
  }

  /** BECOME_READY: bump the component's ready gauge and mark the spec READY. */
  private static class ContainerBecomeReadyTransition extends BaseTransition {
    @Override
    public void transition(ComponentInstance compInstance,
        ComponentInstanceEvent event) {
      compInstance.component.incContainersReady();
      compInstance.containerSpec.setState(ContainerState.READY);
    }
  }

  /** BECOME_NOT_READY: reverse of BECOME_READY. */
  private static class ContainerBecomeNotReadyTransition extends BaseTransition {
    @Override
    public void transition(ComponentInstance compInstance,
        ComponentInstanceEvent event) {
      compInstance.component.decContainersReady();
      compInstance.containerSpec.setState(ContainerState.RUNNING_BUT_UNREADY);
    }
  }

  /**
   * STOP: request a replacement container, append diagnostics, and terminate
   * the AM if the component has exceeded its failure threshold. Registry/ATS
   * cleanup is skipped when the container never launched.
   */
  private static class ContainerStoppedTransition extends BaseTransition {
    // Whether the container failed before being launched by the AM or not.
    boolean failedBeforeLaunching = false;

    public ContainerStoppedTransition(boolean failedBeforeLaunching) {
      this.failedBeforeLaunching = failedBeforeLaunching;
    }

    public ContainerStoppedTransition() {
      this(false);
    }

    @Override
    public void transition(ComponentInstance compInstance,
        ComponentInstanceEvent event) {
      // re-ask the failed container.
      Component comp = compInstance.component;
      comp.requestContainers(1);
      LOG.info(compInstance.getCompInstanceId()
              + ": Container completed. Requested a new container." + System
              .lineSeparator() + " exitStatus={}, diagnostics={}.",
          event.getStatus().getExitStatus(),
          event.getStatus().getDiagnostics());
      String containerDiag =
          compInstance.getCompInstanceId() + ": " + event.getStatus()
              .getDiagnostics();
      compInstance.diagnostics.append(containerDiag + System.lineSeparator());

      boolean shouldExit = false;
      // check if it exceeds the failure threshold
      if (comp.currentContainerFailure.get() > comp.maxContainerFailurePerComp) {
        String exitDiag = MessageFormat.format(
            "[COMPONENT {0}]: Failed {1} times, exceeded the limit - {2}. Shutting down now... "
                + System.lineSeparator(),
            comp.getName(), comp.currentContainerFailure.get(),
            comp.maxContainerFailurePerComp);
        compInstance.diagnostics.append(exitDiag);
        // append to global diagnostics that will be reported to RM.
        comp.getScheduler().getDiagnostics().append(containerDiag);
        comp.getScheduler().getDiagnostics().append(exitDiag);
        LOG.warn(exitDiag);
        shouldExit = true;
      }

      if (!failedBeforeLaunching) {
        // Clean up registry. If the container failed before launching, no
        // need to clean up the registry because it was never registered.
        // HDFS dir content will be overwritten when a new container starts,
        // so no need to remove it.
        compInstance.scheduler.executorService
            .submit(compInstance::cleanupRegistry);
        if (compInstance.timelineServiceEnabled) {
          // record in ATS
          compInstance.serviceTimelinePublisher
              .componentInstanceFinished(compInstance,
                  event.getStatus().getExitStatus(),
                  event.getStatus().getState(), containerDiag);
        }
        compInstance.containerSpec.setState(ContainerState.STOPPED);
      }

      // remove the failed ContainerId -> CompInstance mapping
      comp.getScheduler().removeLiveCompInstance(event.getContainerId());

      if (shouldExit) {
        // Sleep for 5 seconds in hope that the state can be recorded in ATS.
        // in case there's a client polling the comp state, it can be notified.
        try {
          Thread.sleep(5000);
        } catch (InterruptedException e) {
          LOG.error("Interrupted on sleep while exiting.", e);
        }
        ExitUtil.terminate(-1);
      }

      compInstance.removeContainer();
    }
  }

  /**
   * @return the current state, read under the read lock so a concurrent
   * transition is never half-observed.
   */
  public ComponentInstanceState getState() {
    this.readLock.lock();

    try {
      return this.stateMachine.getCurrentState();
    } finally {
      this.readLock.unlock();
    }
  }

  /**
   * Dispatches an instance event into the state machine under the write lock.
   * Invalid transitions are logged, not propagated.
   */
  @Override
  public void handle(ComponentInstanceEvent event) {
    // Acquire before try: if lock() failed we must not unlock in the
    // finally-block without holding the lock.
    writeLock.lock();
    try {
      ComponentInstanceState oldState = getState();
      try {
        stateMachine.doTransition(event.getType(), event);
      } catch (InvalidStateTransitionException e) {
        LOG.error(getCompInstanceId() + ": Invalid event " + event.getType()
            + " at " + oldState, e);
      }
      if (oldState != getState()) {
        LOG.info(getCompInstanceId() + " Transitioned from " + oldState + " to "
            + getState() + " on " + event.getType() + " event");
      }
    } finally {
      writeLock.unlock();
    }
  }

  public boolean hasContainer() {
    return this.container != null;
  }

  /** Detaches this instance from its container (clears the id as well). */
  public void removeContainer() {
    this.container = null;
    this.compInstanceId.setContainerId(null);
  }

  public void setContainer(Container container) {
    this.container = container;
    this.compInstanceId.setContainerId(container.getId());
  }

  public String getCompInstanceName() {
    return compInstanceId.getCompInstanceName();
  }

  public ContainerStatus getContainerStatus() {
    return status;
  }

  /**
   * Records the latest NM-reported status, propagates IP/hostname into the
   * REST container spec and ATS, and refreshes the registry service record.
   */
  public void updateContainerStatus(ContainerStatus status) {
    this.status = status;
    org.apache.hadoop.yarn.service.api.records.Container container =
        getCompSpec().getContainer(getContainerId().toString());
    if (container != null) {
      container.setIp(StringUtils.join(",", status.getIPs()));
      container.setHostname(status.getHost());
      if (timelineServiceEnabled) {
        serviceTimelinePublisher.componentInstanceUpdated(container);
      }
    }
    updateServiceRecord(yarnRegistryOperations, status);
  }

  public ContainerId getContainerId() {
    return container.getId();
  }

  public String getCompName() {
    return compInstanceId.getCompName();
  }

  public void setCompInstanceDir(Path dir) {
    this.compInstanceDir = dir;
  }

  public Component getComponent() {
    return component;
  }

  public Container getContainer() {
    return container;
  }

  public ComponentInstanceId getCompInstanceId() {
    return compInstanceId;
  }

  public NodeId getNodeId() {
    return this.container.getNodeId();
  }

  public org.apache.hadoop.yarn.service.api.records.Component getCompSpec() {
    return component.getComponentSpec();
  }

  /** No-op base for state-machine transitions; subclasses override. */
  private static class BaseTransition implements
      SingleArcTransition<ComponentInstance, ComponentInstanceEvent> {

    @Override public void transition(ComponentInstance compInstance,
        ComponentInstanceEvent event) {
    }
  }

  /**
   * Probes this instance for readiness. When no probe is configured the
   * instance is considered healthy.
   */
  public ProbeStatus ping() {
    if (component.getProbe() == null) {
      ProbeStatus status = new ProbeStatus();
      status.setSuccess(true);
      return status;
    }
    return component.getProbe().ping(this);
  }

  // Write service record into registry.
  private void updateServiceRecord(
      YarnRegistryViewForProviders yarnRegistry, ContainerStatus status) {
    ServiceRecord record = new ServiceRecord();
    String containerId = status.getContainerId().toString();
    record.set(YARN_ID, containerId);
    record.description = getCompInstanceName();
    record.set(YARN_PERSISTENCE, PersistencePolicies.CONTAINER);
    record.set(YARN_IP, status.getIPs().get(0));
    record.set(YARN_HOSTNAME, status.getHost());
    try {
      yarnRegistry
          .putComponent(RegistryPathUtils.encodeYarnID(containerId), record);
    } catch (IOException e) {
      // Log the cause -- the original code dropped the exception here.
      LOG.error("Failed to update service record in registry: " + containerId,
          e);
    }
  }

  // Release the container, clean up registry, hdfs dir, and record in ATS.
  public void destroy() {
    LOG.info(getCompInstanceId() + ": Flexed down by user, destroying.");
    diagnostics.append(getCompInstanceId() + ": Flexed down by user");
    if (container != null) {
      scheduler.removeLiveCompInstance(container.getId());
      component.getScheduler().getAmRMClient()
          .releaseAssignedContainer(container.getId());
      getCompSpec().removeContainer(containerSpec);
    }
    if (timelineServiceEnabled) {
      serviceTimelinePublisher
          .componentInstanceFinished(this, KILLED_BY_APPMASTER, COMPLETE,
              diagnostics.toString());
    }
    scheduler.executorService.submit(this::cleanupRegistryAndCompHdfsDir);
  }

  private void cleanupRegistry() {
    ContainerId containerId = getContainerId();
    String cid = RegistryPathUtils.encodeYarnID(containerId.toString());
    try {
      yarnRegistryOperations.deleteComponent(getCompInstanceId(), cid);
    } catch (IOException e) {
      LOG.error(getCompInstanceId() + ": Failed to delete registry", e);
    }
  }

  // TODO Maybe have a dedicated cleanup service.
  public void cleanupRegistryAndCompHdfsDir() {
    cleanupRegistry();
    try {
      if (compInstanceDir != null && fs.exists(compInstanceDir)) {
        boolean deleted = fs.delete(compInstanceDir, true);
        if (!deleted) {
          LOG.error(getCompInstanceId()
              + ": Failed to delete component instance dir: "
              + compInstanceDir);
        } else {
          LOG.info(getCompInstanceId() + ": Deleted component instance dir: "
              + compInstanceDir);
        }
      }
    } catch (IOException e) {
      LOG.warn(getCompInstanceId() + ": Failed to delete directory", e);
    }
  }

  // Query container status until ip and hostname are available and update
  // the service record into registry service.
  private static class ContainerStatusRetriever implements Runnable {
    private ContainerId containerId;
    private NodeId nodeId;
    private NMClient nmClient;
    private ComponentInstance instance;

    ContainerStatusRetriever(ServiceScheduler scheduler,
        ContainerId containerId, ComponentInstance instance) {
      this.containerId = containerId;
      this.nodeId = instance.getNodeId();
      this.nmClient = scheduler.getNmClient().getClient();
      this.instance = instance;
    }

    @Override public void run() {
      ContainerStatus status = null;
      try {
        status = nmClient.getContainerStatus(containerId, nodeId);
      } catch (Exception e) {
        if (e instanceof YarnException) {
          // Unrecoverable query failure: abort the retriever.
          throw new YarnRuntimeException(
              instance.compInstanceId + " Failed to get container status on "
                  + nodeId + " , cancelling.", e);
        }
        LOG.error(instance.compInstanceId
            + " Failed to get container status on " + nodeId
            + ", will try again", e);
        return;
      }
      if (SliderUtils.isEmpty(status.getIPs()) || SliderUtils
          .isUnset(status.getHost())) {
        // Keep polling until both IP and host are reported.
        return;
      }
      instance.updateContainerStatus(status);
      LOG.info(
          instance.compInstanceId + " IP = " + status.getIPs() + ", host = "
              + status.getHost() + ", cancel container status retriever");
      instance.containerStatusFuture.cancel(false);
    }
  }

  /** Orders by start time, then by instance id for equal start times. */
  @Override
  public int compareTo(ComponentInstance to) {
    long delta = containerStartedTime - to.containerStartedTime;
    if (delta == 0) {
      return getCompInstanceId().compareTo(to.getCompInstanceId());
    } else if (delta < 0) {
      return -1;
    } else {
      return 1;
    }
  }

  @Override public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ComponentInstance instance = (ComponentInstance) o;

    if (containerStartedTime != instance.containerStartedTime)
      return false;
    return compInstanceId.equals(instance.compInstanceId);
  }

  @Override public int hashCode() {
    int result = compInstanceId.hashCode();
    result = 31 * result + (int) (containerStartedTime ^ (containerStartedTime
        >>> 32));
    return result;
  }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.component.instance; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.event.AbstractEvent; + +public class ComponentInstanceEvent + extends AbstractEvent { + + private ContainerId id; + private ContainerStatus status; + private boolean shouldDestroy = false; + + public ComponentInstanceEvent(ContainerId containerId, + ComponentInstanceEventType componentInstanceEventType) { + super(componentInstanceEventType); + this.id = containerId; + } + + public ContainerId getContainerId() { + return id; + } + + public ContainerStatus getStatus() { + return this.status; + } + + public ComponentInstanceEvent setStatus(ContainerStatus status) { + this.status = status; + return this; + } + + public void setShouldDestroy() { + shouldDestroy = true; + } + + public boolean shouldDestroy() { + return shouldDestroy; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEventType.java new file mode 100644 index 0000000..1a880ba --- /dev/null +++ 
package org.apache.hadoop.yarn.service.component.instance;

/**
 * Event types consumed by the {@code ComponentInstance} state machine
 * (see the transitions declared in {@code ComponentInstance}).
 */
public enum ComponentInstanceEventType {
  START,
  STOP,
  BECOME_READY,
  BECOME_NOT_READY
}
package org.apache.hadoop.yarn.service.component.instance;

import java.util.Objects;

import org.apache.hadoop.yarn.api.records.ContainerId;

/**
 * Identity of a component instance: a component name plus a per-component
 * instance number, optionally associated with the current container id.
 * Equality and ordering are based on (name, id) only; the container id is
 * mutable state used for display.
 */
public class ComponentInstanceId implements Comparable<ComponentInstanceId> {

  // Renamed from non-conventional "Id"; getters unchanged.
  private final long id;
  private final String name;
  private ContainerId containerId;

  public ComponentInstanceId(long id, String name) {
    this.id = id;
    this.name = name;
  }

  public long getId() {
    return id;
  }

  public String getCompName() {
    return name;
  }

  /** @return the display name, e.g. {@code <component>-<id>}. */
  public String getCompInstanceName() {
    return getCompName() + "-" + getId();
  }

  public void setContainerId(ContainerId containerId) {
    this.containerId = containerId;
  }

  @Override
  public String toString() {
    if (containerId == null) {
      return "[COMPINSTANCE " + getCompInstanceName() + "]";
    } else {
      return "[COMPINSTANCE " + getCompInstanceName() + " : " + containerId
          + "]";
    }
  }

  @Override public boolean equals(Object o) {
    if (this == o)
      return true;
    if (o == null || getClass() != o.getClass())
      return false;

    ComponentInstanceId that = (ComponentInstanceId) o;
    // Objects.equals is null-safe and value-identical to the previous
    // hand-rolled null checks.
    return getId() == that.getId()
        && Objects.equals(getCompName(), that.getCompName());
  }

  @Override public int hashCode() {
    // Long.hashCode(id) == (int) (id ^ (id >>> 32)) -- same value as before.
    int result = Long.hashCode(getId());
    result = 31 * result
        + (getCompName() != null ? getCompName().hashCode() : 0);
    return result;
  }

  /** Orders by component name, then by instance id; clamped to -1/0/1. */
  @Override
  public int compareTo(ComponentInstanceId to) {
    int delta = this.getCompName().compareTo(to.getCompName());
    if (delta != 0) {
      return Integer.signum(delta);
    }
    return Long.compare(this.getId(), to.getId());
  }
}
/**
 * Coarse lifecycle states of a single component instance. Transitions are
 * driven elsewhere by {@code ComponentInstanceEventType} events; declaration
 * order is meaningful to callers relying on {@code ordinal()}.
 */
public enum ComponentInstanceState {
  INIT,
  STARTED,
  READY,
  UPGRADING
}
/**
 * Constants for the YARN services REST API: endpoint paths, path-parameter
 * names, and service-specific error codes.
 */
public interface RestApiConstants {

  // REST endpoints, relative to the web application root.
  String CONTEXT_ROOT = "/ws/v1";
  String VERSION = "/services/version";
  String SERVICE_ROOT_PATH = "/services";
  String SERVICE_PATH = "/services/{service_name}";
  String COMPONENT_PATH =
      "/services/{service_name}/components/{component_name}";

  // Path/query parameter names.
  String SERVICE_NAME = "service_name";
  String COMPONENT_NAME = "component_name";

  // Sentinel meaning "no lifetime limit". Fixed the literal suffix from
  // lowercase 'l' to 'L': lowercase is easily misread as the digit 1.
  Long DEFAULT_UNLIMITED_LIFETIME = -1L;

  // Error codes; pattern looks like HTTP status (404) * 1000 + sub-code.
  Integer ERROR_CODE_APP_DOES_NOT_EXIST = 404001;
  Integer ERROR_CODE_APP_IS_NOT_RUNNING = 404002;
  Integer ERROR_CODE_APP_SUBMITTED_BUT_NOT_RUNNING_YET = 404003;
  Integer ERROR_CODE_APP_NAME_INVALID = 404004;
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes; + +public interface SliderExitCodes extends LauncherExitCodes { + + /** + * starting point for exit codes; not an exception itself + */ + int _EXIT_CODE_BASE = 64; + + /** + * service entered the failed state: {@value} + */ + int EXIT_YARN_SERVICE_FAILED = 65; + + /** + * service was killed: {@value} + */ + int EXIT_YARN_SERVICE_KILLED = 66; + + /** + * timeout on monitoring client: {@value} + */ + int EXIT_TIMED_OUT = 67; + + /** + * service finished with an error: {@value} + */ + int EXIT_YARN_SERVICE_FINISHED_WITH_ERROR = 68; + + /** + * the service instance is unknown: {@value} + */ + int EXIT_UNKNOWN_INSTANCE = 69; + + /** + * the service instance is in the wrong state for that operation: {@value} + */ + int EXIT_BAD_STATE = 70; + + /** + * A spawned master process failed + */ + int EXIT_PROCESS_FAILED = 71; + + /** + * The instance failed -too many containers were + * failing or some other threshold was reached + */ + int EXIT_DEPLOYMENT_FAILED = 72; + + /** + * The service is live -and the requested operation + * does not work if the cluster is running + */ + int EXIT_APPLICATION_IN_USE = 73; + + /** + * There already is an service instance of that name + * when an attempt is made to create a new instance + */ + int EXIT_INSTANCE_EXISTS = 75; + + /** + * Exit code when the configurations in valid/incomplete: {@value} + */ + int EXIT_BAD_CONFIGURATION = 77; + +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java new file mode 100644 index 0000000..1968e95 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.yarn.service.api.records.Configuration; + +public class YarnServiceConf { + + // Retry settings for the ServiceClient to talk to Service AppMaster + public static final String CLIENT_AM_RETRY_MAX_WAIT_MS = "yarn.service.client-am.retry.max-wait-ms"; + public static final String CLIENT_AM_RETRY_MAX_INTERVAL_MS = "yarn.service.client-am.retry-interval-ms"; + + // Retry settings for container failures + public static final String CONTAINER_RETRY_MAX = "yarn.service.container-failure.retry.max"; + public static final String CONTAINER_RETRY_INTERVAL = "yarn.service.container-failure.retry-interval"; + + public static final String AM_RESTART_MAX = "yarn.service.am-restart.max-attempts"; + public static final String AM_RESOURCE_MEM = "yarn.service.am-resource.memory"; + public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024; + + public static final String YARN_QUEUE = "yarn.service.queue"; + + public static final String API_SERVER_ADDRESS = "yarn.service.api-server.address"; + public static final String DEFAULT_API_SERVER_ADDRESS = "0.0.0.0:"; + public static final int DEFAULT_API_SERVER_PORT = 9191; + + /** + * The yarn service base path: + * Defaults to HomeDir/.yarn/ + */ + public static final String YARN_SERVICE_BASE_PATH = "yarn.service.base.path"; + + //TODO rename + /** Declare that a keytab must be provided */ + public static final String KEY_AM_LOGIN_KEYTAB_REQUIRED = "slider.am.login.keytab.required"; + public static final String KEY_AM_LOGIN_KEYTAB_NAME = "slider.am.login.keytab.name"; + public static final String KEY_HDFS_KEYTAB_DIR = "slider.hdfs.keytab.dir"; + public static final String KEY_AM_KEYTAB_LOCAL_PATH = "slider.am.keytab.local.path"; + + /** + * maximum number of failed containers (in a single component) + * before the app exits + */ + public static final String CONTAINER_FAILURE_THRESHOLD = + "yarn.service.container-failure-per-component.threshold"; + /** + * 
Maximum number of container failures on a node before the node is blacklisted + */ + public static final String NODE_BLACKLIST_THRESHOLD = + "yarn.service.node-blacklist.threshold"; + + /** + * The failure count for CONTAINER_FAILURE_THRESHOLD and NODE_BLACKLIST_THRESHOLD + * gets reset periodically, the unit is seconds. + */ + public static final String CONTAINER_FAILURE_WINDOW = + "yarn.service.failure-count-reset.window"; + + /** + * interval between readiness checks. + */ + public static final String READINESS_CHECK_INTERVAL = "yarn.service.readiness-check-interval.seconds"; + public static final int DEFAULT_READINESS_CHECK_INTERVAL = 30; // seconds + + /** + * Get long value for the property. First get from the userConf, if not + * present, get from systemConf. + * + * @param name name of the property + * @param defaultValue default value of the property, if it is not defined in + * userConf and systemConf. + * @param userConf Configuration provided by client in the JSON definition + * @param systemConf The YarnConfiguration in the system. 
+ * @return long value for the property + */ + public static long getLong(String name, long defaultValue, + Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) { + return userConf.getPropertyLong(name, systemConf.getLong(name, defaultValue)); + } + + public static int getInt(String name, int defaultValue, + Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) { + return userConf.getPropertyInt(name, systemConf.getInt(name, defaultValue)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java new file mode 100644 index 0000000..e5ed703 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConstants.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * File-system layout, naming and environment constants shared across the
 * YARN service framework.
 */
public interface YarnServiceConstants {

  /** The path under which cluster and temp data are stored. */
  String SERVICE_BASE_DIRECTORY = ".yarn";

  // Paths under which Service AM dependency libraries are stored.
  String DEPENDENCY_LOCALIZED_DIR_LINK = "service_dep";
  String DEPENDENCY_DIR = "/yarn-services/%s/";
  String DEPENDENCY_TAR_GZ_FILE_NAME = "service-dep";
  String DEPENDENCY_TAR_GZ_FILE_EXT = ".tar.gz";
  String DEPENDENCY_DIR_PERMISSIONS = "755";

  /** Application type registered for YARN services. */
  String APP_TYPE = "yarn-service";

  String KEYTAB_DIR = "keytabs";
  String RESOURCE_DIR = "resources";

  String SERVICES_DIRECTORY = "services";

  /**
   * JVM property naming the service lib directory;
   * set by the yarn.sh script.
   */
  String PROPERTY_LIB_DIR = "service.libdir";

  /** Name of the generated directory for this configuration. */
  String SUBMITTED_CONF_DIR = "conf";

  /** Service AM log4j file name. */
  String YARN_SERVICE_LOG4J_FILENAME = "yarnservice-log4j.properties";

  /** Log4j system property naming the resource. */
  String SYSPROP_LOG4J_CONFIGURATION = "log4j.configuration";

  /** System property for the Service AM log4j directory. */
  String SYSPROP_LOG_DIR = "LOG_DIR";

  String TMP_DIR_PREFIX = "tmp";

  String SERVICE_CORE_JAR = "yarn-service-core.jar";

  // AM-side stdout/stderr capture files.
  String STDOUT_AM = "serviceam-out.txt";
  String STDERR_AM = "serviceam-err.txt";

  String HADOOP_USER_NAME = "HADOOP_USER_NAME";

  String APP_CONF_DIR = "conf";

  String APP_LIB_DIR = "lib";

  // Container-side stdout/stderr capture files.
  String OUT_FILE = "stdout.txt";
  String ERR_FILE = "stderr.txt";
}
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java new file mode 100644 index 0000000..e4eae20 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java @@ -0,0 +1,271 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.containerlaunch; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.ContainerRetryContext; +import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.util.Records; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.utils.CoreFileSystem; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import static org.apache.hadoop.yarn.service.provider.docker.DockerKeys.DEFAULT_DOCKER_NETWORK; + +/** + * Launcher of applications: base class + */ +public class AbstractLauncher { + private static final Logger log = + LoggerFactory.getLogger(AbstractLauncher.class); + public static final String CLASSPATH = "CLASSPATH"; + /** + * Filesystem to use for the launch + */ + protected final CoreFileSystem coreFileSystem; + /** + * Env vars; set up at final launch stage + */ + protected final Map envVars = new HashMap<>(); + protected final ContainerLaunchContext containerLaunchContext = + Records.newRecord(ContainerLaunchContext.class); + protected final List commands = new ArrayList<>(20); + protected final Map localResources = new HashMap<>(); + protected final Map mountPaths = new HashMap<>(); + private final Map serviceData = new HashMap<>(); + // security + protected final Credentials credentials; + protected boolean yarnDockerMode = false; + protected String dockerImage; + protected String dockerNetwork = 
DEFAULT_DOCKER_NETWORK; + protected String dockerHostname; + protected String runPrivilegedContainer; + + + /** + * Create instance. + * @param coreFileSystem filesystem + * @param credentials initial set of credentials -null is permitted + */ + public AbstractLauncher( + CoreFileSystem coreFileSystem, + Credentials credentials) { + this.coreFileSystem = coreFileSystem; + this.credentials = credentials != null ? credentials: new Credentials(); + } + + public void setYarnDockerMode(boolean yarnDockerMode){ + this.yarnDockerMode = yarnDockerMode; + } + + /** + * Get the env vars to work on + * @return env vars + */ + public Map getEnv() { + return envVars; + } + + /** + * Get the launch commands. + * @return the live list of commands + */ + public List getCommands() { + return commands; + } + + public void addLocalResource(String subPath, LocalResource resource) { + localResources.put(subPath, resource); + } + + public void addLocalResource(String subPath, LocalResource resource, String mountPath) { + localResources.put(subPath, resource); + mountPaths.put(subPath, mountPath); + } + + /** + * Accessor to the credentials + * @return the credentials associated with this launcher + */ + public Credentials getCredentials() { + return credentials; + } + + + public void addCommand(String cmd) { + commands.add(cmd); + } + + /** + * Complete the launch context (copy in env vars, etc). 
+ * @return the container to launch + */ + public ContainerLaunchContext completeContainerLaunch() throws IOException { + + String cmdStr = SliderUtils.join(commands, " ", false); + log.debug("Completed setting up container command {}", cmdStr); + containerLaunchContext.setCommands(commands); + + //env variables + if (log.isDebugEnabled()) { + log.debug("Environment variables"); + for (Map.Entry envPair : envVars.entrySet()) { + log.debug(" \"{}\"=\"{}\"", envPair.getKey(), envPair.getValue()); + } + } + containerLaunchContext.setEnvironment(envVars); + + //service data + if (log.isDebugEnabled()) { + log.debug("Service Data size"); + for (Map.Entry entry : serviceData.entrySet()) { + log.debug("\"{}\"=> {} bytes of data", entry.getKey(), + entry.getValue().array().length); + } + } + containerLaunchContext.setServiceData(serviceData); + + // resources + dumpLocalResources(); + containerLaunchContext.setLocalResources(localResources); + + //tokens + log.debug("{} tokens", credentials.numberOfTokens()); + containerLaunchContext.setTokens(CredentialUtils.marshallCredentials( + credentials)); + + if(yarnDockerMode){ + Map env = containerLaunchContext.getEnvironment(); + env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker"); + env.put("YARN_CONTAINER_RUNTIME_DOCKER_IMAGE", dockerImage); + env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK", dockerNetwork); + env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME", + dockerHostname); + env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER", runPrivilegedContainer); + StringBuilder sb = new StringBuilder(); + for (Entry mount : mountPaths.entrySet()) { + if (sb.length() > 0) { + sb.append(","); + } + sb.append(mount.getKey()); + sb.append(":"); + sb.append(mount.getValue()); + } + env.put("YARN_CONTAINER_RUNTIME_DOCKER_LOCAL_RESOURCE_MOUNTS", sb.toString()); + log.info("yarn docker env var has been set {}", containerLaunchContext.getEnvironment().toString()); + } + + return containerLaunchContext; + } + + 
public void setRetryContext(int maxRetries, int retryInterval) { + ContainerRetryContext retryContext = ContainerRetryContext + .newInstance(ContainerRetryPolicy.RETRY_ON_ALL_ERRORS, null, maxRetries, + retryInterval); + containerLaunchContext.setContainerRetryContext(retryContext); + } + + /** + * Dump local resources at debug level + */ + private void dumpLocalResources() { + if (log.isDebugEnabled()) { + log.debug("{} resources: ", localResources.size()); + for (Map.Entry entry : localResources.entrySet()) { + + String key = entry.getKey(); + LocalResource val = entry.getValue(); + log.debug(key + "=" + SliderUtils.stringify(val.getResource())); + } + } + } + + /** + * This is critical for an insecure cluster -it passes + * down the username to YARN, and so gives the code running + * in containers the rights it needs to work with + * data. + * @throws IOException problems working with current user + */ + protected void propagateUsernameInInsecureCluster() throws IOException { + //insecure cluster: propagate user name via env variable + String userName = UserGroupInformation.getCurrentUser().getUserName(); + envVars.put(YarnServiceConstants.HADOOP_USER_NAME, userName); + } + + /** + * Utility method to set up the classpath + * @param classpath classpath to use + */ + public void setClasspath(ClasspathConstructor classpath) { + setEnv(CLASSPATH, classpath.buildClasspath()); + } + + /** + * Set an environment variable in the launch context + * @param var variable name + * @param value value (must be non null) + */ + public void setEnv(String var, String value) { + Preconditions.checkArgument(var != null, "null variable name"); + Preconditions.checkArgument(value != null, "null value"); + envVars.put(var, value); + } + + + public void putEnv(Map map) { + envVars.putAll(map); + } + + + public void setDockerImage(String dockerImage) { + this.dockerImage = dockerImage; + } + + public void setDockerNetwork(String dockerNetwork) { + this.dockerNetwork = dockerNetwork; + 
} + + public void setDockerHostname(String dockerHostname) { + this.dockerHostname = dockerHostname; + } + + public void setRunPrivilegedContainer(boolean runPrivilegedContainer) { + if (runPrivilegedContainer) { + this.runPrivilegedContainer = Boolean.toString(true); + } else { + this.runPrivilegedContainer = Boolean.toString(false); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ClasspathConstructor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ClasspathConstructor.java new file mode 100644 index 0000000..22b3877 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ClasspathConstructor.java @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.containerlaunch; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.utils.SliderUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * build a classpath -allows for entries to be injected in front of + * YARN classpath as well as behind, adds appropriate separators, + * extraction of local classpath, etc. + */ +public class ClasspathConstructor { + + public static final String CLASS_PATH_SEPARATOR = ApplicationConstants.CLASS_PATH_SEPARATOR; + private final List pathElements = new ArrayList<>(); + + public ClasspathConstructor() { + } + + + /** + * Get the list of JARs from the YARN settings + * @param config configuration + */ + public List yarnApplicationClasspath(Configuration config) { + String[] cp = config.getTrimmedStrings( + YarnConfiguration.YARN_APPLICATION_CLASSPATH, + YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH); + return cp != null ? Arrays.asList(cp) : new ArrayList(0); + + } + + + @Override + public String toString() { + return buildClasspath(); + } + + public String buildClasspath() { + return SliderUtils.join(pathElements, + CLASS_PATH_SEPARATOR, + false); + } + + /** + * Get a copy of the path list + * @return the JARs + */ + public List getPathElements() { + return Collections.unmodifiableList(pathElements); + } + + /** + * Append an entry + * @param path path + */ + public void append(String path) { + pathElements.add(path); + } + + /** + * Insert a path at the front of the list. This places it ahead of + * the standard YARN artifacts + * @param path path to the JAR. 
Absolute or relative -on the target + * system + */ + public void insert(String path) { + pathElements.add(0, path); + } + + public void appendAll(Collection paths) { + pathElements.addAll(paths); + } + + public void insertAll(Collection paths) { + pathElements.addAll(0, paths); + } + + + public void addLibDir(String pathToLibDir) { + append(buildLibDir(pathToLibDir)); + } + + public void insertLibDir(String pathToLibDir) { + insert(buildLibDir(pathToLibDir)); + } + + public void addClassDirectory(String pathToDir) { + append(appendDirectoryTerminator(pathToDir)); + } + + public void insertClassDirectory(String pathToDir) { + insert(buildLibDir(appendDirectoryTerminator(pathToDir))); + } + + + public void addRemoteClasspathEnvVar() { + append(ApplicationConstants.Environment.CLASSPATH.$$()); + } + + + public void insertRemoteClasspathEnvVar() { + append(ApplicationConstants.Environment.CLASSPATH.$$()); + } + + + /** + * Build a lib dir path + * @param pathToLibDir path to the directory; may or may not end with a + * trailing space + * @return a path to a lib dir that is compatible with the java classpath + */ + public String buildLibDir(String pathToLibDir) { + String dir = appendDirectoryTerminator(pathToLibDir); + dir += "*"; + return dir; + } + + private String appendDirectoryTerminator(String pathToLibDir) { + String dir = pathToLibDir.trim(); + if (!dir.endsWith("/")) { + dir += "/"; + } + return dir; + } + + /** + * Split a classpath. 
This uses the local path separator so MUST NOT + * be used to work with remote classpaths + * @param localpath local path + * @return a splite + */ + public Collection splitClasspath(String localpath) { + String separator = System.getProperty("path.separator"); + return StringUtils.getStringCollection(localpath, separator); + } + + /** + * Get the local JVM classpath split up + * @return the list of entries on the JVM classpath env var + */ + public Collection localJVMClasspath() { + return splitClasspath(System.getProperty("java.class.path")); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java new file mode 100644 index 0000000..7baa284 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.containerlaunch; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.service.utils.SliderUtils; + +import java.util.ArrayList; +import java.util.List; + +/** + * Build a single command line to include in the container commands; + * Special support for JVM command buildup. + */ +public class CommandLineBuilder { + protected final List argumentList = new ArrayList<>(20); + + /** + * Add an entry to the command list + * @param args arguments -these will be converted strings + */ + public void add(Object... args) { + for (Object arg : args) { + argumentList.add(arg.toString()); + } + } + + // Get the number of arguments + public int size() { + return argumentList.size(); + } + + /** + * Append the output and error files to the tail of the command + * @param stdout out + * @param stderr error. 
Set this to null to append into stdout + */ + public void addOutAndErrFiles(String stdout, String stderr) { + Preconditions.checkNotNull(stdout, "Null output file"); + Preconditions.checkState(!stdout.isEmpty(), "output filename invalid"); + // write out the path output + argumentList.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" + + stdout); + if (stderr != null) { + argumentList.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/" + + stderr); + } else { + argumentList.add("2>&1"); + } + } + + /** + * This just returns the command line + * @see #build() + * @return the command line + */ + @Override + public String toString() { + return build(); + } + + /** + * Build the command line + * @return the command line + */ + public String build() { + return SliderUtils.join(argumentList, " "); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java new file mode 100644 index 0000000..0e51a62 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.containerlaunch; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.provider.ProviderService; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +public class ContainerLaunchService extends AbstractService{ + + protected static final Logger LOG = + LoggerFactory.getLogger(ContainerLaunchService.class); + + private ExecutorService executorService; + private SliderFileSystem fs; + + public ContainerLaunchService(SliderFileSystem fs) { + super(ContainerLaunchService.class.getName()); + this.fs = fs; + } + + @Override + public void serviceInit(Configuration conf) throws Exception { + executorService = Executors.newCachedThreadPool(); + super.serviceInit(conf); + } + + @Override + protected void serviceStop() throws Exception { + if (executorService != null) { + executorService.shutdownNow(); + } + super.serviceStop(); + } + + public void launchCompInstance(Service service, + ComponentInstance instance, Container container) { + ContainerLauncher launcher = 
+ new ContainerLauncher(service, instance, container); + executorService.execute(launcher); + } + + private class ContainerLauncher implements Runnable { + public final Container container; + public final Service service; + public ComponentInstance instance; + + public ContainerLauncher( + Service service, + ComponentInstance instance, Container container) { + this.container = container; + this.service = service; + this.instance = instance; + } + + @Override public void run() { + Component compSpec = instance.getCompSpec(); + ProviderService provider = ProviderFactory.getProviderService( + compSpec.getArtifact()); + AbstractLauncher launcher = new AbstractLauncher(fs, null); + try { + provider.buildContainerLaunchContext(launcher, service, + instance, fs, getConfig()); + instance.getComponent().getScheduler().getNmClient() + .startContainerAsync(container, + launcher.completeContainerLaunch()); + } catch (Exception e) { + LOG.error(instance.getCompInstanceId() + + ": Failed to launch container. ", e); + + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CredentialUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CredentialUtils.java new file mode 100644 index 0000000..fce58e5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CredentialUtils.java @@ -0,0 +1,319 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.containerlaunch; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; +import org.apache.hadoop.yarn.client.ClientRMProxy; +import org.apache.hadoop.yarn.client.api.TimelineClient; +import org.apache.hadoop.yarn.client.api.YarnClient; +import org.apache.hadoop.yarn.conf.HAUtil; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.text.DateFormat; +import java.util.ArrayList; +import java.util.Collections; 
+import java.util.Comparator; +import java.util.Date; +import java.util.Iterator; +import java.util.List; + +import static org.apache.hadoop.yarn.conf.YarnConfiguration.*; + +/** + * Utils to work with credentials and tokens. + * + * Designed to be movable to Hadoop core + */ +public final class CredentialUtils { + + private CredentialUtils() { + } + + private static final Logger LOG = + LoggerFactory.getLogger(CredentialUtils.class); + + /** + * Save credentials to a byte buffer. Returns null if there were no + * credentials to save + * @param credentials credential set + * @return a byte buffer of serialized tokens + * @throws IOException if the credentials could not be written to the stream + */ + public static ByteBuffer marshallCredentials(Credentials credentials) throws IOException { + ByteBuffer buffer = null; + if (!credentials.getAllTokens().isEmpty()) { + DataOutputBuffer dob = new DataOutputBuffer(); + try { + credentials.writeTokenStorageToStream(dob); + } finally { + dob.close(); + } + buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + } + return buffer; + } + + /** + * Save credentials to a file + * @param file file to save to (will be overwritten) + * @param credentials credentials to write + * @throws IOException + */ + public static void saveTokens(File file, + Credentials credentials) throws IOException { + try(DataOutputStream daos = new DataOutputStream( + new FileOutputStream(file))) { + credentials.writeTokenStorageToStream(daos); + } + } + + /** + * Look up and return the resource manager's principal. This method + * automatically does the _HOST replacement in the principal and + * correctly handles HA resource manager configurations. 
+ * + * From: YARN-4629 + * @param conf the {@link Configuration} file from which to read the + * principal + * @return the resource manager's principal string + * @throws IOException thrown if there's an error replacing the host name + */ + public static String getRMPrincipal(Configuration conf) throws IOException { + String principal = conf.get(RM_PRINCIPAL, ""); + String hostname; + Preconditions.checkState(!principal.isEmpty(), "Not set: " + RM_PRINCIPAL); + + if (HAUtil.isHAEnabled(conf)) { + YarnConfiguration yarnConf = new YarnConfiguration(conf); + if (yarnConf.get(RM_HA_ID) == null) { + // If RM_HA_ID is not configured, use the first of RM_HA_IDS. + // Any valid RM HA ID should work. + String[] rmIds = yarnConf.getStrings(RM_HA_IDS); + Preconditions.checkState((rmIds != null) && (rmIds.length > 0), + "Not set " + RM_HA_IDS); + yarnConf.set(RM_HA_ID, rmIds[0]); + } + + hostname = yarnConf.getSocketAddr( + RM_ADDRESS, + DEFAULT_RM_ADDRESS, + DEFAULT_RM_PORT).getHostName(); + } else { + hostname = conf.getSocketAddr( + RM_ADDRESS, + DEFAULT_RM_ADDRESS, + DEFAULT_RM_PORT).getHostName(); + } + return SecurityUtil.getServerPrincipal(principal, hostname); + } + + /** + * Create and add any filesystem delegation tokens with + * the RM(s) configured to be able to renew them. Returns null + * on an insecure cluster (i.e. harmless) + * @param conf configuration + * @param fs filesystem + * @param credentials credentials to update + * @return a list of all added tokens. 
+ * @throws IOException + */ + public static Token[] addRMRenewableFSDelegationTokens(Configuration conf, + FileSystem fs, + Credentials credentials) throws IOException { + Preconditions.checkArgument(conf != null); + Preconditions.checkArgument(credentials != null); + if (UserGroupInformation.isSecurityEnabled()) { + return fs.addDelegationTokens(CredentialUtils.getRMPrincipal(conf), + credentials); + } + return null; + } + + /** + * Add an FS delegation token which can be renewed by the current user + * @param fs filesystem + * @param credentials credentials to update + * @throws IOException problems. + */ + public static void addSelfRenewableFSDelegationTokens( + FileSystem fs, + Credentials credentials) throws IOException { + Preconditions.checkArgument(fs != null); + Preconditions.checkArgument(credentials != null); + fs.addDelegationTokens( + getSelfRenewer(), + credentials); + } + + public static String getSelfRenewer() throws IOException { + return UserGroupInformation.getLoginUser().getShortUserName(); + } + + /** + * Create and add an RM delegation token to the credentials + * @param yarnClient Yarn Client + * @param credentials to add token to + * @return the token which was added + * @throws IOException + * @throws YarnException + */ + public static Token addRMDelegationToken(YarnClient yarnClient, + Credentials credentials) + throws IOException, YarnException { + Configuration conf = yarnClient.getConfig(); + Text rmPrincipal = new Text(CredentialUtils.getRMPrincipal(conf)); + Text rmDTService = ClientRMProxy.getRMDelegationTokenService(conf); + Token rmDelegationToken = + ConverterUtils.convertFromYarn( + yarnClient.getRMDelegationToken(rmPrincipal), + rmDTService); + credentials.addToken(rmDelegationToken.getService(), rmDelegationToken); + return rmDelegationToken; + } + + public static Token maybeAddTimelineToken( + Configuration conf, + Credentials credentials) + throws IOException, YarnException { + if 
(conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false)) { + LOG.debug("Timeline service enabled -fetching token"); + + try(TimelineClient timelineClient = TimelineClient.createTimelineClient()) { + timelineClient.init(conf); + timelineClient.start(); + Token token = + timelineClient.getDelegationToken( + CredentialUtils.getRMPrincipal(conf)); + credentials.addToken(token.getService(), token); + return token; + } + } else { + LOG.debug("Timeline service is disabled"); + return null; + } + } + + /** + * Filter a list of tokens from a set of credentials + * @param credentials credential source (a new credential set is returned) + * @param filter List of tokens to strip out + * @return a new, filtered, set of credentials + */ + public static Credentials filterTokens(Credentials credentials, + List filter) { + Credentials result = new Credentials(credentials); + Iterator> iter = + result.getAllTokens().iterator(); + while (iter.hasNext()) { + Token token = iter.next(); + LOG.debug("Token {}", token.getKind()); + if (filter.contains(token.getKind())) { + LOG.debug("Filtering token {}", token.getKind()); + iter.remove(); + } + } + return result; + } + + public static String dumpTokens(Credentials credentials, String separator) { + ArrayList> sorted = + new ArrayList<>(credentials.getAllTokens()); + Collections.sort(sorted, new TokenComparator()); + StringBuilder buffer = new StringBuilder(sorted.size()* 128); + for (Token token : sorted) { + buffer.append(tokenToString(token)).append(separator); + } + return buffer.toString(); + } + + /** + * Create a string for people to look at + * @param token token to convert to a string form + * @return a printable view of the token + */ + public static String tokenToString(Token token) { + DateFormat df = DateFormat.getDateTimeInstance( + DateFormat.SHORT, DateFormat.SHORT); + StringBuilder buffer = new StringBuilder(128); + buffer.append(token.toString()); + try { + TokenIdentifier ti = token.decodeIdentifier(); + 
buffer.append("; ").append(ti); + if (ti instanceof AbstractDelegationTokenIdentifier) { + // details in human readable form, and compensate for information HDFS DT omits + AbstractDelegationTokenIdentifier dt = (AbstractDelegationTokenIdentifier) ti; + buffer.append("; Renewer: ").append(dt.getRenewer()); + buffer.append("; Issued: ") + .append(df.format(new Date(dt.getIssueDate()))); + buffer.append("; Max Date: ") + .append(df.format(new Date(dt.getMaxDate()))); + } + } catch (IOException e) { + //marshall problem; not ours + LOG.debug("Failed to decode {}: {}", token, e, e); + } + return buffer.toString(); + } + + /** + * Get the expiry time of a token. + * @param token token to examine + * @return the time in milliseconds after which the token is invalid. + * @throws IOException + */ + public static long getTokenExpiryTime(Token token) throws IOException { + TokenIdentifier identifier = token.decodeIdentifier(); + Preconditions.checkState(identifier instanceof AbstractDelegationTokenIdentifier, + "Token %s of type: %s has an identifier which cannot be examined: %s", + token, token.getClass(), identifier); + AbstractDelegationTokenIdentifier id = + (AbstractDelegationTokenIdentifier) identifier; + return id.getMaxDate(); + } + + private static class TokenComparator + implements Comparator>, Serializable { + @Override + public int compare(Token left, + Token right) { + return left.getKind().toString().compareTo(right.getKind().toString()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java new file mode 100644 index 0000000..cbcb0d6 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.containerlaunch; + + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; + +import java.util.Map; + +/** + * Command line builder purely for the Java CLI. + * Some of the define methods are designed to work with Hadoop tool and + * Slider launcher applications. + */ +public class JavaCommandLineBuilder extends CommandLineBuilder { + + public JavaCommandLineBuilder() { + add(getJavaBinary()); + } + + /** + * Get the java binary. This is called in the constructor so don't try and + * do anything other than return a constant. 
+ * @return the path to the Java binary + */ + protected String getJavaBinary() { + return ApplicationConstants.Environment.JAVA_HOME.$$() + "/bin/java"; + } + + /** + * Set the size of the heap if a non-empty heap is passed in. + * @param heap empty string or something like "128M" ,"1G" etc. The value is + * trimmed. + */ + public void setJVMHeap(String heap) { + if (SliderUtils.isSet(heap)) { + add("-Xmx" + heap.trim()); + } + } + + /** + * Turn Java assertions on + */ + public void enableJavaAssertions() { + add("-ea"); + add("-esa"); + } + + /** + * Add a system property definition -must be used before setting the main entry point + * @param property + * @param value + */ + public void sysprop(String property, String value) { + Preconditions.checkArgument(property != null, "null property name"); + Preconditions.checkArgument(value != null, "null value"); + add("-D" + property + "=" + value); + } + + public JavaCommandLineBuilder forceIPv4() { + sysprop("java.net.preferIPv4Stack", "true"); + return this; + } + + public JavaCommandLineBuilder headless() { + sysprop("java.awt.headless", "true"); + return this; + } + + public boolean addConfOption(Configuration conf, String key) { + return defineIfSet(key, conf.get(key)); + } + + /** + * Add a varargs list of configuration parameters —if they are present + * @param conf configuration source + * @param keys keys + */ + public void addConfOptions(Configuration conf, String... 
keys) { + for (String key : keys) { + addConfOption(conf, key); + } + } + + /** + * Add all configuration options which match the prefix + * @param conf configuration + * @param prefix prefix, e.g {@code "slider."} + * @return the number of entries copied + */ + public int addPrefixedConfOptions(Configuration conf, String prefix) { + int copied = 0; + for (Map.Entry entry : conf) { + if (entry.getKey().startsWith(prefix)) { + define(entry.getKey(), entry.getValue()); + copied++; + } + } + return copied; + } + + /** + * Add a configuration option to the command line of the application + * @param conf configuration + * @param key key + * @param defVal default value + * @return the resolved configuration option + * @throws IllegalArgumentException if key is null or the looked up value + * is null (that is: the argument is missing and defVal was null). + */ + public String addConfOptionToCLI(Configuration conf, + String key, + String defVal) { + Preconditions.checkArgument(key != null, "null key"); + String val = conf.get(key, defVal); + define(key, val); + return val; + } + + /** + * Add a -D key=val command to the CLI. 
This is very Hadoop API + * @param key key + * @param val value + * @throws IllegalArgumentException if either argument is null + */ + public void define(String key, String val) { + Preconditions.checkArgument(key != null, "null key"); + Preconditions.checkArgument(val != null, "null value"); + add("-D", key + "=" + val); + } + + /** + * Add a -D key=val command to the CLI if val + * is not null + * @param key key + * @param val value + */ + public boolean defineIfSet(String key, String val) { + Preconditions.checkArgument(key != null, "null key"); + if (val != null) { + define(key, val); + return true; + } else { + return false; + } + } + + /** + * Add a mandatory config option + * @param conf configuration + * @param key key + * @throws BadConfigException if the key is missing + */ + public void addMandatoryConfOption(Configuration conf, + String key) throws BadConfigException { + if (!addConfOption(conf, key)) { + throw new BadConfigException("Missing configuration option: " + key); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadClusterStateException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadClusterStateException.java new file mode 100644 index 0000000..db9de7a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadClusterStateException.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +import org.apache.hadoop.yarn.service.exceptions.SliderException; + +/** + * The system is in a bad state + */ +public class BadClusterStateException extends SliderException { + public BadClusterStateException(String message, + Object... args) { + super(EXIT_BAD_STATE, message, args); + } + + public BadClusterStateException(Throwable throwable, + String message, Object... args) { + super(EXIT_BAD_STATE, throwable, message, args); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java new file mode 100644 index 0000000..41e3251 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadCommandArgumentsException.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +public class BadCommandArgumentsException extends SliderException { + public BadCommandArgumentsException(String s, Object... args) { + super(EXIT_COMMAND_ARGUMENT_ERROR, s, args); + } + + public BadCommandArgumentsException(Throwable throwable, String message, + Object... args) { + super(EXIT_COMMAND_ARGUMENT_ERROR, throwable, message, args); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java new file mode 100644 index 0000000..8199c3c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/BadConfigException.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +/** + * An exception to raise on a bad configuration + */ +public class BadConfigException extends SliderException { + + public BadConfigException(String s) { + super(EXIT_BAD_CONFIGURATION, s); + } + + public BadConfigException(String message, Object... args) { + super(EXIT_BAD_CONFIGURATION, message, args); + } + + public BadConfigException( + Throwable throwable, + String message, Object... args) { + super(EXIT_BAD_CONFIGURATION, throwable, message, args); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java new file mode 100644 index 0000000..83658c8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ErrorStrings.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +public interface ErrorStrings { + + String PRINTF_E_INSTANCE_ALREADY_EXISTS = "Service Instance \"%s\" already exists and is defined in %s"; + String PRINTF_E_INSTANCE_DIR_ALREADY_EXISTS = "Service Instance dir already exists: %s"; + + /** + * ERROR Strings + */ + String ERROR_NO_ACTION = "No action specified"; + String ERROR_UNKNOWN_ACTION = "Unknown command: "; + String ERROR_NOT_ENOUGH_ARGUMENTS = + "Not enough arguments for action: "; + String ERROR_PARSE_FAILURE = + "Failed to parse "; + /** + * All the remaining values after argument processing + */ + String ERROR_TOO_MANY_ARGUMENTS = + "Too many arguments"; + String ERROR_DUPLICATE_ENTRY = "Duplicate entry for "; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java new file mode 100644 index 0000000..d66b860 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ExitCodeProvider.java @@ -0,0 +1,32 @@ +/* + * Licensed to the 
Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +/** + * Get the exit code of an exception. Making it an interface allows + * us to retrofit exit codes onto existing classes + */ +public interface ExitCodeProvider { + + /** + * Method to get the exit code + * @return the exit code + */ + int getExitCode(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java new file mode 100644 index 0000000..483fb48 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/LauncherExitCodes.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +/* + * Common Exit codes + *
<p>
+ * Exit codes from 64 up are service specific. + *
<p>
+ * Many of the exit codes are designed to resemble HTTP error codes, + * squashed into a single byte. e.g 44 , "not found" is the equivalent + * of 404 + *
<pre>
+ *    0-10: general command issues
+ *   30-39: equivalent to the 3XX responses, where those responses are
+ *          considered errors by the service.
+ *   40-49: request-related errors
+ *   50-59: server-side problems. These may be triggered by the request.
+ *   64-  : service specific error codes
+ * </pre>
+ */ +public interface LauncherExitCodes { + + /** + * 0: success + */ + int EXIT_SUCCESS = 0; + + /** + * -1: generic "false" response. The operation worked but + * the result was not true + */ + int EXIT_FALSE = -1; + + /** + * Exit code when a client requested service termination: {@value} + */ + int EXIT_CLIENT_INITIATED_SHUTDOWN = 1; + + /** + * Exit code when targets could not be launched: {@value} + */ + int EXIT_TASK_LAUNCH_FAILURE = 2; + + /** + * Exit code when a control-C, kill -3, signal was picked up: {@value} + */ + int EXIT_INTERRUPTED = 3; + + /** + * Exit code when a usage message was printed: {@value} + */ + int EXIT_USAGE = 4; + + /** + * Exit code when something happened but we can't be specific: {@value} + */ + int EXIT_OTHER_FAILURE = 5; + + /** + * Exit code on connectivity problems: {@value} + */ + int EXIT_MOVED = 31; + + /** + * found: {@value}. + *
<p>
+ * This is low value as in HTTP it is normally a success/redirect; + * whereas on the command line 0 is the sole success code. + *
<p>
+ * 302 Found + */ + int EXIT_FOUND = 32; + + /** + * Exit code on a request where the destination has not changed + * and (somehow) the command specified that this is an error. + * That is, this exit code is somehow different from a "success" + * : {@value} + *
<p>
+ * 304 Not Modified + */ + int EXIT_NOT_MODIFIED = 34; + + /** + * Exit code when the command line doesn't parse: {@value}, or + * when it is otherwise invalid. + *
<p>
+ * 400 BAD REQUEST + */ + int EXIT_COMMAND_ARGUMENT_ERROR = 40; + + /** + * The request requires user authentication: {@value} + *
<p>
+ * 401 Unauthorized + */ + int EXIT_UNAUTHORIZED = 41; + + /** + * Forbidden action: {@value} + *
<p>
+ * 403: Forbidden + */ + int EXIT_FORBIDDEN = 43; + + /** + * Something was not found: {@value} + *
<p>
+ * 404: NOT FOUND + */ + int EXIT_NOT_FOUND = 44; + + /** + * The operation is not allowed: {@value} + *
<p>
+ * 405: NOT ALLOWED + */ + int EXIT_OPERATION_NOT_ALLOWED = 45; + + /** + * The command is somehow not acceptable: {@value} + *
<p>
+ * 406: NOT ACCEPTABLE + */ + int EXIT_NOT_ACCEPTABLE = 46; + + /** + * Exit code on connectivity problems: {@value} + *
<p>
+ * 408: Request Timeout + */ + int EXIT_CONNECTIVITY_PROBLEM = 48; + + /** + * The request could not be completed due to a conflict with the current + * state of the resource. {@value} + *
<p>
+ * 409: conflict + */ + int EXIT_CONFLICT = 49; + + /** + * internal error: {@value} + *
<p>
+ * 500 Internal Server Error + */ + int EXIT_INTERNAL_ERROR = 50; + + /** + * Unimplemented feature: {@value} + *
<p>
+ * 501: Not Implemented + */ + int EXIT_UNIMPLEMENTED = 51; + + /** + * Service Unavailable; it may be available later: {@value} + *
<p>
+ * 503 Service Unavailable + */ + int EXIT_SERVICE_UNAVAILABLE = 53; + + /** + * The service does not support, or refuses to support this version: {@value}. + * If raised, this is expected to be raised server-side and likely due + * to client/server version incompatibilities. + *
<p>
+ * 505: Version Not Supported + */ + int EXIT_UNSUPPORTED_VERSION = 55; + + /** + * Exit code when an exception was thrown from the service: {@value} + *
<p>
+ * 5XX + */ + int EXIT_EXCEPTION_THROWN = 56; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java new file mode 100644 index 0000000..ef22b57 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.exceptions; + +public interface RestApiErrorMessages { + String ERROR_APPLICATION_NAME_INVALID = + "Service name is either empty or not provided"; + String ERROR_APPLICATION_NAME_INVALID_FORMAT = + "Service name %s is not valid - only lower case letters, digits, " + + "and hyphen are allowed, and the name must be no more " + + "than 63 characters"; + String ERROR_COMPONENT_NAME_INVALID = + "Component name must be no more than %s characters: %s"; + String ERROR_USER_NAME_INVALID = + "User name must be no more than 63 characters"; + + String ERROR_APPLICATION_NOT_RUNNING = "Service not running"; + String ERROR_APPLICATION_DOES_NOT_EXIST = "Service not found"; + String ERROR_APPLICATION_IN_USE = "Service already exists in started" + + " state"; + String ERROR_APPLICATION_INSTANCE_EXISTS = "Service already exists in" + + " stopped/failed state (either restart with PUT or destroy with DELETE" + + " before creating a new one)"; + + String ERROR_SUFFIX_FOR_COMPONENT = + " for component %s (nor at the global level)"; + String ERROR_ARTIFACT_INVALID = "Artifact is not provided"; + String ERROR_ARTIFACT_FOR_COMP_INVALID = + ERROR_ARTIFACT_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + String ERROR_ARTIFACT_ID_INVALID = + "Artifact id (like docker image name) is either empty or not provided"; + String ERROR_ARTIFACT_ID_FOR_COMP_INVALID = + ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + + String ERROR_RESOURCE_INVALID = "Resource is not provided"; + String ERROR_RESOURCE_FOR_COMP_INVALID = + ERROR_RESOURCE_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + String ERROR_RESOURCE_MEMORY_INVALID = + "Service resource or memory not provided"; + String ERROR_RESOURCE_CPUS_INVALID = + "Service resource or cpus not provided"; + String ERROR_RESOURCE_CPUS_INVALID_RANGE = + "Unacceptable no of cpus specified, either zero or negative"; + String ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID = + ERROR_RESOURCE_MEMORY_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + 
String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID = + ERROR_RESOURCE_CPUS_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE = + ERROR_RESOURCE_CPUS_INVALID_RANGE + + " for component %s (or at the global level)"; + String ERROR_CONTAINERS_COUNT_INVALID = + "Invalid no of containers specified"; + String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID = + ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT; + String ERROR_DEPENDENCY_INVALID = "Dependency %s for component %s is " + + "invalid, does not exist as a component"; + String ERROR_DEPENDENCY_CYCLE = "Invalid dependencies, a cycle may " + + "exist: %s"; + + String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED = + "Cannot specify" + " cpus/memory along with profile"; + String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED = + ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED + + " for component %s"; + String ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET = + "Resource profile is not " + "supported yet. 
Please specify cpus/memory."; + + String ERROR_NULL_ARTIFACT_ID = + "Artifact Id can not be null if artifact type is none"; + String ERROR_ABSENT_NUM_OF_INSTANCE = + "Num of instances should appear either globally or per component"; + String ERROR_ABSENT_LAUNCH_COMMAND = + "Launch_command is required when type is not DOCKER"; + + String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at" + + " component level, needs corresponding values set at service level"; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java new file mode 100644 index 0000000..e83ccbe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/ServiceLaunchException.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.exceptions; + + +import org.apache.hadoop.yarn.exceptions.YarnException; + +/** + * A service launch exception that includes an exit code; + * when caught by the ServiceLauncher, it will convert that + * into a process exit code. + */ +public class ServiceLaunchException extends YarnException + implements ExitCodeProvider, LauncherExitCodes { + + private final int exitCode; + + /** + * Create an exception with the specific exit code + * @param exitCode exit code + * @param cause cause of the exception + */ + public ServiceLaunchException(int exitCode, Throwable cause) { + super(cause); + this.exitCode = exitCode; + } + + /** + * Create an exception with the specific exit code and text + * @param exitCode exit code + * @param message message to use in exception + */ + public ServiceLaunchException(int exitCode, String message) { + super(message); + this.exitCode = exitCode; + } + + /** + * Create an exception with the specific exit code, text and cause + * @param exitCode exit code + * @param message message to use in exception + * @param cause cause of the exception + */ + public ServiceLaunchException(int exitCode, String message, Throwable cause) { + super(message, cause); + this.exitCode = exitCode; + } + + /** + * Get the exit code + * @return the exit code + */ + @Override + public int getExitCode() { + return exitCode; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java new file mode 100644 index 0000000..5b74b80 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/SliderException.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +import org.apache.hadoop.yarn.service.conf.SliderExitCodes; + +public class SliderException extends ServiceLaunchException implements + SliderExitCodes { + public SliderException() { + super(EXIT_EXCEPTION_THROWN, "SliderException"); + } + + public SliderException(int code, String message) { + super(code, message); + } + + public SliderException(String s) { + super(EXIT_EXCEPTION_THROWN, s); + } + + public SliderException(String s, Throwable throwable) { + super(EXIT_EXCEPTION_THROWN, s, throwable); + } + + /** + * Format the exception as you create it + * @param code exit code + * @param message exception message -sprintf formatted + * @param args arguments for the formatting + */ + public SliderException(int code, String message, Object... args) { + super(code, String.format(message, args)); + } + + /** + * Format the exception, include a throwable. 
+ * The throwable comes before the message so that it is out of the varargs + * @param code exit code + * @param throwable thrown + * @param message message + * @param args arguments + */ + public SliderException(int code, + Throwable throwable, + String message, + Object... args) { + super(code, String.format(message, args), throwable); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java new file mode 100644 index 0000000..3a9fa25 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/UsageException.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.exceptions; + +/** + * Used to raise a usage exception ... 
this has the exit code + * {@link #EXIT_USAGE} + */ +public class UsageException extends SliderException { + public UsageException(String s, Object... args) { + super(EXIT_USAGE, s, args); + } + + public UsageException(Throwable throwable, String message, + Object... args) { + super(EXIT_USAGE, throwable, message, args); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java new file mode 100644 index 0000000..33e33a6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/client/ClientAMProtocolPBClientImpl.java @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.impl.pb.client; + +import com.google.protobuf.ServiceException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.ipc.RPCUtil; +import org.apache.hadoop.yarn.service.ClientAMProtocol; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; + +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.service.impl.pb.service.ClientAMProtocolPB; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto; + +public class ClientAMProtocolPBClientImpl + implements ClientAMProtocol, Closeable { + + private ClientAMProtocolPB proxy; + + public ClientAMProtocolPBClientImpl(long clientVersion, + InetSocketAddress addr, Configuration conf) throws IOException { + RPC.setProtocolEngine(conf, ClientAMProtocolPB.class, + ProtobufRpcEngine.class); + proxy = RPC.getProxy(ClientAMProtocolPB.class, clientVersion, addr, conf); + + } + + @Override public FlexComponentsResponseProto flexComponents( + FlexComponentsRequestProto request) throws IOException, YarnException { + try { + return proxy.flexComponents(null, request); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + } + return null; + } + + @Override + public GetStatusResponseProto getStatus(GetStatusRequestProto request) + throws IOException, YarnException { + try { + return proxy.getStatus(null, request); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + } + return 
null; + } + + @Override + public StopResponseProto stop(StopRequestProto requestProto) + throws IOException, YarnException { + try { + return proxy.stop(null, requestProto); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + } + return null; + } + + @Override public void close() { + if (this.proxy != null) { + RPC.stopProxy(this.proxy); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java new file mode 100644 index 0000000..6a9cd37 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPB.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.impl.pb.service; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.yarn.proto.ClientAMProtocol; + +@ProtocolInfo( + protocolName = "org.apache.hadoop.yarn.service.ClientAMProtocol", + protocolVersion = 1) +public interface ClientAMProtocolPB extends + ClientAMProtocol.ClientAMProtocolService.BlockingInterface { +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java new file mode 100644 index 0000000..7100781 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/impl/pb/service/ClientAMProtocolPBServiceImpl.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.impl.pb.service; + +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.FlexComponentsResponseProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusRequestProto; +import org.apache.hadoop.yarn.proto.ClientAMProtocol.GetStatusResponseProto; +import org.apache.hadoop.yarn.service.ClientAMProtocol; + +import java.io.IOException; + +public class ClientAMProtocolPBServiceImpl implements ClientAMProtocolPB { + + private ClientAMProtocol real; + + public ClientAMProtocolPBServiceImpl(ClientAMProtocol impl) { + this.real = impl; + } + + @Override + public FlexComponentsResponseProto flexComponents(RpcController controller, + FlexComponentsRequestProto request) throws ServiceException { + try { + return real.flexComponents(request); + } catch (IOException | YarnException e) { + throw new ServiceException(e); + } + } + + @Override public GetStatusResponseProto getStatus(RpcController controller, + GetStatusRequestProto request) throws ServiceException { + try { + return real.getStatus(request); + } catch (IOException | YarnException e) { + throw new ServiceException(e); + } + } + + @Override + public org.apache.hadoop.yarn.proto.ClientAMProtocol.StopResponseProto stop( + RpcController controller, + org.apache.hadoop.yarn.proto.ClientAMProtocol.StopRequestProto request) + throws ServiceException { + try { + return real.stop(request); + } catch (IOException | YarnException e) { + throw new ServiceException(e); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java new file mode 100644 index 0000000..982448a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java @@ -0,0 +1,147 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.monitor; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.component.Component; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.component.ComponentEvent; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent; +import org.apache.hadoop.yarn.service.component.ComponentState; +import org.apache.hadoop.yarn.service.monitor.probe.ProbeStatus; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.STARTED; +import static org.apache.hadoop.yarn.service.component.ComponentEventType.FLEX; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_NOT_READY; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_READY; +import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.READY; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_WINDOW; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_INTERVAL; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.READINESS_CHECK_INTERVAL; + +public class ServiceMonitor extends AbstractService { + + private static final Logger LOG = + LoggerFactory.getLogger(ServiceMonitor.class); + + public ScheduledExecutorService executorService; + private Map liveInstances = null; + private ServiceContext 
context; + private Configuration conf; + + public ServiceMonitor(String name, ServiceContext context) { + super(name); + liveInstances = context.scheduler.getLiveInstances(); + this.context = context; + } + + @Override + public void serviceInit(Configuration conf) throws Exception { + executorService = Executors.newScheduledThreadPool(1); + this.conf = conf; + super.serviceInit(conf); + } + + @Override + public void serviceStart() throws Exception { + long readinessCheckInterval = YarnServiceConf + .getLong(READINESS_CHECK_INTERVAL, DEFAULT_READINESS_CHECK_INTERVAL, + context.service.getConfiguration(), conf); + + executorService + .scheduleAtFixedRate(new ReadinessChecker(), readinessCheckInterval, + readinessCheckInterval, TimeUnit.SECONDS); + + // Default 6 hours. + long failureResetInterval = YarnServiceConf + .getLong(CONTAINER_FAILURE_WINDOW, 21600, + context.service.getConfiguration(), conf); + + executorService + .scheduleAtFixedRate(new ContainerFailureReset(), failureResetInterval, + failureResetInterval, TimeUnit.SECONDS); + } + + @Override + public void serviceStop() throws Exception { + if (executorService != null) { + executorService.shutdownNow(); + } + } + + private class ReadinessChecker implements Runnable { + + @Override + public void run() { + + // check if the comp instance are ready + for (Map.Entry entry : liveInstances + .entrySet()) { + ComponentInstance instance = entry.getValue(); + + ProbeStatus status = instance.ping(); + if (status.isSuccess()) { + if (instance.getState() == STARTED) { + // synchronously update the state. 
+ instance.handle( + new ComponentInstanceEvent(entry.getKey(), BECOME_READY)); + } + } else { + if (instance.getState() == READY) { + instance.handle( + new ComponentInstanceEvent(entry.getKey(), BECOME_NOT_READY)); + } + } + } + + for (Component component : context.scheduler.getAllComponents() + .values()) { + // If comp hasn't started yet and its dependencies are satisfied + if (component.getState() == ComponentState.INIT && component + .areDependenciesReady()) { + LOG.info("[COMPONENT {}]: Dependencies satisfied, ramping up.", + component.getName()); + ComponentEvent event = new ComponentEvent(component.getName(), FLEX) + .setDesired(component.getComponentSpec().getNumberOfContainers()); + component.handle(event); + } + } + } + } + + private class ContainerFailureReset implements Runnable { + @Override + public void run() { + for (Component component : context.scheduler.getAllComponents().values()) { + component.resetCompFailureCount(); + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java new file mode 100644 index 0000000..1923086 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Map; + +public class HttpProbe extends Probe { + protected static final Logger log = LoggerFactory.getLogger(HttpProbe.class); + + private static final String HOST_TOKEN = "${THIS_HOST}"; + + private final String urlString; + private final int timeout; + private final int min, max; + + + public HttpProbe(String url, int timeout, int min, int max, Configuration + conf) { + super("Http probe of " + url + " [" + min + "-" + max + "]", conf); + this.urlString = url; + this.timeout = timeout; + this.min = min; + this.max = max; + } + + public static HttpProbe create(Map props) + throws IOException { + String urlString = getProperty(props, WEB_PROBE_URL, null); + new URL(urlString); + int timeout = getPropertyInt(props, WEB_PROBE_CONNECT_TIMEOUT, + WEB_PROBE_CONNECT_TIMEOUT_DEFAULT); + int minSuccess = getPropertyInt(props, WEB_PROBE_MIN_SUCCESS, + WEB_PROBE_MIN_SUCCESS_DEFAULT); + int maxSuccess = getPropertyInt(props, WEB_PROBE_MAX_SUCCESS, + WEB_PROBE_MAX_SUCCESS_DEFAULT); + return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, null); + } + + 
+ private static HttpURLConnection getConnection(URL url, int timeout) throws + IOException { + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); + connection.setInstanceFollowRedirects(true); + connection.setConnectTimeout(timeout); + return connection; + } + + @Override + public ProbeStatus ping(ComponentInstance instance) { + ProbeStatus status = new ProbeStatus(); + ContainerStatus containerStatus = instance.getContainerStatus(); + if (containerStatus == null || SliderUtils.isEmpty(containerStatus.getIPs()) + || StringUtils.isEmpty(containerStatus.getHost())) { + status.fail(this, new IOException("IP is not available yet")); + return status; + } + + String ip = containerStatus.getIPs().get(0); + HttpURLConnection connection = null; + try { + URL url = new URL(urlString.replace(HOST_TOKEN, ip)); + connection = getConnection(url, this.timeout); + int rc = connection.getResponseCode(); + if (rc < min || rc > max) { + String error = "Probe " + url + " error code: " + rc; + log.info(error); + status.fail(this, + new IOException(error)); + } else { + status.succeed(this); + } + } catch (Throwable e) { + String error = "Probe " + urlString + " failed for IP " + ip + ": " + e; + log.info(error, e); + status.fail(this, + new IOException(error, e)); + } finally { + if (connection != null) { + connection.disconnect(); + } + } + return status; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/LogEntryBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/LogEntryBuilder.java new file mode 100644 index 0000000..9ad86fe --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/LogEntryBuilder.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +/** + * Build up log entries for ease of splunk + */ +public class LogEntryBuilder { + + private final StringBuilder builder = new StringBuilder(); + + public LogEntryBuilder() { + } + + public LogEntryBuilder(String text) { + elt(text); + } + + + public LogEntryBuilder(String name, Object value) { + entry(name, value); + } + + public LogEntryBuilder elt(String text) { + addComma(); + builder.append(text); + return this; + } + + public LogEntryBuilder elt(String name, Object value) { + addComma(); + entry(name, value); + return this; + } + + private void addComma() { + if (!isEmpty()) { + builder.append(", "); + } + } + + private void entry(String name, Object value) { + builder.append(name).append('='); + if (value != null) { + builder.append('"').append(value.toString()).append('"'); + } else { + builder.append("null"); + } + } + + @Override + public String toString() { + return builder.toString(); + } + + private boolean isEmpty() { + return builder.length() == 0; + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java new file mode 100644 index 0000000..55b55f6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +/** + * Config keys for monitoring + */ +public interface MonitorKeys { + + /** + * Port probing key : port to attempt to create a TCP connection to {@value}. + */ + String PORT_PROBE_PORT = "port"; + /** + * Port probing key : timeout for the connection attempt {@value}. + */ + String PORT_PROBE_CONNECT_TIMEOUT = "timeout"; + /** + * Port probing default : timeout for the connection attempt {@value}. + */ + int PORT_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000; + + /** + * Web probing key : URL {@value}. + */ + String WEB_PROBE_URL = "url"; + /** + * Web probing key : min success code {@value}. + */ + String WEB_PROBE_MIN_SUCCESS = "min.success"; + /** + * Web probing key : max success code {@value}. + */ + String WEB_PROBE_MAX_SUCCESS = "max.success"; + /** + * Web probing default : min successful response code {@value}. + */ + int WEB_PROBE_MIN_SUCCESS_DEFAULT = 200; + /** + * Web probing default : max successful response code {@value}. + */ + int WEB_PROBE_MAX_SUCCESS_DEFAULT = 299; + /** + * Web probing key : timeout for the connection attempt {@value} + */ + String WEB_PROBE_CONNECT_TIMEOUT = "timeout"; + /** + * Port probing default : timeout for the connection attempt {@value}. 
+ */ + int WEB_PROBE_CONNECT_TIMEOUT_DEFAULT = 1000; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java new file mode 100644 index 0000000..684f655 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import org.apache.hadoop.yarn.service.api.records.ReadinessCheck; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Formatter; +import java.util.Locale; + +/** + * Various utils to work with the monitor + */ +public final class MonitorUtils { + protected static final Logger LOG = LoggerFactory.getLogger(MonitorUtils + .class); + + private MonitorUtils() { + } + + public static String toPlural(int val) { + return val != 1 ? "s" : ""; + } + + /** + * Convert milliseconds to human time -the exact format is unspecified + * @param milliseconds a time in milliseconds + * @return a time that is converted to human intervals + */ + public static String millisToHumanTime(long milliseconds) { + StringBuilder sb = new StringBuilder(); + // Send all output to the Appendable object sb + Formatter formatter = new Formatter(sb, Locale.US); + + long s = Math.abs(milliseconds / 1000); + long m = Math.abs(milliseconds % 1000); + if (milliseconds > 0) { + formatter.format("%d.%03ds", s, m); + } else if (milliseconds == 0) { + formatter.format("0"); + } else { + formatter.format("-%d.%03ds", s, m); + } + return sb.toString(); + } + + public static Probe getProbe(ReadinessCheck readinessCheck) { + if (readinessCheck == null) { + return null; + } + if (readinessCheck.getType() == null) { + return null; + } + try { + switch (readinessCheck.getType()) { + case HTTP: + return HttpProbe.create(readinessCheck.getProps()); + case PORT: + return PortProbe.create(readinessCheck.getProps()); + default: + return null; + } + } catch (Throwable t) { + throw new IllegalArgumentException("Error creating readiness check " + + t); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java new file mode 100644 index 0000000..aba5859 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.util.Map; + +/** + * Probe for a port being open. 
+ */ +public class PortProbe extends Probe { + protected static final Logger log = LoggerFactory.getLogger(PortProbe.class); + private final int port; + private final int timeout; + + public PortProbe(int port, int timeout) { + super("Port probe of " + port + " for " + timeout + "ms", null); + this.port = port; + this.timeout = timeout; + } + + public static PortProbe create(Map props) + throws IOException { + int port = getPropertyInt(props, PORT_PROBE_PORT, null); + + if (port >= 65536) { + throw new IOException(PORT_PROBE_PORT + " " + port + " is out of " + + "range"); + } + + int timeout = getPropertyInt(props, PORT_PROBE_CONNECT_TIMEOUT, + PORT_PROBE_CONNECT_TIMEOUT_DEFAULT); + + return new PortProbe(port, timeout); + } + + /** + * Try to connect to the (host,port); a failure to connect within + * the specified timeout is a failure. + * @param instance role instance + * @return the outcome + */ + @Override + public ProbeStatus ping(ComponentInstance instance) { + ProbeStatus status = new ProbeStatus(); + + if (instance.getContainerStatus() == null || SliderUtils + .isEmpty(instance.getContainerStatus().getIPs())) { + status.fail(this, new IOException( + instance.getCompInstanceName() + ": IP is not available yet")); + return status; + } + + String ip = instance.getContainerStatus().getIPs().get(0); + InetSocketAddress sockAddr = new InetSocketAddress(ip, port); + Socket socket = new Socket(); + try { + if (log.isDebugEnabled()) { + log.debug(instance.getCompInstanceName() + ": Connecting " + sockAddr + .toString() + ", timeout=" + MonitorUtils + .millisToHumanTime(timeout)); + } + socket.connect(sockAddr, timeout); + status.succeed(this); + } catch (Throwable e) { + String error = + instance.getCompInstanceName() + ": Probe " + sockAddr + " failed"; + log.debug(error, e); + status.fail(this, new IOException(error, e)); + } finally { + IOUtils.closeSocket(socket); + } + return status; + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java new file mode 100644 index 0000000..3237a2b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; + +import java.io.IOException; +import java.util.Map; + +/** + * Base class of all probes. + */ +public abstract class Probe implements MonitorKeys { + + protected final Configuration conf; + private String name; + + /** + * Create a probe of a specific name + * + * @param name probe name + * @param conf configuration being stored. 
+ */ + public Probe(String name, Configuration conf) { + this.name = name; + this.conf = conf; + } + + + protected void setName(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + + @Override + public String toString() { + return getName(); + } + + public static String getProperty(Map props, String name, + String defaultValue) throws IOException { + String value = props.get(name); + if (StringUtils.isEmpty(value)) { + if (defaultValue == null) { + throw new IOException(name + " not specified"); + } + return defaultValue; + } + return value; + } + + public static int getPropertyInt(Map props, String name, + Integer defaultValue) throws IOException { + String value = props.get(name); + if (StringUtils.isEmpty(value)) { + if (defaultValue == null) { + throw new IOException(name + " not specified"); + } + return defaultValue; + } + return Integer.parseInt(value); + } + + /** + * perform any prelaunch initialization + */ + public void init() throws IOException { + + } + + /** + * Ping the endpoint. All exceptions must be caught and included in the + * (failure) status. 
+ * + * @param instance instance to ping + * @return the status + */ + public abstract ProbeStatus ping(ComponentInstance instance); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java new file mode 100644 index 0000000..bc62dcd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/ProbeStatus.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.monitor.probe; + +import java.io.Serializable; +import java.util.Date; + +/** + * Status message of a probe. This is designed to be sent over the wire, though the exception + * Had better be unserializable at the far end if that is to work. 
+ */ +public final class ProbeStatus implements Serializable { + private static final long serialVersionUID = 165468L; + + private long timestamp; + private String timestampText; + private boolean success; + private boolean realOutcome; + private String message; + private Throwable thrown; + private transient Probe originator; + + public ProbeStatus() { + } + + public ProbeStatus(long timestamp, String message, Throwable thrown) { + this.success = false; + this.message = message; + this.thrown = thrown; + setTimestamp(timestamp); + } + + public ProbeStatus(long timestamp, String message) { + this.success = true; + setTimestamp(timestamp); + this.message = message; + this.thrown = null; + } + + public long getTimestamp() { + return timestamp; + } + + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + timestampText = new Date(timestamp).toString(); + } + + public boolean isSuccess() { + return success; + } + + /** + * Set both the success and the real outcome bits to the same value + * @param success the new value + */ + public void setSuccess(boolean success) { + this.success = success; + realOutcome = success; + } + + public String getTimestampText() { + return timestampText; + } + + public boolean getRealOutcome() { + return realOutcome; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public Throwable getThrown() { + return thrown; + } + + public void setThrown(Throwable thrown) { + this.thrown = thrown; + } + + /** + * Get the probe that generated this result. May be null + * @return a possibly null reference to a probe + */ + public Probe getOriginator() { + return originator; + } + + /** + * The probe has succeeded -capture the current timestamp, set + * success to true, and record any other data needed. 
+ * @param probe probe + */ + public void succeed(Probe probe) { + finish(probe, true, probe.getName(), null); + } + + /** + * A probe has failed either because the test returned false, or an exception + * was thrown. The {@link #success} field is set to false, any exception + * thrown is recorded. + * @param probe probe that failed + * @param thrown an exception that was thrown. + */ + public void fail(Probe probe, Throwable thrown) { + finish(probe, false, "Failure in " + probe, thrown); + } + + public void finish(Probe probe, boolean succeeded, String text, Throwable thrown) { + setTimestamp(System.currentTimeMillis()); + setSuccess(succeeded); + originator = probe; + message = text; + this.thrown = thrown; + } + + @Override + public String toString() { + LogEntryBuilder builder = new LogEntryBuilder("Probe Status"); + builder.elt("time", timestampText) + .elt("outcome", (success ? "success" : "failure")); + + if (success != realOutcome) { + builder.elt("originaloutcome", (realOutcome ? 
"success" : "failure")); + } + builder.elt("message", message); + if (thrown != null) { + builder.elt("exception", thrown); + } + + return builder.toString(); + } + + /** + * Flip the success bit on while the real outcome bit is kept false + */ + public void markAsSuccessful() { + success = true; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java new file mode 100644 index 0000000..0d11be2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.provider; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.utils.SliderUtils; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public abstract class AbstractClientProvider { + + public AbstractClientProvider() { + } + + /** + * Generates a fixed format of application tags given one or more of + * application name, version and description. This allows subsequent query for + * an application with a name only, version only or description only or any + * combination of those as filters. + * + * @param appName name of the application + * @param appVersion version of the application + * @param appDescription brief description of the application + * @return + */ + public static final Set createApplicationTags(String appName, + String appVersion, String appDescription) { + Set tags = new HashSet<>(); + tags.add(SliderUtils.createNameTag(appName)); + if (appVersion != null) { + tags.add(SliderUtils.createVersionTag(appVersion)); + } + if (appDescription != null) { + tags.add(SliderUtils.createDescriptionTag(appDescription)); + } + return tags; + } + + /** + * Validate the artifact. + * @param artifact + */ + public abstract void validateArtifact(Artifact artifact, FileSystem + fileSystem) throws IOException; + + protected abstract void validateConfigFile(ConfigFile configFile, FileSystem + fileSystem) throws IOException; + + /** + * Validate the config files. 
+ * @param configFiles config file list + * @param fs file system + */ + public void validateConfigFiles(List configFiles, + FileSystem fs) throws IOException { + Set destFileSet = new HashSet<>(); + + for (ConfigFile file : configFiles) { + if (file.getType() == null) { + throw new IllegalArgumentException("File type is empty"); + } + + if (file.getType().equals(ConfigFile.TypeEnum.TEMPLATE) && StringUtils + .isEmpty(file.getSrcFile())) { + throw new IllegalArgumentException( + "Src_file is empty for " + ConfigFile.TypeEnum.TEMPLATE); + + } + if (!StringUtils.isEmpty(file.getSrcFile())) { + Path p = new Path(file.getSrcFile()); + if (!fs.exists(p)) { + throw new IllegalArgumentException( + "Src_file does not exist for config file: " + file + .getSrcFile()); + } + } + + if (StringUtils.isEmpty(file.getDestFile())) { + throw new IllegalArgumentException("Dest_file is empty."); + } + + if (destFileSet.contains(file.getDestFile())) { + throw new IllegalArgumentException( + "Duplicated ConfigFile exists: " + file.getDestFile()); + } + destFileSet.add(file.getDestFile()); + + java.nio.file.Path destPath = Paths.get(file.getDestFile()); + if (!destPath.isAbsolute() && destPath.getNameCount() > 1) { + throw new IllegalArgumentException("Non-absolute dest_file has more " + + "than one path element"); + } + + // provider-specific validation + validateConfigFile(file, fs); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java new file mode 100644 index 0000000..6ffb84d --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.provider; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; +import org.apache.hadoop.yarn.service.containerlaunch.CommandLineBuilder; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; +import java.util.Map.Entry; + +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_INTERVAL; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_MAX; +import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$; + +public abstract class AbstractProviderService implements ProviderService, + YarnServiceConstants { + + protected static final Logger log = + LoggerFactory.getLogger(AbstractProviderService.class); + + public abstract void processArtifact(AbstractLauncher launcher, + ComponentInstance compInstance, SliderFileSystem fileSystem, + Service service) + throws IOException; + + public void buildContainerLaunchContext(AbstractLauncher launcher, + Service service, ComponentInstance instance, + SliderFileSystem fileSystem, Configuration yarnConf) + throws IOException, SliderException { + Component component = instance.getComponent().getComponentSpec(); + processArtifact(launcher,
instance, fileSystem, service); + + ServiceContext context = + instance.getComponent().getScheduler().getContext(); + // Generate tokens (key-value pair) for config substitution. + // Get pre-defined tokens + Map globalTokens = + instance.getComponent().getScheduler().globalTokens; + Map tokensForSubstitution = ProviderUtils + .initCompTokensForSubstitute(instance); + tokensForSubstitution.putAll(globalTokens); + // Set the environment variables in launcher + launcher.putEnv(SliderUtils + .buildEnvMap(component.getConfiguration(), tokensForSubstitution)); + launcher.setEnv("WORK_DIR", ApplicationConstants.Environment.PWD.$()); + launcher.setEnv("LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR); + if (System.getenv(HADOOP_USER_NAME) != null) { + launcher.setEnv(HADOOP_USER_NAME, System.getenv(HADOOP_USER_NAME)); + } + launcher.setEnv("LANG", "en_US.UTF-8"); + launcher.setEnv("LC_ALL", "en_US.UTF-8"); + launcher.setEnv("LANGUAGE", "en_US.UTF-8"); + + for (Entry entry : launcher.getEnv().entrySet()) { + tokensForSubstitution.put($(entry.getKey()), entry.getValue()); + } + //TODO add component host tokens? 
+// ProviderUtils.addComponentHostTokens(tokensForSubstitution, amState); + + // create config file on hdfs and add local resource + ProviderUtils.createConfigFileAndAddLocalResource(launcher, fileSystem, + component, tokensForSubstitution, instance, context); + + // substitute launch command + String launchCommand = component.getLaunchCommand(); + // docker container may have empty commands + if (!StringUtils.isEmpty(launchCommand)) { + launchCommand = ProviderUtils + .substituteStrWithTokens(launchCommand, tokensForSubstitution); + CommandLineBuilder operation = new CommandLineBuilder(); + operation.add(launchCommand); + operation.addOutAndErrFiles(OUT_FILE, ERR_FILE); + launcher.addCommand(operation.build()); + } + + // By default retry forever every 30 seconds + launcher.setRetryContext(YarnServiceConf + .getInt(CONTAINER_RETRY_MAX, -1, service.getConfiguration(), + yarnConf), YarnServiceConf + .getInt(CONTAINER_RETRY_INTERVAL, 30000, service.getConfiguration(), + yarnConf)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java new file mode 100644 index 0000000..0f949e0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderFactory.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.provider; + +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderFactory; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.provider.docker.DockerProviderFactory; +import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base class for factories. 
+ */ +public abstract class ProviderFactory { + protected static final Logger LOG = + LoggerFactory.getLogger(ProviderFactory.class); + + protected ProviderFactory() {} + + public abstract AbstractClientProvider createClientProvider(); + + public abstract ProviderService createServerProvider(); + + public static synchronized ProviderService getProviderService(Artifact + artifact) { + return createServiceProviderFactory(artifact).createServerProvider(); + } + + public static synchronized AbstractClientProvider getClientProvider(Artifact + artifact) { + return createServiceProviderFactory(artifact).createClientProvider(); + } + + /** + * Create a provider for a specific service + * @param artifact artifact + * @return provider factory + */ + public static synchronized ProviderFactory createServiceProviderFactory( + Artifact artifact) { + if (artifact == null || artifact.getType() == null) { + LOG.debug("Loading service provider type default"); + return DefaultProviderFactory.getInstance(); + } + LOG.debug("Loading service provider type {}", artifact.getType()); + switch (artifact.getType()) { + // TODO add handling for custom types? 
+ // TODO handle service + case DOCKER: + return DockerProviderFactory.getInstance(); + case TARBALL: + return TarballProviderFactory.getInstance(); + default: + throw new IllegalArgumentException(String.format("Resolution error, " + + "%s should not be passed to createServiceProviderFactory", + artifact.getType())); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java new file mode 100644 index 0000000..eb721b4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.provider; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; + +import java.io.IOException; + +public interface ProviderService { + + /** + * Set up the entire container launch context + */ + void buildContainerLaunchContext(AbstractLauncher containerLauncher, + Service service, ComponentInstance instance, + SliderFileSystem sliderFileSystem, Configuration yarnConf) + throws IOException, SliderException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java new file mode 100644 index 0000000..63fbaae --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java @@ -0,0 +1,408 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.provider; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.api.records.ConfigFormat; +import org.apache.hadoop.yarn.service.api.records.Configuration; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; +import org.apache.hadoop.yarn.service.exceptions.BadCommandArgumentsException; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.utils.PublishedConfiguration; +import org.apache.hadoop.yarn.service.utils.PublishedConfigurationOutputter; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + 
+import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.OutputStream; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.regex.Pattern; + +import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*; + +/** + * This is a factoring out of methods handy for providers. It's bonded to a log + * at construction time. + */ +public class ProviderUtils implements YarnServiceConstants { + + protected static final Logger log = + LoggerFactory.getLogger(ProviderUtils.class); + + + /** + * Add oneself to the classpath. This does not work + * on minicluster test runs where the JAR is not built up. + * @param providerResources map of provider resources to add these entries to + * @param providerClass provider to add + * @param jarName name of the jar to use + * @param sliderFileSystem target filesystem + * @param tempPath path in the cluster FS for temp files + * @param libdir relative directory to place resources + * @param miniClusterTestRun true if minicluster is being used + * @return true if the class was found in a JAR + * + * @throws FileNotFoundException if the JAR was not found and this is NOT + * a mini cluster test run + * @throws IOException IO problems + * @throws SliderException any Slider problem + */ + public static boolean addProviderJar( + Map providerResources, + Class providerClass, + String jarName, + SliderFileSystem sliderFileSystem, + Path tempPath, + String libdir, + boolean miniClusterTestRun) throws + IOException, + SliderException { + try { + SliderUtils.putJar(providerResources, + sliderFileSystem, + providerClass, + tempPath, + libdir, + jarName); + return true; + } catch (FileNotFoundException e) { + if (miniClusterTestRun) { + return false; + } else { + throw e; + } + } + } + + /** + * Loads all dependency jars from the default path. 
+ * @param providerResources map of provider resources to add these entries to + * @param sliderFileSystem target filesystem + * @param tempPath path in the cluster FS for temp files + * @param libDir relative directory to place resources + * @param libLocalSrcDir explicitly supplied local libs dir + * @throws IOException trouble copying to HDFS + * @throws SliderException trouble copying to HDFS + */ + public static void addAllDependencyJars( + Map providerResources, + SliderFileSystem sliderFileSystem, + Path tempPath, + String libDir, + String libLocalSrcDir) + throws IOException, SliderException { + if (SliderUtils.isSet(libLocalSrcDir)) { + File file = new File(libLocalSrcDir); + if (!file.exists() || !file.isDirectory()) { + throw new BadCommandArgumentsException( + "Supplied lib src dir %s is not valid", libLocalSrcDir); + } + } + SliderUtils.putAllJars(providerResources, sliderFileSystem, tempPath, + libDir, libLocalSrcDir); + } + + public static String substituteStrWithTokens(String content, + Map tokensForSubstitution) { + for (Map.Entry token : tokensForSubstitution.entrySet()) { + content = + content.replaceAll(Pattern.quote(token.getKey()), token.getValue()); + } + return content; + } + + // configs will be substituted by corresponding env in tokenMap + public static void substituteMapWithTokens(Map configs, + Map tokenMap) { + for (Map.Entry entry : configs.entrySet()) { + String value = entry.getValue(); + if (tokenMap != null) { + for (Map.Entry token : tokenMap.entrySet()) { + value = + value.replaceAll(Pattern.quote(token.getKey()), token.getValue()); + } + } + entry.setValue(value); + } + } + + /** + * Localize the service keytabs for the service. 
+ * @param launcher container launcher + * @param fileSystem file system + * @throws IOException trouble uploading to HDFS + */ + public void localizeServiceKeytabs(AbstractLauncher launcher, + SliderFileSystem fileSystem, Service service) throws IOException { + + Configuration conf = service.getConfiguration(); + String keytabPathOnHost = + conf.getProperty(YarnServiceConf.KEY_AM_KEYTAB_LOCAL_PATH); + if (SliderUtils.isUnset(keytabPathOnHost)) { + String amKeytabName = + conf.getProperty(YarnServiceConf.KEY_AM_LOGIN_KEYTAB_NAME); + String keytabDir = + conf.getProperty(YarnServiceConf.KEY_HDFS_KEYTAB_DIR); + // we need to localize the keytab files in the directory + Path keytabDirPath = fileSystem.buildKeytabPath(keytabDir, null, + service.getName()); + boolean serviceKeytabsDeployed = false; + if (fileSystem.getFileSystem().exists(keytabDirPath)) { + FileStatus[] keytabs = fileSystem.getFileSystem().listStatus( + keytabDirPath); + LocalResource keytabRes; + for (FileStatus keytab : keytabs) { + if (!amKeytabName.equals(keytab.getPath().getName()) + && keytab.getPath().getName().endsWith(".keytab")) { + serviceKeytabsDeployed = true; + log.info("Localizing keytab {}", keytab.getPath().getName()); + keytabRes = fileSystem.createAmResource(keytab.getPath(), + LocalResourceType.FILE); + launcher.addLocalResource(KEYTAB_DIR + "/" + + keytab.getPath().getName(), + keytabRes); + } + } + } + if (!serviceKeytabsDeployed) { + log.warn("No service keytabs for the service have been localized. 
" + + "If the service requires keytabs for secure operation, " + + "please ensure that the required keytabs have been uploaded " + + "to the folder {}", keytabDirPath); + } + } + } + + public static Path initCompInstanceDir(SliderFileSystem fs, + ComponentInstance instance) { + Path compDir = new Path(new Path(fs.getAppDir(), "components"), + instance.getCompName()); + Path compInstanceDir = new Path(compDir, instance.getCompInstanceName()); + instance.setCompInstanceDir(compInstanceDir); + return compInstanceDir; + } + + // 1. Create all config files for a component on hdfs for localization + // 2. Add the config file to localResource + public static synchronized void createConfigFileAndAddLocalResource( + AbstractLauncher launcher, SliderFileSystem fs, Component component, + Map tokensForSubstitution, ComponentInstance instance, + ServiceContext context) throws IOException { + Path compInstanceDir = initCompInstanceDir(fs, instance); + if (!fs.getFileSystem().exists(compInstanceDir)) { + log.info(instance.getCompInstanceId() + ": Creating dir on hdfs: " + compInstanceDir); + fs.getFileSystem().mkdirs(compInstanceDir, + new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE)); + } else { + log.info("Component instance conf dir already exists: " + compInstanceDir); + } + + if (log.isDebugEnabled()) { + log.debug("Tokens substitution for component instance: " + instance + .getCompInstanceName() + System.lineSeparator() + + tokensForSubstitution); + } + + for (ConfigFile originalFile : component.getConfiguration().getFiles()) { + ConfigFile configFile = originalFile.copy(); + String fileName = new Path(configFile.getDestFile()).getName(); + + // substitute file name + for (Map.Entry token : tokensForSubstitution.entrySet()) { + configFile.setDestFile(configFile.getDestFile() + .replaceAll(Pattern.quote(token.getKey()), token.getValue())); + } + + Path remoteFile = new Path(compInstanceDir, fileName); + if (!fs.getFileSystem().exists(remoteFile)) { + 
log.info("Saving config file on hdfs for component " + instance + .getCompInstanceName() + ": " + configFile); + + if (configFile.getSrcFile() != null) { + // Load config file template + switch (configFile.getType()) { + case HADOOP_XML: + // Hadoop_xml_template + resolveHadoopXmlTemplateAndSaveOnHdfs(fs.getFileSystem(), + tokensForSubstitution, configFile, remoteFile, context); + break; + case TEMPLATE: + // plain-template + resolvePlainTemplateAndSaveOnHdfs(fs.getFileSystem(), + tokensForSubstitution, configFile, remoteFile, context); + break; + default: + log.info("Not supporting loading src_file for " + configFile); + break; + } + } else { + // non-template + resolveNonTemplateConfigsAndSaveOnHdfs(fs, tokensForSubstitution, + instance, configFile, fileName, remoteFile); + } + } + + // Add resource for localization + LocalResource configResource = + fs.createAmResource(remoteFile, LocalResourceType.FILE); + File destFile = new File(configFile.getDestFile()); + String symlink = APP_CONF_DIR + "/" + fileName; + if (destFile.isAbsolute()) { + launcher.addLocalResource(symlink, configResource, + configFile.getDestFile()); + log.info("Add config file for localization: " + symlink + " -> " + + configResource.getResource().getFile() + ", dest mount path: " + + configFile.getDestFile()); + } else { + launcher.addLocalResource(symlink, configResource); + log.info("Add config file for localization: " + symlink + " -> " + + configResource.getResource().getFile()); + } + } + } + + private static void resolveNonTemplateConfigsAndSaveOnHdfs(SliderFileSystem fs, + Map tokensForSubstitution, ComponentInstance instance, + ConfigFile configFile, String fileName, Path remoteFile) + throws IOException { + // substitute non-template configs + substituteMapWithTokens(configFile.getProps(), tokensForSubstitution); + + // write configs onto hdfs + PublishedConfiguration publishedConfiguration = + new PublishedConfiguration(fileName, + configFile.getProps().entrySet()); + if 
(!fs.getFileSystem().exists(remoteFile)) { + PublishedConfigurationOutputter configurationOutputter = + PublishedConfigurationOutputter.createOutputter( + ConfigFormat.resolve(configFile.getType().toString()), + publishedConfiguration); + try (FSDataOutputStream os = fs.getFileSystem().create(remoteFile)) { + configurationOutputter.save(os); + os.flush(); + } + } else { + log.info("Component instance = " + instance.getCompInstanceName() + + ", config file already exists: " + remoteFile); + } + } + + // 1. substitute config template - only handle hadoop_xml format + // 2. save on hdfs + @SuppressWarnings("unchecked") + private static void resolveHadoopXmlTemplateAndSaveOnHdfs(FileSystem fs, + Map tokensForSubstitution, ConfigFile configFile, + Path remoteFile, ServiceContext context) throws IOException { + Map conf; + try { + conf = (Map) context.configCache.get(configFile); + } catch (ExecutionException e) { + log.info("Failed to load config file: " + configFile, e); + return; + } + // make a copy for substitution + org.apache.hadoop.conf.Configuration confCopy = + new org.apache.hadoop.conf.Configuration(false); + for (Map.Entry entry : conf.entrySet()) { + confCopy.set(entry.getKey(), entry.getValue()); + } + // substitute properties + for (Map.Entry entry : configFile.getProps().entrySet()) { + confCopy.set(entry.getKey(), entry.getValue()); + } + // substitute env variables + for (Map.Entry entry : confCopy) { + String val = entry.getValue(); + if (val != null) { + for (Map.Entry token : tokensForSubstitution + .entrySet()) { + val = val.replaceAll(Pattern.quote(token.getKey()), token.getValue()); + confCopy.set(entry.getKey(), val); + } + } + } + // save on hdfs + try (OutputStream output = fs.create(remoteFile)) { + confCopy.writeXml(output); + log.info("Reading config from: " + configFile.getSrcFile() + + ", writing to: " + remoteFile); + } + } + + // 1) read the template as a string + // 2) do token substitution + // 3) save on hdfs + private static void 
resolvePlainTemplateAndSaveOnHdfs(FileSystem fs, + Map tokensForSubstitution, ConfigFile configFile, + Path remoteFile, ServiceContext context) { + String content; + try { + content = (String) context.configCache.get(configFile); + } catch (ExecutionException e) { + log.info("Failed to load config file: " + configFile, e); + return; + } + // substitute tokens + content = substituteStrWithTokens(content, tokensForSubstitution); + + try (OutputStream output = fs.create(remoteFile)) { + org.apache.commons.io.IOUtils.write(content, output); + } catch (IOException e) { + log.info("Failed to create " + remoteFile); + } + } + + /** + * Get initial component token map to be substituted into config values. + * @return tokens to replace + */ + public static Map initCompTokensForSubstitute( + ComponentInstance instance) { + Map tokens = new HashMap<>(); + tokens.put(COMPONENT_NAME, instance.getCompSpec().getName()); + tokens + .put(COMPONENT_NAME_LC, instance.getCompSpec().getName().toLowerCase()); + tokens.put(COMPONENT_INSTANCE_NAME, instance.getCompInstanceName()); + tokens.put(CONTAINER_ID, instance.getContainer().getId().toString()); + tokens.put(COMPONENT_ID, + String.valueOf(instance.getCompInstanceId().getId())); + tokens.putAll(instance.getComponent().getDependencyHostIpTokens()); + return tokens; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java new file mode 100644 index 0000000..0920a9c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultClientProvider.java @@ 
-0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.defaultImpl; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; + +import java.io.IOException; +import java.nio.file.Paths; + +public class DefaultClientProvider extends AbstractClientProvider { + + public DefaultClientProvider() { + } + + @Override + public void validateArtifact(Artifact artifact, FileSystem fileSystem) { + } + + @Override + protected void validateConfigFile(ConfigFile configFile, FileSystem + fileSystem) throws IOException { + // validate dest_file is not absolute + if (Paths.get(configFile.getDestFile()).isAbsolute()) { + throw new IllegalArgumentException( + "Dest_file must not be absolute path: " + configFile.getDestFile()); + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java new file mode 100644 index 0000000..868bba8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderFactory.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.provider.defaultImpl; + +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderService; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; + +public final class DefaultProviderFactory extends ProviderFactory { + private static final ProviderFactory FACTORY = new + DefaultProviderFactory(); + + private DefaultProviderFactory() {} + + private static class Client { + static final AbstractClientProvider PROVIDER = new DefaultClientProvider(); + } + + private static class Server { + static final ProviderService PROVIDER = new DefaultProviderService(); + } + + @Override + public AbstractClientProvider createClientProvider() { + return Client.PROVIDER; + } + + @Override + public ProviderService createServerProvider() { + return Server.PROVIDER; + } + + public static ProviderFactory getInstance() { + return FACTORY; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java new file mode 100644 index 0000000..a3a0c1f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/defaultImpl/DefaultProviderService.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.defaultImpl; + +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.provider.AbstractProviderService; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; + +import java.io.IOException; + +public class DefaultProviderService extends AbstractProviderService { + + @Override + public void processArtifact(AbstractLauncher launcher, + ComponentInstance compInstance, SliderFileSystem fileSystem, + Service service) + throws IOException { + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java new file mode 100644 index 0000000..d4a2254 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerClientProvider.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.docker; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; + +import java.io.IOException; + +public class DockerClientProvider extends AbstractClientProvider + implements YarnServiceConstants { + + public DockerClientProvider() { + super(); + } + + @Override + public void validateArtifact(Artifact artifact, FileSystem fileSystem) { + if (artifact == null) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_ARTIFACT_INVALID); + } + if (StringUtils.isEmpty(artifact.getId())) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); + } + } + + @Override + protected void validateConfigFile(ConfigFile configFile, FileSystem + fileSystem) throws IOException { + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerKeys.java new file mode 100644 index 0000000..f30c002 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerKeys.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Configuration keys understood by the docker provider, together with
 * their default values.
 */
public interface DockerKeys {

  /** Artifact type name selecting the docker provider. */
  String PROVIDER_DOCKER = "docker";

  /** Common prefix shared by all docker configuration properties. */
  String DOCKER_PREFIX = "docker.";

  /** Property naming the docker image to run. */
  String DOCKER_IMAGE = DOCKER_PREFIX + "image";

  /** Property naming the docker network the container joins. */
  String DOCKER_NETWORK = DOCKER_PREFIX + "network";

  /** Property toggling privileged container execution. */
  String DOCKER_USE_PRIVILEGED = DOCKER_PREFIX + "usePrivileged";

  /** Property overriding the container start command. */
  String DOCKER_START_COMMAND = DOCKER_PREFIX + "startCommand";

  /** Network used when {@link #DOCKER_NETWORK} is not set. */
  String DEFAULT_DOCKER_NETWORK = "bridge";

  /** Privileged mode used when {@link #DOCKER_USE_PRIVILEGED} is not set. */
  Boolean DEFAULT_DOCKER_USE_PRIVILEGED = false;
}
+ */ +package org.apache.hadoop.yarn.service.provider.docker; + +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderService; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; + +public class DockerProviderFactory extends ProviderFactory { + private static final ProviderFactory FACTORY = new + DockerProviderFactory(); + + private DockerProviderFactory() { + } + + private static class Client { + static final AbstractClientProvider PROVIDER = new DockerClientProvider(); + } + + private static class Server { + static final ProviderService PROVIDER = new DockerProviderService(); + } + + @Override + public AbstractClientProvider createClientProvider() { + return Client.PROVIDER; + } + + @Override + public ProviderService createServerProvider() { + return Server.PROVIDER; + } + + public static ProviderFactory getInstance() { + return FACTORY; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java new file mode 100644 index 0000000..0741947 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.docker; + +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.provider.AbstractProviderService; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; + +import java.io.IOException; +import java.text.MessageFormat; + +public class DockerProviderService extends AbstractProviderService + implements DockerKeys { + + public void processArtifact(AbstractLauncher launcher, + ComponentInstance compInstance, SliderFileSystem fileSystem, + Service service) throws IOException{ + launcher.setYarnDockerMode(true); + launcher.setDockerImage(compInstance.getCompSpec().getArtifact().getId()); + launcher.setDockerNetwork(compInstance.getCompSpec().getConfiguration() + .getProperty(DOCKER_NETWORK, DEFAULT_DOCKER_NETWORK)); + String domain = compInstance.getComponent().getScheduler().getConfig() + .get(RegistryConstants.KEY_DNS_DOMAIN); + String hostname; + if (domain == null || domain.isEmpty()) { + hostname = MessageFormat + .format("{0}.{1}.{2}", compInstance.getCompInstanceName(), + service.getName(), 
RegistryUtils.currentUser()); + } else { + hostname = MessageFormat + .format("{0}.{1}.{2}.{3}", compInstance.getCompInstanceName(), + service.getName(), RegistryUtils.currentUser(), domain); + } + launcher.setDockerHostname(hostname); + launcher.setRunPrivilegedContainer( + compInstance.getCompSpec().getRunPrivilegedContainer()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java new file mode 100644 index 0000000..01f7b20 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballClientProvider.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.provider.tarball; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; + +import java.io.IOException; +import java.nio.file.Paths; + +public class TarballClientProvider extends AbstractClientProvider + implements YarnServiceConstants { + + public TarballClientProvider() { + } + + @Override + public void validateArtifact(Artifact artifact, FileSystem fs) + throws IOException { + if (artifact == null) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_ARTIFACT_INVALID); + } + if (StringUtils.isEmpty(artifact.getId())) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); + } + Path p = new Path(artifact.getId()); + if (!fs.exists(p)) { + throw new IllegalArgumentException( "Artifact tarball does not exist " + + artifact.getId()); + } + } + + @Override + protected void validateConfigFile(ConfigFile configFile, FileSystem + fileSystem) throws IOException { + // validate dest_file is not absolute + if (Paths.get(configFile.getDestFile()).isAbsolute()) { + throw new IllegalArgumentException( + "Dest_file must not be absolute path: " + configFile.getDestFile()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java new file 
mode 100644 index 0000000..9d81f66 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderFactory.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.provider.tarball; + +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderService; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; + +public class TarballProviderFactory extends ProviderFactory { + private static final ProviderFactory FACTORY = new + TarballProviderFactory(); + + private TarballProviderFactory() { + } + + private static class Client { + static final AbstractClientProvider PROVIDER = new TarballClientProvider(); + } + + private static class Server { + static final ProviderService PROVIDER = new TarballProviderService(); + } + + @Override + public AbstractClientProvider createClientProvider() { + return Client.PROVIDER; + } + + @Override + public ProviderService createServerProvider() { + return Server.PROVIDER; + } + + public static ProviderFactory getInstance() { + return FACTORY; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderService.java new file mode 100644 index 0000000..9f29c8b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/tarball/TarballProviderService.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.provider.tarball; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.provider.AbstractProviderService; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher; + +import java.io.IOException; + +public class TarballProviderService extends AbstractProviderService { + + @Override + public void processArtifact(AbstractLauncher launcher, + ComponentInstance instance, SliderFileSystem fileSystem, + Service service) + throws IOException { + Path artifact = new Path(instance.getCompSpec().getArtifact().getId()); + if (!fileSystem.isFile(artifact)) { + throw new IOException( + "Package doesn't exist as a resource: " + artifact.toString()); + } + log.info("Adding resource {}", artifact.toString()); + LocalResourceType type = LocalResourceType.ARCHIVE; + LocalResource packageResource = fileSystem.createAmResource(artifact, type); + launcher.addLocalResource(APP_LIB_DIR, packageResource); + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/CustomRegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/CustomRegistryConstants.java new file mode 100644 index 0000000..56634f6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/CustomRegistryConstants.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Registry API path constants unique to the Slider AM.
 * Pure constant holder: final and never instantiated.
 */
public final class CustomRegistryConstants {

  public static final String MANAGEMENT_REST_API =
      "classpath:org.apache.slider.management";

  public static final String REGISTRY_REST_API =
      "classpath:org.apache.slider.registry";

  public static final String PUBLISHER_REST_API =
      "classpath:org.apache.slider.publisher";

  public static final String PUBLISHER_CONFIGURATIONS_API =
      "classpath:org.apache.slider.publisher.configurations";

  public static final String PUBLISHER_EXPORTS_API =
      "classpath:org.apache.slider.publisher.exports";

  public static final String PUBLISHER_DOCUMENTS_API =
      "classpath:org.apache.slider.publisher.documents";

  public static final String AGENT_SECURE_REST_API =
      "classpath:org.apache.slider.agents.secure";

  public static final String AGENT_ONEWAY_REST_API =
      "classpath:org.apache.slider.agents.oneway";

  public static final String AM_IPC_PROTOCOL =
      "classpath:org.apache.slider.appmaster.ipc";

  public static final String AM_REST_BASE =
      "classpath:org.apache.slider.client.rest";

  public static final String WEB_UI = "http://";

  private CustomRegistryConstants() {
    // constants only; never instantiated
  }
}
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.registry; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.registry.client.api.BindFlags; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; + +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.join; + +/** + * Registry view for providers. This tracks where the service + * is registered, offers access to the record and other things. 
+ */ +public class YarnRegistryViewForProviders { + private static final Logger LOG = + LoggerFactory.getLogger(YarnRegistryViewForProviders.class); + + private final RegistryOperations registryOperations; + private final String user; + private final String serviceClass; + private final String instanceName; + /** + * Record used where the service registered itself. + * Null until the service is registered + */ + private ServiceRecord selfRegistration; + + /** + * Path where record was registered. + * Null until the service is registered + */ + private String selfRegistrationPath; + + public YarnRegistryViewForProviders(RegistryOperations registryOperations, + String user, + String serviceClass, + String instanceName, + ApplicationAttemptId applicationAttemptId) { + Preconditions.checkArgument(registryOperations != null, + "null registry operations"); + Preconditions.checkArgument(user != null, "null user"); + Preconditions.checkArgument(SliderUtils.isSet(serviceClass), + "unset service class"); + Preconditions.checkArgument(SliderUtils.isSet(instanceName), + "instanceName"); + Preconditions.checkArgument(applicationAttemptId != null, + "null applicationAttemptId"); + this.registryOperations = registryOperations; + this.user = user; + this.serviceClass = serviceClass; + this.instanceName = instanceName; + } + + public String getUser() { + return user; + } + + + private void setSelfRegistration(ServiceRecord selfRegistration) { + this.selfRegistration = selfRegistration; + } + + /** + * Get the path to where the service has registered itself. + * Null until the service is registered + * @return the service registration path. + */ + public String getSelfRegistrationPath() { + return selfRegistrationPath; + } + + /** + * Get the absolute path to where the service has registered itself. + * This includes the base registry path + * Null until the service is registered + * @return the service registration path. 
+ */ + public String getAbsoluteSelfRegistrationPath() { + if (selfRegistrationPath == null) { + return null; + } + String root = registryOperations.getConfig().getTrimmed( + RegistryConstants.KEY_REGISTRY_ZK_ROOT, + RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); + return RegistryPathUtils.join(root, selfRegistrationPath); + } + + /** + * Add a component under the slider name/entry. + * @param componentName component name + * @param record record to put + * @throws IOException + */ + public void putComponent(String componentName, + ServiceRecord record) throws + IOException { + putComponent(serviceClass, instanceName, + componentName, + record); + } + + /** + * Add a component. + * @param serviceClass service class to use under ~user + * @param componentName component name + * @param record record to put + * @throws IOException + */ + public void putComponent(String serviceClass, + String serviceName, + String componentName, + ServiceRecord record) throws IOException { + String path = RegistryUtils.componentPath( + user, serviceClass, serviceName, componentName); + registryOperations.mknode(RegistryPathUtils.parentOf(path), true); + registryOperations.bind(path, record, BindFlags.OVERWRITE); + } + + /** + * Get a component. + * @param componentName component name + * @return the service record + * @throws IOException + */ + public ServiceRecord getComponent(String componentName) throws IOException { + String path = RegistryUtils.componentPath( + user, serviceClass, instanceName, componentName); + LOG.info("Resolving path {}", path); + return registryOperations.resolve(path); + } + + /** + * List components. + * @return a list of components + * @throws IOException + */ + public List listComponents() throws IOException { + String path = RegistryUtils.componentListPath( + user, serviceClass, instanceName); + return registryOperations.list(path); + } + + /** + * Add a service under a path, optionally purging any history. 
+ * @param username user + * @param serviceClass service class to use under ~user + * @param serviceName name of the service + * @param record service record + * @param deleteTreeFirst perform recursive delete of the path first. + * @return the path the service was created at + * @throws IOException + */ + public String putService(String username, + String serviceClass, + String serviceName, + ServiceRecord record, + boolean deleteTreeFirst) throws IOException { + String path = RegistryUtils.servicePath( + username, serviceClass, serviceName); + if (deleteTreeFirst) { + registryOperations.delete(path, true); + } + registryOperations.mknode(RegistryPathUtils.parentOf(path), true); + registryOperations.bind(path, record, BindFlags.OVERWRITE); + return path; + } + + /** + * Add a service under a path for the current user. + * @param record service record + * @param deleteTreeFirst perform recursive delete of the path first + * @return the path the service was created at + * @throws IOException + */ + public String registerSelf( + ServiceRecord record, + boolean deleteTreeFirst) throws IOException { + selfRegistrationPath = + putService(user, serviceClass, instanceName, record, deleteTreeFirst); + setSelfRegistration(record); + return selfRegistrationPath; + } + + /** + * Delete a component. + * @param containerId component name + * @throws IOException + */ + public void deleteComponent(ComponentInstanceId instanceId, + String containerId) throws IOException { + String path = RegistryUtils.componentPath( + user, serviceClass, instanceName, + containerId); + LOG.info(instanceId + ": Deleting registry path " + path); + registryOperations.delete(path, false); + } + + /** + * Delete the children of a path -but not the path itself. 
+ * It is not an error if the path does not exist + * @param path path to delete + * @param recursive flag to request recursive deletes + * @throws IOException IO problems + */ + public void deleteChildren(String path, boolean recursive) throws IOException { + List childNames = null; + try { + childNames = registryOperations.list(path); + } catch (PathNotFoundException e) { + return; + } + for (String childName : childNames) { + String child = join(path, childName); + registryOperations.delete(child, recursive); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java new file mode 100644 index 0000000..cf4e836 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceMetricsSink.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +import org.apache.commons.configuration2.SubsetConfiguration; +import org.apache.hadoop.metrics2.MetricsRecord; +import org.apache.hadoop.metrics2.MetricsSink; +import org.apache.hadoop.metrics2.MetricsTag; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Write the metrics to a ATSv2. Generally, this class is instantiated via + * hadoop-metrics2 property files. Specifically, you would create this class by + * adding the following to by This would actually be set as: + * [prefix].sink.[some instance name].class + * =org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink + * , where prefix is "atsv2": and some instance name is + * just any unique name, so properties can be differentiated if there are + * multiple sinks of the same type created + */ +public class ServiceMetricsSink implements MetricsSink { + + private static final Logger log = + LoggerFactory.getLogger(ServiceMetricsSink.class); + + private ServiceTimelinePublisher serviceTimelinePublisher; + + public ServiceMetricsSink() { + + } + + public ServiceMetricsSink(ServiceTimelinePublisher publisher) { + serviceTimelinePublisher = publisher; + } + + /** + * Publishes service and component metrics to ATS. + */ + @Override + public void putMetrics(MetricsRecord record) { + if (serviceTimelinePublisher.isStopped()) { + log.warn("ServiceTimelinePublisher has stopped. 
" + + "Not publishing any more metrics to ATS."); + return; + } + + boolean isServiceMetrics = false; + boolean isComponentMetrics = false; + String appId = null; + for (MetricsTag tag : record.tags()) { + if (tag.name().equals("type") && tag.value().equals("service")) { + isServiceMetrics = true; + } else if (tag.name().equals("type") && tag.value().equals("component")) { + isComponentMetrics = true; + break; // if component metrics, no more information required from tag so + // break the loop + } else if (tag.name().equals("appId")) { + appId = tag.value(); + } + } + + if (isServiceMetrics && appId != null) { + if (log.isDebugEnabled()) { + log.debug("Publishing service metrics. " + record); + } + serviceTimelinePublisher.publishMetrics(record.metrics(), appId, + ServiceTimelineEntityType.SERVICE_ATTEMPT.toString(), + record.timestamp()); + } else if (isComponentMetrics) { + if (log.isDebugEnabled()) { + log.debug("Publishing Component metrics. " + record); + } + serviceTimelinePublisher.publishMetrics(record.metrics(), record.name(), + ServiceTimelineEntityType.COMPONENT.toString(), record.timestamp()); + } + } + + @Override + public void init(SubsetConfiguration conf) { + } + + @Override + public void flush() { + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java new file mode 100644 index 0000000..d5c9539 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineEntityType.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or 
/**
 * Slider entity types published to ATS. Order is significant: ordinals
 * are part of the enum's public contract, so do not reorder.
 */
public enum ServiceTimelineEntityType {
  /**
   * Used for publishing service entity information.
   */
  SERVICE_ATTEMPT,

  /**
   * Used for publishing component entity information.
   */
  COMPONENT,

  /**
   * Used for publishing component instance entity information.
   */
  COMPONENT_INSTANCE
}
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +/** + * Events that are used to store in ATS. + */ +public enum ServiceTimelineEvent { + SERVICE_ATTEMPT_REGISTERED, + + SERVICE_ATTEMPT_UNREGISTERED, + + COMPONENT_INSTANCE_REGISTERED, + + COMPONENT_INSTANCE_UNREGISTERED, + + COMPONENT_INSTANCE_UPDATED +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java new file mode 100644 index 0000000..78a7171 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelineMetricsConstants.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +/** + * Constants which are stored as key in ATS + */ +public final class ServiceTimelineMetricsConstants { + + public static final String URI = "URI"; + + public static final String NAME = "NAME"; + + public static final String STATE = "STATE"; + + public static final String EXIT_STATUS_CODE = "EXIT_STATUS_CODE"; + + public static final String EXIT_REASON = "EXIT_REASON"; + + public static final String DIAGNOSTICS_INFO = "DIAGNOSTICS_INFO"; + + public static final String LAUNCH_TIME = "LAUNCH_TIME"; + + public static final String QUICK_LINKS = "QUICK_LINKS"; + + public static final String LAUNCH_COMMAND = "LAUNCH_COMMAND"; + + public static final String TOTAL_CONTAINERS = "NUMBER_OF_CONTAINERS"; + + public static final String RUNNING_CONTAINERS = + "NUMBER_OF_RUNNING_CONTAINERS"; + + /** + * Artifacts constants. + */ + public static final String ARTIFACT_ID = "ARTIFACT_ID"; + + public static final String ARTIFACT_TYPE = "ARTIFACT_TYPE"; + + public static final String ARTIFACT_URI = "ARTIFACT_URI"; + + /** + * Resource constants. + */ + public static final String RESOURCE_CPU = "RESOURCE_CPU"; + + public static final String RESOURCE_MEMORY = "RESOURCE_MEMORY"; + + public static final String RESOURCE_PROFILE = "RESOURCE_PROFILE"; + + /** + * component instance constants. 
+ */ + public static final String IP = "IP"; + + public static final String HOSTNAME = "HOSTNAME"; + + public static final String BARE_HOST = "BARE_HOST"; + + public static final String COMPONENT_NAME = "COMPONENT_NAME"; + + public static final String COMPONENT_INSTANCE_NAME = "COMPONENT_INSTANCE_NAME"; + + /** + * component constants. + */ + public static final String DEPENDENCIES = "DEPENDENCIES"; + + public static final String DESCRIPTION = "DESCRIPTION"; + + public static final String RUN_PRIVILEGED_CONTAINER = + "RUN_PRIVILEGED_CONTAINER"; + + public static final String PLACEMENT_POLICY = "PLACEMENT_POLICY"; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java new file mode 100644 index 0000000..cfe7c1b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/ServiceTimelinePublisher.java @@ -0,0 +1,375 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.timelineservice; + +import org.apache.hadoop.metrics2.AbstractMetric; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric; +import org.apache.hadoop.yarn.client.api.TimelineV2Client; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.api.records.Configuration; +import org.apache.hadoop.yarn.service.api.records.Container; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.util.timeline.TimelineUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.timelineservice.ServiceTimelineMetricsConstants.DIAGNOSTICS_INFO; + +/** + * A single service that publishes all the Timeline Entities. 
+ */ +public class ServiceTimelinePublisher extends CompositeService { + + // Number of bytes of config which can be published in one shot to ATSv2. + public static final int ATS_CONFIG_PUBLISH_SIZE_BYTES = 10 * 1024; + + private TimelineV2Client timelineClient; + + private volatile boolean stopped = false; + + private static final Logger log = + LoggerFactory.getLogger(ServiceTimelinePublisher.class); + + @Override + protected void serviceInit(org.apache.hadoop.conf.Configuration configuration) + throws Exception { + addService(timelineClient); + super.serviceInit(configuration); + } + + + @Override + protected void serviceStop() throws Exception { + stopped = true; + super.serviceStop(); + } + + public boolean isStopped() { + return stopped; + } + + public ServiceTimelinePublisher(TimelineV2Client client) { + super(ServiceTimelinePublisher.class.getName()); + timelineClient = client; + } + + public void serviceAttemptRegistered(Service service, + org.apache.hadoop.conf.Configuration systemConf) { + long currentTimeMillis = service.getLaunchTime() == null + ? 
System.currentTimeMillis() : service.getLaunchTime().getTime(); + + TimelineEntity entity = createServiceAttemptEntity(service.getId()); + entity.setCreatedTime(currentTimeMillis); + + // create info keys + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.NAME, service.getName()); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, + ServiceState.STARTED.toString()); + entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_TIME, + currentTimeMillis); + entity.addInfo(ServiceTimelineMetricsConstants.QUICK_LINKS, + service.getQuicklinks()); + entity.addInfo(entityInfos); + + // add an event + TimelineEvent startEvent = new TimelineEvent(); + startEvent.setId(ServiceTimelineEvent.SERVICE_ATTEMPT_REGISTERED.toString()); + startEvent.setTimestamp(currentTimeMillis); + entity.addEvent(startEvent); + + // publish before configurations published + putEntity(entity); + + // publish system config - YarnConfiguration + populateTimelineEntity(systemConf.iterator(), service.getId(), + ServiceTimelineEntityType.SERVICE_ATTEMPT.toString()); + // publish user conf + publishUserConf(service.getConfiguration(), service.getId(), + ServiceTimelineEntityType.SERVICE_ATTEMPT.toString()); + + // publish component as separate entity. 
+ publishComponents(service.getComponents()); + } + + public void serviceAttemptUpdated(Service service) { + TimelineEntity entity = createServiceAttemptEntity(service.getId()); + entity.addInfo(ServiceTimelineMetricsConstants.QUICK_LINKS, + service.getQuicklinks()); + putEntity(entity); + } + + public void serviceAttemptUnregistered(ServiceContext context, + String diagnostics) { + TimelineEntity entity = createServiceAttemptEntity( + context.attemptId.getApplicationId().toString()); + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, + FinalApplicationStatus.ENDED); + entityInfos.put(DIAGNOSTICS_INFO, diagnostics); + entity.addInfo(entityInfos); + + // add an event + TimelineEvent finishEvent = new TimelineEvent(); + finishEvent + .setId(ServiceTimelineEvent.SERVICE_ATTEMPT_UNREGISTERED.toString()); + finishEvent.setTimestamp(System.currentTimeMillis()); + entity.addEvent(finishEvent); + + putEntity(entity); + } + + public void componentInstanceStarted(Container container, + ComponentInstance instance) { + + TimelineEntity entity = createComponentInstanceEntity(container.getId()); + entity.setCreatedTime(container.getLaunchTime().getTime()); + + // create info keys + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.BARE_HOST, + container.getBareHost()); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, + container.getState().toString()); + entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_TIME, + container.getLaunchTime().getTime()); + entityInfos.put(ServiceTimelineMetricsConstants.COMPONENT_NAME, + instance.getCompName()); + entityInfos.put(ServiceTimelineMetricsConstants.COMPONENT_INSTANCE_NAME, + instance.getCompInstanceName()); + entity.addInfo(entityInfos); + + // add an event + TimelineEvent startEvent = new TimelineEvent(); + startEvent + .setId(ServiceTimelineEvent.COMPONENT_INSTANCE_REGISTERED.toString()); + startEvent.setTimestamp(container.getLaunchTime().getTime()); 
+ entity.addEvent(startEvent); + + putEntity(entity); + } + + public void componentInstanceFinished(ComponentInstance instance, + int exitCode, ContainerState state, String diagnostics) { + TimelineEntity entity = createComponentInstanceEntity( + instance.getContainer().getId().toString()); + + // create info keys + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.EXIT_STATUS_CODE, + exitCode); + entityInfos.put(DIAGNOSTICS_INFO, diagnostics); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, state); + entity.addInfo(entityInfos); + + // add an event + TimelineEvent startEvent = new TimelineEvent(); + startEvent + .setId(ServiceTimelineEvent.COMPONENT_INSTANCE_UNREGISTERED.toString()); + startEvent.setTimestamp(System.currentTimeMillis()); + entity.addEvent(startEvent); + + putEntity(entity); + } + + public void componentInstanceUpdated(Container container) { + TimelineEntity entity = createComponentInstanceEntity(container.getId()); + + // create info keys + Map entityInfos = new HashMap(); + entityInfos.put(ServiceTimelineMetricsConstants.IP, container.getIp()); + entityInfos.put(ServiceTimelineMetricsConstants.HOSTNAME, + container.getHostname()); + entityInfos.put(ServiceTimelineMetricsConstants.STATE, + container.getState().toString()); + entity.addInfo(entityInfos); + + TimelineEvent updateEvent = new TimelineEvent(); + updateEvent + .setId(ServiceTimelineEvent.COMPONENT_INSTANCE_UPDATED.toString()); + updateEvent.setTimestamp(System.currentTimeMillis()); + entity.addEvent(updateEvent); + + putEntity(entity); + } + + private void publishComponents(List components) { + long currentTimeMillis = System.currentTimeMillis(); + for (Component component : components) { + TimelineEntity entity = createComponentEntity(component.getName()); + entity.setCreatedTime(currentTimeMillis); + + // create info keys + Map entityInfos = new HashMap(); + if (component.getArtifact() != null) { + 
entityInfos.put(ServiceTimelineMetricsConstants.ARTIFACT_ID, + component.getArtifact().getId()); + entityInfos.put(ServiceTimelineMetricsConstants.ARTIFACT_TYPE, + component.getArtifact().getType().toString()); + } + + if (component.getResource() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_CPU, + component.getResource().getCpus()); + entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_MEMORY, + component.getResource().getMemory()); + if (component.getResource().getProfile() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.RESOURCE_PROFILE, + component.getResource().getProfile()); + } + } + + if (component.getLaunchCommand() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.LAUNCH_COMMAND, + component.getLaunchCommand()); + } + entityInfos.put(ServiceTimelineMetricsConstants.RUN_PRIVILEGED_CONTAINER, + component.getRunPrivilegedContainer().toString()); + if (component.getPlacementPolicy() != null) { + entityInfos.put(ServiceTimelineMetricsConstants.PLACEMENT_POLICY, + component.getPlacementPolicy().getLabel()); + } + entity.addInfo(entityInfos); + + putEntity(entity); + + // publish component specific configurations + publishUserConf(component.getConfiguration(), component.getName(), + ServiceTimelineEntityType.COMPONENT.toString()); + } + } + + private void publishUserConf(Configuration configuration, + String entityId, String entityType) { + populateTimelineEntity(configuration.getProperties().entrySet().iterator(), + entityId, entityType); + + populateTimelineEntity(configuration.getEnv().entrySet().iterator(), + entityId, entityType); + + for (ConfigFile configFile : configuration.getFiles()) { + populateTimelineEntity(configFile.getProps().entrySet().iterator(), + entityId, entityType); + } + } + + private void populateTimelineEntity(Iterator> iterator, + String entityId, String entityType) { + int configSize = 0; + TimelineEntity entity = createTimelineEntity(entityId, entityType); + while 
(iterator.hasNext()) { + Entry entry = iterator.next(); + int size = entry.getKey().length() + entry.getValue().length(); + configSize += size; + // Configs are split into multiple entities if they exceed 100kb in size. + if (configSize > ATS_CONFIG_PUBLISH_SIZE_BYTES) { + if (entity.getConfigs().size() > 0) { + putEntity(entity); + entity = createTimelineEntity(entityId, entityType); + } + configSize = size; + } + entity.addConfig(entry.getKey(), entry.getValue()); + } + if (configSize > 0) { + putEntity(entity); + } + } + + /** + * Called from ServiceMetricsSink at regular interval of time. + * @param metrics of service or components + * @param entityId Id of entity + * @param entityType Type of entity + * @param timestamp + */ + public void publishMetrics(Iterable metrics, String entityId, + String entityType, long timestamp) { + TimelineEntity entity = createTimelineEntity(entityId, entityType); + Set entityMetrics = new HashSet(); + for (AbstractMetric metric : metrics) { + TimelineMetric timelineMetric = new TimelineMetric(); + timelineMetric.setId(metric.name()); + timelineMetric.addValue(timestamp, metric.value()); + entityMetrics.add(timelineMetric); + } + entity.setMetrics(entityMetrics); + putEntity(entity); + } + + private TimelineEntity createServiceAttemptEntity(String serviceId) { + TimelineEntity entity = createTimelineEntity(serviceId, + ServiceTimelineEntityType.SERVICE_ATTEMPT.toString()); + return entity; + } + + private TimelineEntity createComponentInstanceEntity(String instanceId) { + TimelineEntity entity = createTimelineEntity(instanceId, + ServiceTimelineEntityType.COMPONENT_INSTANCE.toString()); + return entity; + } + + private TimelineEntity createComponentEntity(String componentId) { + TimelineEntity entity = createTimelineEntity(componentId, + ServiceTimelineEntityType.COMPONENT.toString()); + return entity; + } + + private TimelineEntity createTimelineEntity(String entityId, + String entityType) { + TimelineEntity entity = new 
TimelineEntity(); + entity.setId(entityId); + entity.setType(entityType); + return entity; + } + + private void putEntity(TimelineEntity entity) { + try { + if (log.isDebugEnabled()) { + log.debug("Publishing the entity " + entity + ", JSON-style content: " + + TimelineUtils.dumpTimelineRecordtoJSON(entity)); + } + if (timelineClient != null) { + timelineClient.putEntitiesAsync(entity); + } else { + log.error("Seems like client has been removed before the entity " + + "could be published for " + entity); + } + } catch (Exception e) { + log.error("Error when publishing entity " + entity, e); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java new file mode 100644 index 0000000..72f7842 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/timelineservice/package-info.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * ATS implementation + */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +package org.apache.hadoop.yarn.service.timelineservice; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java new file mode 100644 index 0000000..2607c08 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ApplicationReportSerDeser.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.utils; + +import org.codehaus.jackson.JsonGenerationException; +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.map.JsonMappingException; + +import java.io.IOException; + +/** + * Persistence of {@link SerializedApplicationReport} + * + */ +public class ApplicationReportSerDeser + extends JsonSerDeser { + public ApplicationReportSerDeser() { + super(SerializedApplicationReport.class); + } + + + private static final ApplicationReportSerDeser + staticinstance = new ApplicationReportSerDeser(); + + /** + * Convert an instance to a JSON string -sync access to a shared ser/deser + * object instance + * @param instance object to convert + * @return a JSON string description + * @throws JsonParseException parse problems + * @throws JsonMappingException O/J mapping problems + */ + public static String toString(SerializedApplicationReport instance) + throws IOException, JsonGenerationException, JsonMappingException { + synchronized (staticinstance) { + return staticinstance.toJson(instance); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java new file mode 100644 index 0000000..86896b2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java @@ -0,0 +1,201 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.registry.client.binding.RegistryTypeUtils; +import org.apache.hadoop.registry.client.exceptions.InvalidRecordException; +import org.apache.hadoop.registry.client.impl.zk.RegistryInternalConstants; +import org.apache.hadoop.registry.client.types.Endpoint; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.encodeForRegistry; +import static org.apache.hadoop.registry.client.binding.RegistryUtils.convertUsername; +import static org.apache.hadoop.registry.client.binding.RegistryUtils.getCurrentUsernameUnencoded; +import static org.apache.hadoop.registry.client.binding.RegistryUtils.servicePath; + +/** + * Generic code to get the URLs for clients via the registry + */ +public class ClientRegistryBinder { + private static final Logger log = + LoggerFactory.getLogger(ClientRegistryBinder.class); + + private final 
RegistryOperations operations; + + public ClientRegistryBinder(RegistryOperations operations) { + this.operations = operations; + } + + /** + * Buld the user path -switches to the system path if the user is "". + * It also cross-converts the username to ascii via punycode + * @param username username or "" + * @return the path to the user + */ + public static String homePathForUser(String username) { + Preconditions.checkArgument(username != null, "null user"); + + // catch recursion + if (username.startsWith(RegistryConstants.PATH_USERS)) { + return username; + } + + if (username.isEmpty()) { + return RegistryConstants.PATH_SYSTEM_SERVICES; + } + + // convert username to registry name + String convertedName = convertUsername(username); + + return RegistryPathUtils.join(RegistryConstants.PATH_USERS, + encodeForRegistry(convertedName)); + } + + /** + * Get the current username, before any encoding has been applied. + * @return the current user from the kerberos identity, falling back + * to the user and/or env variables. + */ + public static String currentUsernameUnencoded() { + String env_hadoop_username = System.getenv( + RegistryInternalConstants.HADOOP_USER_NAME); + return getCurrentUsernameUnencoded(env_hadoop_username); + } + + /** + * Qualify a user. + *
+   * <ol>
+   *   <li>"~" maps to user home path home</li>
+   *   <li>"~user" maps to /users/$user</li>
+   *   <li>"/" maps to /services/</li>
+   * </ol>
+ * @param user the username + * @return the base path + */ + public static String qualifyUser(String user) { + // qualify the user + String t = user.trim(); + if (t.startsWith("/")) { + // already resolved + return t; + } else if (t.equals("~")) { + // self + return currentUsernameUnencoded(); + } else if (t.startsWith("~")) { + // another user + // convert username to registry name + String convertedName = convertUsername(t.substring(1)); + + return RegistryPathUtils.join(RegistryConstants.PATH_USERS, + encodeForRegistry(convertedName)); + } else { + return "/" + t; + } + } + + /** + * Look up an external REST API + * @param user user which will be qualified as per {@link #qualifyUser(String)} + * @param serviceClass service class + * @param instance instance name + * @param api API + * @return the API, or an exception is raised. + * @throws IOException + */ + public String lookupExternalRestAPI(String user, + String serviceClass, + String instance, + String api) + throws IOException { + String qualified = qualifyUser(user); + String path = servicePath(qualified, serviceClass, instance); + String restAPI = resolveExternalRestAPI(api, path); + if (restAPI == null) { + throw new PathNotFoundException(path + " API " + api); + } + return restAPI; + } + + /** + * Resolve a service record then return an external REST API exported it. + * + * @param api API to resolve + * @param path path of the service record + * @return null if the record exists but the API is absent or it has no + * REST endpoints. 
+ * @throws IOException resolution problems, as covered in + * {@link RegistryOperations#resolve(String)} + */ + protected String resolveExternalRestAPI(String api, String path) throws + IOException { + ServiceRecord record = operations.resolve(path); + return lookupRestAPI(record, api, true); + } + + /** + * Look up an external REST API endpoint + * @param record service record + * @param api URI of api + * @param external flag to indicate this is an external record + * @return the first endpoint of the implementation, or null if there + * is no entry for the API, implementation or it's the wrong type. + */ + public static String lookupRestAPI(ServiceRecord record, + String api, boolean external) throws InvalidRecordException { + try { + String url = null; + Endpoint endpoint = getEndpoint(record, api, external); + List addresses = + RegistryTypeUtils.retrieveAddressesUriType(endpoint); + if (addresses != null && !addresses.isEmpty()) { + url = addresses.get(0); + } + return url; + } catch (InvalidRecordException e) { + log.debug("looking for API {}", api, e); + return null; + } + } + + /** + * Get an endpont by API + * @param record service record + * @param api API + * @param external flag to indicate this is an external record + * @return the endpoint or null + */ + public static Endpoint getEndpoint(ServiceRecord record, + String api, + boolean external) { + return external ? 
record.getExternalEndpoint(api) + : record.getInternalEndpoint(api); + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java new file mode 100644 index 0000000..9f0e5d4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Comparators.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.utils; + +import java.io.Serializable; +import java.util.Comparator; + +/** + * Some general comparators + */ +public class Comparators { + + public static class LongComparator implements Comparator, Serializable { + @Override + public int compare(Long o1, Long o2) { + return o1.compareTo(o2); + } + } + + public static class InvertedLongComparator + implements Comparator, Serializable { + @Override + public int compare(Long o1, Long o2) { + return o2.compareTo(o1); + } + } + + /** + * Little template class to reverse any comparitor + * @param the type that is being compared + */ + public static class ComparatorReverser implements Comparator, + Serializable { + + final Comparator instance; + + public ComparatorReverser(Comparator instance) { + this.instance = instance; + } + + @Override + public int compare(CompareType first, CompareType second) { + return instance.compare(second, first); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java new file mode 100644 index 0000000..fe8cce8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.StringWriter; +import java.net.URL; +import java.util.Map; + +/** + * Methods to aid in config, both in the Configuration class and + * with other parts of setting up Slider-initated processes. + * + * Some of the methods take an argument of a map iterable for their sources; this allows + * the same method + */ +public class ConfigHelper { + private static final Logger log = LoggerFactory.getLogger(ConfigHelper.class); + + /** + * Set an entire map full of values + * + * @param config config to patch + * @param map map of data + * @param origin origin data + */ + public static void addConfigMap(Configuration config, + Map map, + String origin) throws BadConfigException { + addConfigMap(config, map.entrySet(), origin); + } + + /** + * Set an entire map full of values + * + * @param config config to patch + * @param map map of data + * @param origin origin data + */ + public static void addConfigMap(Configuration config, + Iterable> map, + String origin) throws BadConfigException { + for (Map.Entry mapEntry : map) { + String key = mapEntry.getKey(); + String value = mapEntry.getValue(); + if (value == null) { + throw new BadConfigException("Null value for property " + key); + } + config.set(key, value, origin); + } + } + + /** + * 
Convert to an XML string + * @param conf configuration + * @return conf + * @throws IOException + */ + public static String toXml(Configuration conf) throws IOException { + StringWriter writer = new StringWriter(); + conf.writeXml(writer); + return writer.toString(); + } + + + /** + * Register a resource as a default resource. + * Do not attempt to use this unless you understand that the + * order in which default resources are loaded affects the outcome, + * and that subclasses of Configuration often register new default + * resources + * @param resource the resource name + * @return the URL or null + */ + public static URL registerDefaultResource(String resource) { + URL resURL = getResourceUrl(resource); + if (resURL != null) { + Configuration.addDefaultResource(resource); + } + return resURL; + } + + /** + * Load a configuration from a resource on this classpath. + * If the resource is not found, an empty configuration is returned + * @param resource the resource name + * @return the loaded configuration. + */ + public static Configuration loadFromResource(String resource) { + Configuration conf = new Configuration(false); + URL resURL = getResourceUrl(resource); + if (resURL != null) { + log.debug("loaded resources from {}", resURL); + conf.addResource(resource); + } else{ + log.debug("failed to find {} on the classpath", resource); + } + return conf; + + } + + /** + * Get the URL to a resource, null if not on the CP + * @param resource resource to look for + * @return the URL or null + */ + public static URL getResourceUrl(String resource) { + return ConfigHelper.class.getClassLoader() + .getResource(resource); + } + + /** + * This goes through the keyset of one configuration and retrieves each value + * from a value source -a different or the same configuration. 
This triggers + * the property resolution process of the value, resolving any variables against + * in-config or inherited configurations + * @param keysource source of keys + * @param valuesource the source of values + * @return a new configuration where foreach key in keysource, get(key)==valuesource.get(key) + */ + public static Configuration resolveConfiguration( + Iterable> keysource, + Configuration valuesource) { + Configuration result = new Configuration(false); + for (Map.Entry entry : keysource) { + String key = entry.getKey(); + String value = valuesource.get(key); + Preconditions.checkState(value != null, + "no reference for \"%s\" in values", key); + result.set(key, value); + } + return result; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigUtils.java new file mode 100644 index 0000000..a969be9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigUtils.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.service.api.records.ConfigFormat; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class ConfigUtils { + public static final String TEMPLATE_FILE = "template.file"; + + public static String replaceProps(Map config, String content) { + Map tokens = new HashMap<>(); + for (Entry entry : config.entrySet()) { + tokens.put("${" + entry.getKey() + "}", entry.getValue()); + tokens.put("{{" + entry.getKey() + "}}", entry.getValue()); + } + String value = content; + for (Map.Entry token : tokens.entrySet()) { + value = value.replaceAll(Pattern.quote(token.getKey()), + Matcher.quoteReplacement(token.getValue())); + } + return value; + } + + public static Map replacePropsInConfig( + Map config, Map env) { + Map tokens = new HashMap<>(); + for (Entry entry : env.entrySet()) { + tokens.put("${" + entry.getKey() + "}", entry.getValue()); + } + Map newConfig = new HashMap<>(); + for (Entry entry : config.entrySet()) { + String value = entry.getValue(); + for (Map.Entry token : tokens.entrySet()) { + value = value.replaceAll(Pattern.quote(token.getKey()), + Matcher.quoteReplacement(token.getValue())); + } + newConfig.put(entry.getKey(), entry.getValue()); + } + return newConfig; + } + + public static void prepConfigForTemplateOutputter(ConfigFormat 
configFormat, + Map config, SliderFileSystem fileSystem, + String clusterName, String fileName) throws IOException { + if (!configFormat.equals(ConfigFormat.TEMPLATE)) { + return; + } + Path templateFile = null; + if (config.containsKey(TEMPLATE_FILE)) { + templateFile = fileSystem.buildResourcePath(config.get(TEMPLATE_FILE)); + if (!fileSystem.isFile(templateFile)) { + templateFile = fileSystem.buildResourcePath(clusterName, + config.get(TEMPLATE_FILE)); + } + if (!fileSystem.isFile(templateFile)) { + throw new IOException("config specified template file " + config + .get(TEMPLATE_FILE) + " but " + templateFile + " doesn't exist"); + } + } + if (templateFile == null && fileName != null) { + templateFile = fileSystem.buildResourcePath(fileName); + if (!fileSystem.isFile(templateFile)) { + templateFile = fileSystem.buildResourcePath(clusterName, + fileName); + } + } + if (fileSystem.isFile(templateFile)) { + config.put("content", fileSystem.cat(templateFile)); + } else { + config.put("content", ""); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java new file mode 100644 index 0000000..281e1dfe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java @@ -0,0 +1,521 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.api.records.LocalResourceType; +import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.service.conf.SliderExitCodes; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.apache.hadoop.yarn.service.exceptions.ErrorStrings; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.util.Records; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.HashMap; 
+import java.util.Map; + +public class CoreFileSystem { + private static final Logger + log = LoggerFactory.getLogger(CoreFileSystem.class); + + private static final String UTF_8 = "UTF-8"; + + protected final FileSystem fileSystem; + protected final Configuration configuration; + + public CoreFileSystem(FileSystem fileSystem, Configuration configuration) { + Preconditions.checkNotNull(fileSystem, + "Cannot create a CoreFileSystem with a null FileSystem"); + Preconditions.checkNotNull(configuration, + "Cannot create a CoreFileSystem with a null Configuration"); + this.fileSystem = fileSystem; + this.configuration = configuration; + } + + public CoreFileSystem(Configuration configuration) throws IOException { + Preconditions.checkNotNull(configuration, + "Cannot create a CoreFileSystem with a null Configuration"); + this.fileSystem = FileSystem.get(configuration); + this.configuration = configuration; + } + + /** + * Get the temp path for this cluster + * @param clustername name of the cluster + * @return path for temp files (is not purged) + */ + public Path getTempPathForCluster(String clustername) { + Path clusterDir = buildClusterDirPath(clustername); + return new Path(clusterDir, YarnServiceConstants.TMP_DIR_PREFIX); + } + + /** + * Returns the underlying FileSystem for this object. 
+ * + * @return filesystem + */ + public FileSystem getFileSystem() { + return fileSystem; + } + + @Override + public String toString() { + final StringBuilder sb = + new StringBuilder("CoreFileSystem{"); + sb.append("fileSystem=").append(fileSystem.getUri()); + sb.append('}'); + return sb.toString(); + } + + /** + * Build up the path string for a cluster instance -no attempt to + * create the directory is made + * + * @param clustername name of the cluster + * @return the path for persistent data + */ + public Path buildClusterDirPath(String clustername) { + Preconditions.checkNotNull(clustername); + Path path = getBaseApplicationPath(); + return new Path(path, YarnServiceConstants.SERVICES_DIRECTORY + "/" + clustername); + } + + + /** + * Build up the path string for keytab install location -no attempt to + * create the directory is made + * + * @return the path for keytab + */ + public Path buildKeytabInstallationDirPath(String keytabFolder) { + Preconditions.checkNotNull(keytabFolder); + Path path = getBaseApplicationPath(); + return new Path(path, YarnServiceConstants.KEYTAB_DIR + "/" + keytabFolder); + } + + /** + * Build up the path string for keytab install location -no attempt to + * create the directory is made + * + * @return the path for keytab installation location + */ + public Path buildKeytabPath(String keytabDir, String keytabName, String clusterName) { + Path homePath = getHomeDirectory(); + Path baseKeytabDir; + if (keytabDir != null) { + baseKeytabDir = new Path(homePath, keytabDir); + } else { + baseKeytabDir = new Path(buildClusterDirPath(clusterName), + YarnServiceConstants.KEYTAB_DIR); + } + return keytabName == null ? 
baseKeytabDir : + new Path(baseKeytabDir, keytabName); + } + + /** + * Build up the path string for resource install location -no attempt to + * create the directory is made + * + * @return the path for resource + */ + public Path buildResourcePath(String resourceFolder) { + Preconditions.checkNotNull(resourceFolder); + Path path = getBaseApplicationPath(); + return new Path(path, YarnServiceConstants.RESOURCE_DIR + "/" + resourceFolder); + } + + /** + * Build up the path string for resource install location -no attempt to + * create the directory is made + * + * @return the path for resource + */ + public Path buildResourcePath(String dirName, String fileName) { + Preconditions.checkNotNull(dirName); + Preconditions.checkNotNull(fileName); + Path path = getBaseApplicationPath(); + return new Path(path, YarnServiceConstants.RESOURCE_DIR + "/" + dirName + "/" + fileName); + } + + /** + * Create a directory with the given permissions. + * + * @param dir directory + * @param clusterPerms cluster permissions + * @throws IOException IO problem + * @throws BadClusterStateException any cluster state problem + */ + @SuppressWarnings("deprecation") + public void createWithPermissions(Path dir, FsPermission clusterPerms) throws + IOException, + BadClusterStateException { + if (fileSystem.isFile(dir)) { + // HADOOP-9361 shows some filesystems don't correctly fail here + throw new BadClusterStateException( + "Cannot create a directory over a file %s", dir); + } + log.debug("mkdir {} with perms {}", dir, clusterPerms); + //no mask whatoever + fileSystem.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "000"); + fileSystem.mkdirs(dir, clusterPerms); + //and force set it anyway just to make sure + fileSystem.setPermission(dir, clusterPerms); + } + + /** + * Verify that the cluster directory is not present + * + * @param clustername name of the cluster + * @param clusterDirectory actual directory to look for + * @throws IOException trouble with FS + * @throws 
SliderException If the directory exists + */ + public void verifyClusterDirectoryNonexistent(String clustername, + Path clusterDirectory) + throws IOException, SliderException { + if (fileSystem.exists(clusterDirectory)) { + throw new SliderException(SliderExitCodes.EXIT_INSTANCE_EXISTS, + ErrorStrings.PRINTF_E_INSTANCE_ALREADY_EXISTS, clustername, + clusterDirectory); + } + } + /** + * Verify that the given directory is not present + * + * @param clusterDirectory actual directory to look for + * @throws IOException trouble with FS + * @throws SliderException If the directory exists + */ + public void verifyDirectoryNonexistent(Path clusterDirectory) throws + IOException, + SliderException { + if (fileSystem.exists(clusterDirectory)) { + + log.error("Dir {} exists: {}", + clusterDirectory, + listFSDir(clusterDirectory)); + throw new SliderException(SliderExitCodes.EXIT_INSTANCE_EXISTS, + ErrorStrings.PRINTF_E_INSTANCE_DIR_ALREADY_EXISTS, + clusterDirectory); + } + } + + /** + * Verify that a user has write access to a directory. 
+ * It does this by creating then deleting a temp file + * + * @param dirPath actual directory to look for + * @throws FileNotFoundException file not found + * @throws IOException trouble with FS + * @throws BadClusterStateException if the directory is not writeable + */ + public void verifyDirectoryWriteAccess(Path dirPath) throws IOException, + SliderException { + verifyPathExists(dirPath); + Path tempFile = new Path(dirPath, "tmp-file-for-checks"); + try { + FSDataOutputStream out ; + out = fileSystem.create(tempFile, true); + IOUtils.closeStream(out); + fileSystem.delete(tempFile, false); + } catch (IOException e) { + log.warn("Failed to create file {}: {}", tempFile, e); + throw new BadClusterStateException(e, + "Unable to write to directory %s : %s", dirPath, e.toString()); + } + } + + /** + * Verify that a path exists + * @param path path to check + * @throws FileNotFoundException file not found + * @throws IOException trouble with FS + */ + public void verifyPathExists(Path path) throws IOException { + if (!fileSystem.exists(path)) { + throw new FileNotFoundException(path.toString()); + } + } + + /** + * Verify that a path exists + * @param path path to check + * @throws FileNotFoundException file not found or is not a file + * @throws IOException trouble with FS + */ + public void verifyFileExists(Path path) throws IOException { + FileStatus status = fileSystem.getFileStatus(path); + + if (!status.isFile()) { + throw new FileNotFoundException("Not a file: " + path.toString()); + } + } + + /** + * Given a path, check if it exists and is a file + * + * @param path + * absolute path to the file to check + * @return true if and only if path exists and is a file, false for all other + * reasons including if file check throws IOException + */ + public boolean isFile(Path path) { + boolean isFile = false; + try { + FileStatus status = fileSystem.getFileStatus(path); + if (status.isFile()) { + isFile = true; + } + } catch (IOException e) { + // ignore, isFile is 
already set to false + } + return isFile; + } + + /** + * Get the base path + * + * @return the base path optionally configured by + * {@link YarnServiceConf#YARN_SERVICE_BASE_PATH} + */ + public Path getBaseApplicationPath() { + String configuredBasePath = configuration + .get(YarnServiceConf.YARN_SERVICE_BASE_PATH, + getHomeDirectory() + "/" + YarnServiceConstants.SERVICE_BASE_DIRECTORY); + return new Path(configuredBasePath); + } + + /** + * Get slider dependency parent dir in HDFS + * + * @return the parent dir path of slider.tar.gz in HDFS + */ + public Path getDependencyPath() { + String parentDir = YarnServiceConstants.DEPENDENCY_DIR; + return new Path(String.format(parentDir, VersionInfo.getVersion())); + } + + /** + * Get slider.tar.gz absolute filepath in HDFS + * + * @return the absolute path to slider.tar.gz in HDFS + */ + public Path getDependencyTarGzip() { + Path dependencyLibAmPath = getDependencyPath(); + Path dependencyLibTarGzip = new Path( + dependencyLibAmPath.toUri().toString(), + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME + + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT); + return dependencyLibTarGzip; + } + + public Path getHomeDirectory() { + return fileSystem.getHomeDirectory(); + } + + /** + * Create an AM resource from the + * + * @param destPath dest path in filesystem + * @param resourceType resource type + * @return the local resource for AM + */ + public LocalResource createAmResource(Path destPath, LocalResourceType resourceType) throws IOException { + FileStatus destStatus = fileSystem.getFileStatus(destPath); + LocalResource amResource = Records.newRecord(LocalResource.class); + amResource.setType(resourceType); + // Set visibility of the resource + // Setting to most private option + amResource.setVisibility(LocalResourceVisibility.APPLICATION); + // Set the resource to be copied over + amResource.setResource( + URL.fromPath(fileSystem.resolvePath(destStatus.getPath()))); + // Set timestamp and length of file so that the 
framework + // can do basic sanity checks for the local resource + // after it has been copied over to ensure it is the same + // resource the client intended to use with the service + amResource.setTimestamp(destStatus.getModificationTime()); + amResource.setSize(destStatus.getLen()); + return amResource; + } + + /** + * Register all files under a fs path as a directory to push out + * + * @param srcDir src dir + * @param destRelativeDir dest dir (no trailing /) + * @return the map of entries + */ + public Map submitDirectory(Path srcDir, String destRelativeDir) throws IOException { + //now register each of the files in the directory to be + //copied to the destination + FileStatus[] fileset = fileSystem.listStatus(srcDir); + Map localResources = + new HashMap(fileset.length); + for (FileStatus entry : fileset) { + + LocalResource resource = createAmResource(entry.getPath(), + LocalResourceType.FILE); + String relativePath = destRelativeDir + "/" + entry.getPath().getName(); + localResources.put(relativePath, resource); + } + return localResources; + } + + /** + * Submit a JAR containing a specific class, returning + * the resource to be mapped in + * + * @param clazz class to look for + * @param subdir subdirectory (expected to end in a "/") + * @param jarName At the destination + * @return the local resource ref + * @throws IOException trouble copying to HDFS + */ + public LocalResource submitJarWithClass(Class clazz, Path tempPath, String subdir, String jarName) + throws IOException, SliderException { + File localFile = SliderUtils.findContainingJarOrFail(clazz); + return submitFile(localFile, tempPath, subdir, jarName); + } + + /** + * Submit a local file to the filesystem references by the instance's cluster + * filesystem + * + * @param localFile filename + * @param subdir subdirectory (expected to end in a "/") + * @param destFileName destination filename + * @return the local resource ref + * @throws IOException trouble copying to HDFS + */ + public 
LocalResource submitFile(File localFile, Path tempPath, String subdir, String destFileName) + throws IOException { + Path src = new Path(localFile.toString()); + Path subdirPath = new Path(tempPath, subdir); + fileSystem.mkdirs(subdirPath); + Path destPath = new Path(subdirPath, destFileName); + log.debug("Copying {} (size={} bytes) to {}", localFile, localFile.length(), destPath); + + fileSystem.copyFromLocalFile(false, true, src, destPath); + + // Set the type of resource - file or archive + // archives are untarred at destination + // we don't need the jar file to be untarred for now + return createAmResource(destPath, LocalResourceType.FILE); + } + + /** + * Submit the AM tar.gz resource referenced by the instance's cluster + * filesystem. Also, update the providerResources object with the new + * resource. + * + * @param providerResources + * the provider resource map to be updated + * @throws IOException + * trouble copying to HDFS + */ + public void submitTarGzipAndUpdate( + Map providerResources) throws IOException, + BadClusterStateException { + Path dependencyLibTarGzip = getDependencyTarGzip(); + LocalResource lc = createAmResource(dependencyLibTarGzip, + LocalResourceType.ARCHIVE); + providerResources.put(YarnServiceConstants.DEPENDENCY_LOCALIZED_DIR_LINK, lc); + } + + public void copyLocalFileToHdfs(File localPath, + Path destPath, FsPermission fp) + throws IOException { + if (localPath == null || destPath == null) { + throw new IOException("Either localPath or destPath is null"); + } + fileSystem.getConf().set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, + "000"); + fileSystem.mkdirs(destPath.getParent(), fp); + log.info("Copying file {} to {}", localPath.toURI(), + fileSystem.getScheme() + ":/" + destPath.toUri()); + + fileSystem.copyFromLocalFile(false, true, new Path(localPath.getPath()), + destPath); + // set file permissions of the destPath + fileSystem.setPermission(destPath, fp); + } + + public void copyHdfsFileToLocal(Path hdfsPath, File 
destFile) + throws IOException { + if (hdfsPath == null || destFile == null) { + throw new IOException("Either hdfsPath or destPath is null"); + } + log.info("Copying file {} to {}", hdfsPath.toUri(), destFile.toURI()); + + Path destPath = new Path(destFile.getPath()); + fileSystem.copyToLocalFile(hdfsPath, destPath); + } + + /** + * list entries in a filesystem directory + * + * @param path directory + * @return a listing, one to a line + * @throws IOException + */ + public String listFSDir(Path path) throws IOException { + FileStatus[] stats = fileSystem.listStatus(path); + StringBuilder builder = new StringBuilder(); + for (FileStatus stat : stats) { + builder.append(stat.getPath().toString()) + .append("\t") + .append(stat.getLen()) + .append("\n"); + } + return builder.toString(); + } + + public String cat(Path path) throws IOException { + FileStatus status = fileSystem.getFileStatus(path); + byte[] b = new byte[(int) status.getLen()]; + FSDataInputStream in = null; + try { + in = fileSystem.open(path); + int count = in.read(b); + return new String(b, 0, count, UTF_8); + } finally { + IOUtils.closeStream(in); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Duration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Duration.java new file mode 100644 index 0000000..6fadfd3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/Duration.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import java.io.Closeable; + +/** + * A duration in milliseconds. This class can be used + * to count time, and to be polled to see if a time limit has + * passed. + */ +public class Duration implements Closeable { + public long start, finish; + public final long limit; + + /** + * Create a duration instance with a limit of 0 + */ + public Duration() { + this(0); + } + + /** + * Create a duration with a limit specified in millis + * @param limit duration in milliseconds + */ + public Duration(long limit) { + this.limit = limit; + } + + /** + * Start + * @return self + */ + public Duration start() { + start = now(); + return this; + } + + /** + * The close operation relays to {@link #finish()}. + * Implementing it allows Duration instances to be automatically + * finish()'d in Java7 try blocks for when used in measuring durations. + */ + @Override + public final void close() { + finish(); + } + + public void finish() { + finish = now(); + } + + protected long now() { + return System.nanoTime()/1000000; + } + + public long getInterval() { + return finish - start; + } + + /** + * return true if the limit has been exceeded + * @return true if a limit was set and the current time + * exceeds it. 
+ */ + public boolean getLimitExceeded() { + return limit >= 0 && ((now() - start) > limit); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("Duration"); + if (finish >= start) { + builder.append(" finished at ").append(getInterval()).append(" millis;"); + } else { + if (start > 0) { + builder.append(" started but not yet finished;"); + } else { + builder.append(" unstarted;"); + } + } + if (limit > 0) { + builder.append(" limit: ").append(limit).append(" millis"); + if (getLimitExceeded()) { + builder.append(" - exceeded"); + } + } + return builder.toString(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java new file mode 100644 index 0000000..7b22e3e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java @@ -0,0 +1,249 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; +import org.codehaus.jackson.JsonGenerationException; +import org.codehaus.jackson.JsonParseException; +import org.codehaus.jackson.map.DeserializationConfig; +import org.codehaus.jackson.map.JsonMappingException; +import org.codehaus.jackson.map.ObjectMapper; +import org.codehaus.jackson.map.PropertyNamingStrategy; +import org.codehaus.jackson.map.SerializationConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.EOFException; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * Support for marshalling objects to and from JSON. + * This class is NOT thread safe; it constructs an object mapper + * as an instance field. 
+ * @param + */ +public class JsonSerDeser { + + private static final Logger log = LoggerFactory.getLogger(JsonSerDeser.class); + private static final String UTF_8 = "UTF-8"; + + private final Class classType; + private final ObjectMapper mapper; + + /** + * Create an instance bound to a specific type + * @param classType class type + */ + public JsonSerDeser(Class classType) { + this.classType = classType; + this.mapper = new ObjectMapper(); + mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false); + } + + public JsonSerDeser(Class classType, PropertyNamingStrategy namingStrategy) { + this(classType); + mapper.setPropertyNamingStrategy(namingStrategy); + } + + /** + * Convert from JSON + * @param json input + * @return the parsed JSON + * @throws IOException IO + * @throws JsonMappingException failure to map from the JSON to this class + */ + public T fromJson(String json) + throws IOException, JsonParseException, JsonMappingException { + try { + return mapper.readValue(json, classType); + } catch (IOException e) { + log.error("Exception while parsing json : " + e + "\n" + json, e); + throw e; + } + } + + /** + * Convert from a JSON file + * @param jsonFile input file + * @return the parsed JSON + * @throws IOException IO problems + * @throws JsonMappingException failure to map from the JSON to this class + */ + public T fromFile(File jsonFile) + throws IOException, JsonParseException, JsonMappingException { + File absoluteFile = jsonFile.getAbsoluteFile(); + try { + return mapper.readValue(absoluteFile, classType); + } catch (IOException e) { + log.error("Exception while parsing json file {}", absoluteFile, e); + throw e; + } + } + + /** + * Convert from a JSON file + * @param resource input file + * @return the parsed JSON + * @throws IOException IO problems + * @throws JsonMappingException failure to map from the JSON to this class + */ + public T fromResource(String resource) + throws IOException, JsonParseException, 
JsonMappingException { + try(InputStream resStream = this.getClass().getResourceAsStream(resource)) { + if (resStream == null) { + throw new FileNotFoundException(resource); + } + return (T) (mapper.readValue(resStream, classType)); + } catch (IOException e) { + log.error("Exception while parsing json resource {}", resource, e); + throw e; + } + } + + /** + * Convert from an input stream, closing the stream afterwards. + * @param stream + * @return the parsed JSON + * @throws IOException IO problems + */ + public T fromStream(InputStream stream) throws IOException { + try { + return (T) (mapper.readValue(stream, classType)); + } catch (IOException e) { + log.error("Exception while parsing json input stream", e); + throw e; + } finally { + IOUtils.closeStream(stream); + } + } + + /** + * clone by converting to JSON and back again. + * This is much less efficient than any Java clone process. + * @param instance instance to duplicate + * @return a new instance + * @throws IOException problems. 
+ */ + public T fromInstance(T instance) throws IOException { + return fromJson(toJson(instance)); + } + + /** + * Deserialize from a byte array + * @param b + * @return the deserialized value + * @throws IOException parse problems + */ + public T fromBytes(byte[] b) throws IOException { + String json = new String(b, 0, b.length, UTF_8); + return fromJson(json); + } + + /** + * Load from a Hadoop filesystem + * @param fs filesystem + * @param path path + * @return a loaded CD + * @throws IOException IO problems + * @throws JsonParseException parse problems + * @throws JsonMappingException O/J mapping problems + */ + public T load(FileSystem fs, Path path) + throws IOException, JsonParseException, JsonMappingException { + FileStatus status = fs.getFileStatus(path); + long len = status.getLen(); + byte[] b = new byte[(int) len]; + FSDataInputStream dataInputStream = fs.open(path); + int count = dataInputStream.read(b); + if (count != len) { + throw new EOFException("Read of " + path +" finished prematurely"); + } + return fromBytes(b); + } + + + /** + * Save to a hadoop filesystem + * @param fs filesystem + * @param path path + * @param instance instance to save + * @param overwrite should any existing file be overwritten + * @throws IOException IO exception + */ + public void save(FileSystem fs, Path path, T instance, + boolean overwrite) throws + IOException { + FSDataOutputStream dataOutputStream = fs.create(path, overwrite); + writeJsonAsBytes(instance, dataOutputStream); + } + + /** + * Save an instance to a file + * @param instance instance to save + * @param file file + * @throws IOException + */ + public void save(T instance, File file) throws + IOException { + writeJsonAsBytes(instance, new FileOutputStream(file.getAbsoluteFile())); + } + + /** + * Write the json as bytes -then close the file + * @param dataOutputStream an outout stream that will always be closed + * @throws IOException on any failure + */ + private void writeJsonAsBytes(T instance, + 
OutputStream dataOutputStream) throws IOException { + try { + String json = toJson(instance); + byte[] b = json.getBytes(UTF_8); + dataOutputStream.write(b); + dataOutputStream.flush(); + dataOutputStream.close(); + } finally { + IOUtils.closeStream(dataOutputStream); + } + } + + /** + * Convert an object to a JSON string + * @param instance instance to convert + * @return a JSON string description + * @throws JsonParseException parse problems + * @throws JsonMappingException O/J mapping problems + */ + public String toJson(T instance) throws IOException, + JsonGenerationException, + JsonMappingException { + mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true); + return mapper.writeValueAsString(instance); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java new file mode 100644 index 0000000..108ca22 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import java.util.regex.Pattern;
+
+/**
+ * Utility class to validate strings against a predefined pattern.
+ */
+public class PatternValidator {
+
+  public static final String E_INVALID_NAME =
+      "Invalid name %s does not match the pattern %s ";
+  private final Pattern valid;
+  private final String pattern;
+
+  public PatternValidator(String pattern) {
+    this.pattern = pattern;
+    valid = Pattern.compile(pattern);
+  }
+
+  /**
+   * Validate the name, restricting it to the set defined by the pattern.
+   * @param name name to validate
+   * @throws IllegalArgumentException if not a valid name
+   */
+  public void validate(String name) {
+    if (!matches(name)) {
+      throw new IllegalArgumentException(
+          String.format(E_INVALID_NAME, name, pattern));
+    }
+  }
+
+  /**
+   * Query to see if the pattern matches.
+   * @param name name to validate
+   * @return true if the string matches the pattern
+   */
+  public boolean matches(String name) {
+    return valid.matcher(name).matches();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PortScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PortScanner.java
new file mode 100644
index 0000000..2dbf37f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PortScanner.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.yarn.service.conf.SliderExitCodes; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; +import org.apache.hadoop.yarn.service.exceptions.SliderException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * a scanner which can take an input string for a range or scan the lot. 
+ */ +public class PortScanner { + private static Pattern NUMBER_RANGE = Pattern.compile("^(\\d+)\\s*-\\s*(\\d+)$"); + private static Pattern SINGLE_NUMBER = Pattern.compile("^\\d+$"); + + private List remainingPortsToCheck; + + public PortScanner() { + } + + public void setPortRange(String input) throws BadConfigException { + // first split based on commas + Set inputPorts= new TreeSet(); + String[] ranges = input.split(","); + for ( String range : ranges ) { + if (range.trim().isEmpty()) { + continue; + } + Matcher m = SINGLE_NUMBER.matcher(range.trim()); + if (m.find()) { + inputPorts.add(Integer.parseInt(m.group())); + continue; + } + m = NUMBER_RANGE.matcher(range.trim()); + if (m.find()) { + String[] boundaryValues = m.group(0).split("-"); + int start = Integer.parseInt(boundaryValues[0].trim()); + int end = Integer.parseInt(boundaryValues[1].trim()); + if (end < start) { + throw new BadConfigException("End of port range is before start: " + + range + " in input: " + input); + } + for (int i = start; i < end + 1; i++) { + inputPorts.add(i); + } + continue; + } + throw new BadConfigException("Bad port range: " + range + " in input: " + + input); + } + if (inputPorts.size() == 0) { + throw new BadConfigException("No ports found in range: " + input); + } + this.remainingPortsToCheck = new ArrayList(inputPorts); + } + + public List getRemainingPortsToCheck() { + return remainingPortsToCheck; + } + + public int getAvailablePort() throws SliderException, IOException { + if (remainingPortsToCheck != null) { + return getAvailablePortViaPortArray(); + } else { + return SliderUtils.getOpenPort(); + } + } + + private int getAvailablePortViaPortArray() throws SliderException { + boolean found = false; + int availablePort = -1; + Iterator portsToCheck = this.remainingPortsToCheck.iterator(); + while (portsToCheck.hasNext() && !found) { + int portToCheck = portsToCheck.next(); + found = SliderUtils.isPortAvailable(portToCheck); + if (found) { + availablePort = portToCheck; 
+ portsToCheck.remove(); + } + } + + if (availablePort < 0) { + throw new SliderException(SliderExitCodes.EXIT_BAD_CONFIGURATION, + "No available ports found in configured range {}", + remainingPortsToCheck); + } + + return availablePort; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java new file mode 100644 index 0000000..9d00b3c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java @@ -0,0 +1,196 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.service.api.records.ConfigFormat;
+import org.apache.hadoop.yarn.service.exceptions.BadConfigException;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.io.IOException;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * JSON-serializable description of a published key-val configuration.
+ *
+ * The values themselves are not serialized in the external view; they have
+ * to be served up by the far end.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class PublishedConfiguration {
+
+  public String description;
+  public long updated;
+
+  public String updatedTime;
+
+  public Map<String, String> entries = new HashMap<>();
+
+  public PublishedConfiguration() {
+  }
+
+  /**
+   * Build an empty published configuration.
+   * @param description configuration description
+   */
+  public PublishedConfiguration(String description) {
+    this.description = description;
+  }
+
+  /**
+   * Build a configuration from the entries.
+   * @param description configuration description
+   * @param entries entries to put
+   */
+  public PublishedConfiguration(String description,
+      Iterable<Map.Entry<String, String>> entries) {
+    this.description = description;
+    putValues(entries);
+  }
+
+  /**
+   * Build a published configuration, using the keys from keysource,
+   * but resolving the values from the value source, via Configuration.get()
+   * @param description configuration description
+   * @param keysource source of keys
+   * @param valuesource source of values
+   */
+  public PublishedConfiguration(String description,
+      Iterable<Map.Entry<String, String>> keysource,
+      Configuration valuesource) {
+    this.description = description;
+    putValues(ConfigHelper.resolveConfiguration(keysource, valuesource));
+  }
+
+
+  /**
+   * Is the configuration empty. This means either that it has not
+   * been given any values, or it is a stripped down copy set down over the
+   * wire.
+   * @return true if it is empty
+   */
+  public boolean isEmpty() {
+    return entries.isEmpty();
+  }
+
+
+  public void setUpdated(long updated) {
+    this.updated = updated;
+    this.updatedTime = new Date(updated).toString();
+  }
+
+  public long getUpdated() {
+    return updated;
+  }
+
+  /**
+   * Set the values from an iterable (this includes a Hadoop Configuration
+   * and Java properties object).
+   * Any existing value set is discarded.
+   * @param entries entries to put
+   */
+  public void putValues(Iterable<Map.Entry<String, String>> entries) {
+    this.entries = new HashMap<String, String>();
+    for (Map.Entry<String, String> entry : entries) {
+      this.entries.put(entry.getKey(), entry.getValue());
+    }
+
+  }
+
+  /**
+   * Convert to Hadoop XML.
+   * @return the configuration as a Hadoop Configuration
+   */
+  public Configuration asConfiguration() {
+    Configuration conf = new Configuration(false);
+    try {
+      ConfigHelper.addConfigMap(conf, entries, "");
+    } catch (BadConfigException e) {
+      // triggered on a null value; switch to a runtime (and discard the stack)
+      throw new RuntimeException(e.toString());
+    }
+    return conf;
+  }
+
+  public String asConfigurationXML() throws IOException {
+    return ConfigHelper.toXml(asConfiguration());
+  }
+
+  /**
+   * Convert values to properties.
+   * @return a property file
+   */
+  public Properties asProperties() {
+    Properties props = new Properties();
+    props.putAll(entries);
+    return props;
+  }
+
+  /**
+   * Return the values as json string.
+   * @return the JSON representation
+   * @throws IOException marshalling failure
+   */
+  public String asJson() throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
+    String json = mapper.writeValueAsString(entries);
+    return json;
+  }
+
+
+  /**
+   * This makes a copy without the nested content -so is suitable
+   * for returning as part of the list of a parent's values.
+   * @return the copy
+   */
+  public PublishedConfiguration shallowCopy() {
+    PublishedConfiguration that = new PublishedConfiguration();
+    that.description = this.description;
+    that.updated = this.updated;
+    that.updatedTime = this.updatedTime;
+    return that;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb =
+        new StringBuilder("PublishedConfiguration{");
+    sb.append("description='").append(description).append('\'');
+    sb.append(" entries = ").append(entries.size());
+    sb.append('}');
+    return sb.toString();
+  }
+
+  /**
+   * Create an outputter for a given format.
+   * @param format format to use
+   * @return an instance of output
+   */
+  public PublishedConfigurationOutputter createOutputter(ConfigFormat format) {
+    return PublishedConfigurationOutputter.createOutputter(format, this);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java
new file mode 100644
index 0000000..88ecf2c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.api.records.ConfigFormat; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.DumperOptions.FlowStyle; +import org.yaml.snakeyaml.Yaml; + +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.io.StringWriter; +import java.util.Properties; + +/** + * Output a published configuration + */ +public abstract class PublishedConfigurationOutputter { + + private static final String COMMENTS = "Generated by Apache Slider"; + + protected final PublishedConfiguration owner; + + protected PublishedConfigurationOutputter(PublishedConfiguration owner) { + this.owner = owner; + } + + /** + * Save the config to a destination file, in the format of this outputter + * @param dest destination file + * @throws IOException + */ +/* JDK7 + public void save(File dest) throws IOException { + try(FileOutputStream out = new FileOutputStream(dest)) { + save(out); + out.close(); + } + } +*/ + public void save(File dest) throws IOException { + FileUtils.writeStringToFile(dest, asString(), Charsets.UTF_8); + } + + /** + * Save the content. 
The default saves the asString() value + * to the output stream + * @param out output stream + * @throws IOException + */ + public void save(OutputStream out) throws IOException { + IOUtils.write(asString(), out, Charsets.UTF_8); + } + /** + * Convert to a string + * @return the string form + * @throws IOException + */ + public abstract String asString() throws IOException; + + /** + * Create an outputter for the chosen format + * @param format format enumeration + * @param owner owning config + * @return the outputter + */ + + public static PublishedConfigurationOutputter createOutputter(ConfigFormat format, + PublishedConfiguration owner) { + Preconditions.checkNotNull(owner); + switch (format) { + case XML: + case HADOOP_XML: + return new XmlOutputter(owner); + case PROPERTIES: + return new PropertiesOutputter(owner); + case JSON: + return new JsonOutputter(owner); + case ENV: + return new EnvOutputter(owner); + case TEMPLATE: + return new TemplateOutputter(owner); + case YAML: + return new YamlOutputter(owner); + default: + throw new RuntimeException("Unsupported format :" + format); + } + } + + public static class XmlOutputter extends PublishedConfigurationOutputter { + + + private final Configuration configuration; + + public XmlOutputter(PublishedConfiguration owner) { + super(owner); + configuration = owner.asConfiguration(); + } + + @Override + public void save(OutputStream out) throws IOException { + configuration.writeXml(out); + } + + @Override + public String asString() throws IOException { + return ConfigHelper.toXml(configuration); + } + + public Configuration getConfiguration() { + return configuration; + } + } + + public static class PropertiesOutputter extends PublishedConfigurationOutputter { + + private final Properties properties; + + public PropertiesOutputter(PublishedConfiguration owner) { + super(owner); + properties = owner.asProperties(); + } + + @Override + public void save(OutputStream out) throws IOException { + properties.store(out, 
COMMENTS); + } + + + public String asString() throws IOException { + StringWriter sw = new StringWriter(); + properties.store(sw, COMMENTS); + return sw.toString(); + } + } + + + public static class JsonOutputter extends PublishedConfigurationOutputter { + + public JsonOutputter(PublishedConfiguration owner) { + super(owner); + } + + @Override + public String asString() throws IOException { + return owner.asJson(); + } + } + + + public static class EnvOutputter extends PublishedConfigurationOutputter { + + public EnvOutputter(PublishedConfiguration owner) { + super(owner); + } + + @Override + public String asString() throws IOException { + if (!owner.entries.containsKey("content")) { + throw new IOException("Configuration has no content field and cannot " + + "be retrieved as type 'env'"); + } + String content = owner.entries.get("content"); + return ConfigUtils.replaceProps(owner.entries, content); + } + } + + public static class TemplateOutputter extends EnvOutputter { + public TemplateOutputter(PublishedConfiguration owner) { + super(owner); + } + } + + public static class YamlOutputter extends PublishedConfigurationOutputter { + + private final Yaml yaml; + + public YamlOutputter(PublishedConfiguration owner) { + super(owner); + DumperOptions options = new DumperOptions(); + options.setDefaultFlowStyle(FlowStyle.BLOCK); + yaml = new Yaml(options); + } + + public String asString() throws IOException { + return yaml.dump(owner.entries); + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java new file mode 100644 index 0000000..140204a --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.service.utils.ApplicationReportSerDeser; +import org.codehaus.jackson.annotate.JsonIgnoreProperties; +import org.codehaus.jackson.map.annotate.JsonSerialize; + +import java.io.IOException; + +/** + * Serialized form of an service report which can be persisted + * and then parsed. 
It can not be converted back into a + * real YARN service report + * + * Useful for testing + */ + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL) + +public class SerializedApplicationReport { + + public String applicationId; + public String applicationAttemptId; + public String name; + public String applicationType; + public String user; + public String queue; + public String host; + public Integer rpcPort; + public String state; + public String diagnostics; + public String url; + /** + * This value is non-null only when a report is generated from a submission context. + * The YARN {@link ApplicationReport} structure does not propagate this value + * from the RM. + */ + public Long submitTime; + public Long startTime; + public Long finishTime; + public String finalStatus; + public String origTrackingUrl; + public Float progress; + + public SerializedApplicationReport() { + } + + public SerializedApplicationReport(ApplicationReport report) { + this.applicationId = report.getApplicationId().toString(); + ApplicationAttemptId attemptId = report.getCurrentApplicationAttemptId(); + this.applicationAttemptId = attemptId != null ? attemptId.toString() : "N/A"; + this.name = report.getName(); + this.applicationType = report.getApplicationType(); + this.user = report.getUser(); + this.queue = report.getQueue(); + this.host = report.getHost(); + this.rpcPort = report.getRpcPort(); + this.state = report.getYarnApplicationState().toString(); + this.diagnostics = report.getDiagnostics(); + this.startTime = report.getStartTime(); + this.finishTime = report.getFinishTime(); + FinalApplicationStatus appStatus = report.getFinalApplicationStatus(); + this.finalStatus = appStatus == null ? 
"" : appStatus.toString(); + this.progress = report.getProgress(); + this.url = report.getTrackingUrl(); + this.origTrackingUrl= report.getOriginalTrackingUrl(); + } + + @Override + public String toString() { + try { + return ApplicationReportSerDeser.toString(this); + } catch (IOException e) { + return super.toString(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java new file mode 100644 index 0000000..68db0bb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java @@ -0,0 +1,401 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Configuration; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; +import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils; +import org.apache.hadoop.yarn.service.conf.RestApiConstants; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; +import org.codehaus.jackson.map.PropertyNamingStrategy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class ServiceApiUtil { + private static final Logger LOG = + LoggerFactory.getLogger(ServiceApiUtil.class); + public static JsonSerDeser jsonSerDeser = + new JsonSerDeser<>(Service.class, + PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES); + private static final PatternValidator namePattern + = new PatternValidator("[a-z][a-z0-9-]*"); + + @VisibleForTesting + public static void setJsonSerDeser(JsonSerDeser jsd) { + jsonSerDeser = jsd; + } + + @VisibleForTesting + public static void validateAndResolveService(Service service, + SliderFileSystem fs, 
org.apache.hadoop.conf.Configuration conf) throws + IOException { + boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED, + RegistryConstants.DEFAULT_DNS_ENABLED); + if (dnsEnabled && RegistryUtils.currentUser().length() > RegistryConstants + .MAX_FQDN_LABEL_LENGTH) { + throw new IllegalArgumentException(RestApiErrorMessages + .ERROR_USER_NAME_INVALID); + } + if (StringUtils.isEmpty(service.getName())) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID); + } + + validateNameFormat(service.getName(), conf); + + // If the service has no components, throw error + if (!hasComponent(service)) { + throw new IllegalArgumentException( + "No component specified for " + service.getName()); + } + + // Validate there are no component name collisions (collisions are not + // currently supported) and add any components from external services + Configuration globalConf = service.getConfiguration(); + Set componentNames = new HashSet<>(); + List componentsToRemove = new ArrayList<>(); + List componentsToAdd = new ArrayList<>(); + for (Component comp : service.getComponents()) { + int maxCompLength = RegistryConstants.MAX_FQDN_LABEL_LENGTH; + maxCompLength = maxCompLength - Long.toString(Long.MAX_VALUE).length(); + if (dnsEnabled && comp.getName().length() > maxCompLength) { + throw new IllegalArgumentException(String.format(RestApiErrorMessages + .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName())); + } + if (componentNames.contains(comp.getName())) { + throw new IllegalArgumentException("Component name collision: " + + comp.getName()); + } + // If artifact is of type SERVICE (which cannot be filled from + // global), read external service and add its components to this + // service + if (comp.getArtifact() != null && comp.getArtifact().getType() == + Artifact.TypeEnum.SERVICE) { + if (StringUtils.isEmpty(comp.getArtifact().getId())) { + throw new IllegalArgumentException( + 
RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); + } + LOG.info("Marking {} for removal", comp.getName()); + componentsToRemove.add(comp); + List externalComponents = getComponents(fs, + comp.getArtifact().getId()); + for (Component c : externalComponents) { + Component override = service.getComponent(c.getName()); + if (override != null && override.getArtifact() == null) { + // allow properties from external components to be overridden / + // augmented by properties in this component, except for artifact + // which must be read from external component + override.mergeFrom(c); + LOG.info("Merging external component {} from external {}", c + .getName(), comp.getName()); + } else { + if (componentNames.contains(c.getName())) { + throw new IllegalArgumentException("Component name collision: " + + c.getName()); + } + componentNames.add(c.getName()); + componentsToAdd.add(c); + LOG.info("Adding component {} from external {}", c.getName(), + comp.getName()); + } + } + } else { + // otherwise handle as a normal component + componentNames.add(comp.getName()); + // configuration + comp.getConfiguration().mergeFrom(globalConf); + } + } + service.getComponents().removeAll(componentsToRemove); + service.getComponents().addAll(componentsToAdd); + + // Validate components and let global values take effect if component level + // values are not provided + Artifact globalArtifact = service.getArtifact(); + Resource globalResource = service.getResource(); + for (Component comp : service.getComponents()) { + // fill in global artifact unless it is type SERVICE + if (comp.getArtifact() == null && service.getArtifact() != null + && service.getArtifact().getType() != Artifact.TypeEnum + .SERVICE) { + comp.setArtifact(globalArtifact); + } + // fill in global resource + if (comp.getResource() == null) { + comp.setResource(globalResource); + } + // validate dependency existence + if (comp.getDependencies() != null) { + for (String dependency : comp.getDependencies()) { + if 
(!componentNames.contains(dependency)) { + throw new IllegalArgumentException(String.format( + RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, dependency, + comp.getName())); + } + } + } + validateComponent(comp, fs.getFileSystem(), conf); + } + + // validate dependency tree + sortByDependencies(service.getComponents()); + + // Service lifetime if not specified, is set to unlimited lifetime + if (service.getLifetime() == null) { + service.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME); + } + } + + private static void validateComponent(Component comp, FileSystem fs, + org.apache.hadoop.conf.Configuration conf) + throws IOException { + validateNameFormat(comp.getName(), conf); + + AbstractClientProvider compClientProvider = ProviderFactory + .getClientProvider(comp.getArtifact()); + compClientProvider.validateArtifact(comp.getArtifact(), fs); + + if (comp.getLaunchCommand() == null && (comp.getArtifact() == null || comp + .getArtifact().getType() != Artifact.TypeEnum.DOCKER)) { + throw new IllegalArgumentException(RestApiErrorMessages + .ERROR_ABSENT_LAUNCH_COMMAND); + } + + validateServiceResource(comp.getResource(), comp); + + if (comp.getNumberOfContainers() == null + || comp.getNumberOfContainers() < 0) { + throw new IllegalArgumentException(String.format( + RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID + + ": " + comp.getNumberOfContainers(), comp.getName())); + } + compClientProvider.validateConfigFiles(comp.getConfiguration() + .getFiles(), fs); + + MonitorUtils.getProbe(comp.getReadinessCheck()); + } + + // Check component or service name format and transform to lower case. 
+ public static void validateNameFormat(String name, + org.apache.hadoop.conf.Configuration conf) { + if (StringUtils.isEmpty(name)) { + throw new IllegalArgumentException("Name can not be empty!"); + } + // validate component name + if (name.contains("_")) { + throw new IllegalArgumentException( + "Invalid format: " + name + + ", can not use '_', as DNS hostname does not allow '_'. Use '-' Instead. "); + } + boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED, + RegistryConstants.DEFAULT_DNS_ENABLED); + if (dnsEnabled && name.length() > RegistryConstants.MAX_FQDN_LABEL_LENGTH) { + throw new IllegalArgumentException(String + .format("Invalid format %s, must be no more than 63 characters ", + name)); + } + namePattern.validate(name); + } + + @VisibleForTesting + public static List getComponents(SliderFileSystem + fs, String serviceName) throws IOException { + return loadService(fs, serviceName).getComponents(); + } + + public static Service loadService(SliderFileSystem fs, String + serviceName) throws IOException { + Path serviceJson = getServiceJsonPath(fs, serviceName); + LOG.info("Loading service definition from " + serviceJson); + return jsonSerDeser.load(fs.getFileSystem(), serviceJson); + } + + public static Service loadServiceFrom(SliderFileSystem fs, + Path appDefPath) throws IOException { + LOG.info("Loading service definition from " + appDefPath); + return jsonSerDeser.load(fs.getFileSystem(), appDefPath); + } + + public static Path getServiceJsonPath(SliderFileSystem fs, String serviceName) { + Path serviceDir = fs.buildClusterDirPath(serviceName); + return new Path(serviceDir, serviceName + ".json"); + } + + private static void validateServiceResource(Resource resource, + Component comp) { + // Only services/components of type SERVICE can skip resource requirement + if (resource == null) { + throw new IllegalArgumentException( + comp == null ? 
RestApiErrorMessages.ERROR_RESOURCE_INVALID : String + .format(RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID, + comp.getName())); + } + // One and only one of profile OR cpus & memory can be specified. Specifying + // both raises validation error. + if (StringUtils.isNotEmpty(resource.getProfile()) && ( + resource.getCpus() != null || StringUtils + .isNotEmpty(resource.getMemory()))) { + throw new IllegalArgumentException(comp == null ? + RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED : + String.format( + RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED, + comp.getName())); + } + // Currently resource profile is not supported yet, so we will raise + // validation error if only resource profile is specified + if (StringUtils.isNotEmpty(resource.getProfile())) { + throw new IllegalArgumentException( + RestApiErrorMessages.ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET); + } + + String memory = resource.getMemory(); + Integer cpus = resource.getCpus(); + if (StringUtils.isEmpty(memory)) { + throw new IllegalArgumentException( + comp == null ? RestApiErrorMessages.ERROR_RESOURCE_MEMORY_INVALID : + String.format( + RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, + comp.getName())); + } + if (cpus == null) { + throw new IllegalArgumentException( + comp == null ? RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID : + String.format( + RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID, + comp.getName())); + } + if (cpus <= 0) { + throw new IllegalArgumentException(comp == null ? 
+ RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID_RANGE : String + .format( + RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, + comp.getName())); + } + } + + // check if comp mem size exceeds cluster limit + public static void validateCompResourceSize( + org.apache.hadoop.yarn.api.records.Resource maxResource, + Service service) throws YarnException { + for (Component component : service.getComponents()) { + // only handle mem now. + long mem = Long.parseLong(component.getResource().getMemory()); + if (mem > maxResource.getMemorySize()) { + throw new YarnException( + "Component " + component.getName() + " memory size (" + mem + + ") is larger than configured max container memory size (" + + maxResource.getMemorySize() + ")"); + } + } + } + + private static boolean hasComponent(Service service) { + if (service.getComponents() == null || service.getComponents() + .isEmpty()) { + return false; + } + return true; + } + + public static Collection sortByDependencies(List + components) { + Map sortedComponents = + sortByDependencies(components, null); + return sortedComponents.values(); + } + + /** + * Each internal call of sortByDependencies will identify all of the + * components with the same dependency depth (the lowest depth that has not + * been processed yet) and add them to the sortedComponents list, preserving + * their original ordering in the components list. + * + * So the first time it is called, all components with no dependencies + * (depth 0) will be identified. The next time it is called, all components + * that have dependencies only on the the depth 0 components will be + * identified (depth 1). This will be repeated until all components have + * been added to the sortedComponents list. If no new components are + * identified but the sortedComponents list is not complete, an error is + * thrown. 
+ */ + private static Map sortByDependencies(List + components, Map sortedComponents) { + if (sortedComponents == null) { + sortedComponents = new LinkedHashMap<>(); + } + + Map componentsToAdd = new LinkedHashMap<>(); + List componentsSkipped = new ArrayList<>(); + for (Component component : components) { + String name = component.getName(); + if (sortedComponents.containsKey(name)) { + continue; + } + boolean dependenciesAlreadySorted = true; + if (!SliderUtils.isEmpty(component.getDependencies())) { + for (String dependency : component.getDependencies()) { + if (!sortedComponents.containsKey(dependency)) { + dependenciesAlreadySorted = false; + break; + } + } + } + if (dependenciesAlreadySorted) { + componentsToAdd.put(name, component); + } else { + componentsSkipped.add(component); + } + } + + if (componentsToAdd.size() == 0) { + throw new IllegalArgumentException(String.format(RestApiErrorMessages + .ERROR_DEPENDENCY_CYCLE, componentsSkipped)); + } + sortedComponents.putAll(componentsToAdd); + if (sortedComponents.size() == components.size()) { + return sortedComponents; + } + return sortByDependencies(components, sortedComponents); + } + + public static String $(String s) { + return "${" + s +"}"; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java new file mode 100644 index 0000000..7440b11 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
 * See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.service.utils;

import org.apache.hadoop.registry.client.binding.RegistryUtils;
import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;


/**
 * Helpers for computing YARN registry / ZooKeeper paths for service
 * instances.
 */
public class ServiceRegistryUtils {

  /**
   * Base path for services
   */
  public static final String ZK_SERVICES = "services";

  /**
   * Base path for all Slider references
   */
  public static final String ZK_SLIDER = "slider";
  public static final String ZK_USERS = "users";
  public static final String SVC_SLIDER = "/" + ZK_SERVICES + "/" + ZK_SLIDER;
  public static final String SVC_SLIDER_USERS = SVC_SLIDER + "/" + ZK_USERS;

  /**
   * Get the registry path for an instance under the user's home node
   * @param instanceName application instance
   * @return a path to the registry location for this application instance.
   */
  public static String registryPathForInstance(String instanceName) {
    return RegistryUtils.servicePath(
        RegistryUtils.currentUser(), YarnServiceConstants.APP_TYPE, instanceName
    );
  }

  /**
   * Build the path to a cluster; exists once the cluster has come up.
   * Even before that, a ZK watcher could wait for it.
   * @param username user
   * @param clustername name of the cluster
   * @return a string path of the form {@code <userPath>/<clustername>}
   */
  public static String mkClusterPath(String username, String clustername) {
    return mkSliderUserPath(username) + "/" + clustername;
  }

  /**
   * Build the path to a user's node under which all of that user's
   * cluster paths live.
   * @param username user
   * @return a string path of the form {@code /services/slider/users/<username>}
   */
  public static String mkSliderUserPath(String username) {
    return SVC_SLIDER_USERS + "/" + username;
  }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java
new file mode 100644
index 0000000..d6d664e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java
@@ -0,0 +1,51 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.service.utils; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import java.io.IOException; + +/** + * Extends Core Filesystem with operations to manipulate ClusterDescription + * persistent state + */ +public class SliderFileSystem extends CoreFileSystem { + + Path appDir = null; + + public SliderFileSystem(FileSystem fileSystem, + Configuration configuration) { + super(fileSystem, configuration); + } + + public SliderFileSystem(Configuration configuration) throws IOException { + super(configuration); + } + + public void setAppDir(Path appDir) { + this.appDir = appDir; + } + + public Path getAppDir() { + return this.appDir; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderUtils.java new file mode 100644 index 0000000..2809dfb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderUtils.java @@ -0,0 +1,564 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.base.Preconditions; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.ApplicationConstants; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.service.conf.YarnServiceConstants; +import org.apache.hadoop.yarn.service.containerlaunch.ClasspathConstructor; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.net.ServerSocket; +import java.net.URL; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.regex.Pattern; +import java.util.zip.GZIPOutputStream; + +/** + * These are 
slider-specific Util methods + */ +public final class SliderUtils { + + private static final Logger log = LoggerFactory.getLogger(SliderUtils.class); + + private SliderUtils() { + } + + /** + * Implementation of set-ness, groovy definition of true/false for a string + * @param s string + * @return true iff the string is neither null nor empty + */ + public static boolean isUnset(String s) { + return s == null || s.isEmpty(); + } + + public static boolean isSet(String s) { + return !isUnset(s); + } + + public static boolean isEmpty(Collection l) { + return l == null || l.isEmpty(); + } + + /** + * Find a containing JAR + * @param clazz class to find + * @return the file + * @throws IOException any IO problem, including the class not having a + * classloader + * @throws FileNotFoundException if the class did not resolve to a file + */ + public static File findContainingJarOrFail(Class clazz) throws IOException { + File localFile = SliderUtils.findContainingJar(clazz); + if (null == localFile) { + throw new FileNotFoundException("Could not find JAR containing " + clazz); + } + return localFile; + } + + + /** + * Find a containing JAR + * @param my_class class to find + * @return the file or null if it is not found + * @throws IOException any IO problem, including the class not having a + * classloader + */ + public static File findContainingJar(Class my_class) throws IOException { + ClassLoader loader = my_class.getClassLoader(); + if (loader == null) { + throw new IOException( + "Class " + my_class + " does not have a classloader!"); + } + String class_file = my_class.getName().replaceAll("\\.", "/") + ".class"; + Enumeration urlEnumeration = loader.getResources(class_file); + for (; urlEnumeration.hasMoreElements(); ) { + URL url = urlEnumeration.nextElement(); + if ("jar".equals(url.getProtocol())) { + String toReturn = url.getPath(); + if (toReturn.startsWith("file:")) { + toReturn = toReturn.substring("file:".length()); + } + // URLDecoder is a misnamed class, 
since it actually decodes + // x-www-form-urlencoded MIME type rather than actual + // URL encoding (which the file path has). Therefore it would + // decode +s to ' 's which is incorrect (spaces are actually + // either unencoded or encoded as "%20"). Replace +s first, so + // that they are kept sacred during the decoding process. + toReturn = toReturn.replaceAll("\\+", "%2B"); + toReturn = URLDecoder.decode(toReturn, "UTF-8"); + String jarFilePath = toReturn.replaceAll("!.*$", ""); + return new File(jarFilePath); + } else { + log.info("could not locate JAR containing {} URL={}", my_class, url); + } + } + return null; + } + + /** + * Copy a file to a new FS -both paths must be qualified. + * @param conf conf file + * @param srcFile src file + * @param destFile dest file + */ + @SuppressWarnings("deprecation") + public static void copy(Configuration conf, + Path srcFile, + Path destFile) throws + IOException, + BadClusterStateException { + FileSystem srcFS = FileSystem.get(srcFile.toUri(), conf); + //list all paths in the src. + if (!srcFS.exists(srcFile)) { + throw new FileNotFoundException("Source file not found " + srcFile); + } + if (!srcFS.isFile(srcFile)) { + throw new FileNotFoundException( + "Source file not a file " + srcFile); + } + FileSystem destFS = FileSystem.get(destFile.toUri(), conf); + FileUtil.copy(srcFS, srcFile, destFS, destFile, false, true, conf); + } + + /** + * Take a collection, return a list containing the string value of every + * element in the collection. 
+ * @param c collection + * @return a stringified list + */ + public static List collectionToStringList(Collection c) { + List l = new ArrayList<>(c.size()); + for (Object o : c) { + l.add(o.toString()); + } + return l; + } + + /** + * Join an collection of objects with a separator that appears after every + * instance in the list -including at the end + * @param collection collection to call toString() on each element + * @param separator separator string + * @return the joined entries + */ + public static String join(Collection collection, String separator) { + return join(collection, separator, true); + } + + /** + * Join an collection of objects with a separator that appears after every + * instance in the list -optionally at the end + * @param collection collection to call toString() on each element + * @param separator separator string + * @param trailing add a trailing entry or not + * @return the joined entries + */ + public static String join(Collection collection, + String separator, + boolean trailing) { + StringBuilder b = new StringBuilder(); + // fast return on empty collection + if (collection.isEmpty()) { + return trailing ? separator : ""; + } + for (Object o : collection) { + b.append(o); + b.append(separator); + } + int length = separator.length(); + String s = b.toString(); + return (trailing || s.isEmpty()) ? 
+ s : (b.substring(0, b.length() - length)); + } + + /** + * Join an array of strings with a separator that appears after every + * instance in the list -optionally at the end + * @param collection strings + * @param separator separator string + * @param trailing add a trailing entry or not + * @return the joined entries + */ + public static String join(String[] collection, String separator, + boolean trailing) { + return join(Arrays.asList(collection), separator, trailing); + } + + /** + * Resolve a mandatory environment variable + * @param key env var + * @return the resolved value + * @throws BadClusterStateException + */ + public static String mandatoryEnvVariable(String key) throws + BadClusterStateException { + String v = System.getenv(key); + if (v == null) { + throw new BadClusterStateException("Missing Environment variable " + key); + } + return v; + } + + /** + * Generic map merge logic + * @param first first map + * @param second second map + * @param key type + * @param value type + * @return 'first' merged with the second + */ + public static Map mergeMapsIgnoreDuplicateKeys(Map first, + Map second) { + Preconditions.checkArgument(first != null, "Null 'first' value"); + Preconditions.checkArgument(second != null, "Null 'second' value"); + for (Map.Entry entry : second.entrySet()) { + T1 key = entry.getKey(); + if (!first.containsKey(key)) { + first.put(key, entry.getValue()); + } + } + return first; + } + + /** + * Convert a map to a multi-line string for printing + * @param map map to stringify + * @return a string representation of the map + */ + public static String stringifyMap(Map map) { + StringBuilder builder = new StringBuilder(); + for (Map.Entry entry : map.entrySet()) { + builder.append(entry.getKey()) + .append("=\"") + .append(entry.getValue()) + .append("\"\n"); + + } + return builder.toString(); + } + + /** + * Convert a YARN URL into a string value of a normal URL + * @param url URL + * @return string representatin + */ + public static 
String stringify(org.apache.hadoop.yarn.api.records.URL url) { + StringBuilder builder = new StringBuilder(); + builder.append(url.getScheme()).append("://"); + if (url.getHost() != null) { + builder.append(url.getHost()).append(":").append(url.getPort()); + } + builder.append(url.getFile()); + return builder.toString(); + } + + /** + * Get a random open port + * @return true if the port was available for listening on + */ + public static int getOpenPort() throws IOException { + ServerSocket socket = null; + try { + socket = new ServerSocket(0); + return socket.getLocalPort(); + } finally { + if (socket != null) { + socket.close(); + } + } + } + + /** + * See if a port is available for listening on by trying to listen + * on it and seeing if that works or fails. + * @param port port to listen to + * @return true if the port was available for listening on + */ + public static boolean isPortAvailable(int port) { + try { + ServerSocket socket = new ServerSocket(port); + socket.close(); + return true; + } catch (IOException e) { + return false; + } + } + + // Build env map: key -> value; + // value will be replaced by the corresponding value in tokenMap, if any. 
+ public static Map buildEnvMap( + org.apache.hadoop.yarn.service.api.records.Configuration conf, + Map tokenMap) { + if (tokenMap == null) { + return conf.getEnv(); + } + Map env = new HashMap<>(); + for (Map.Entry entry : conf.getEnv().entrySet()) { + String key = entry.getKey(); + String val = entry.getValue(); + for (Map.Entry token : tokenMap.entrySet()) { + val = val.replaceAll(Pattern.quote(token.getKey()), + token.getValue()); + } + env.put(key,val); + } + return env; + } + + public static String[] getLibDirs() { + String libDirStr = System.getProperty(YarnServiceConstants.PROPERTY_LIB_DIR); + if (isUnset(libDirStr)) { + return ArrayUtils.EMPTY_STRING_ARRAY; + } + return StringUtils.split(libDirStr, ','); + } + + /** + * Submit a JAR containing a specific class and map it + * @param providerResources provider map to build up + * @param sliderFileSystem remote fs + * @param clazz class to look for + * @param libdir lib directory + * @param jarName At the destination + * @return the local resource ref + * @throws IOException trouble copying to HDFS + */ + public static LocalResource putJar(Map providerResources, + SliderFileSystem sliderFileSystem, + Class clazz, + Path tempPath, + String libdir, + String jarName + ) + throws IOException, SliderException { + LocalResource res = sliderFileSystem.submitJarWithClass( + clazz, + tempPath, + libdir, + jarName); + providerResources.put(libdir + "/" + jarName, res); + return res; + } + + /** + * Submit a JAR containing and map it + * @param providerResources provider map to build up + * @param sliderFileSystem remote fs + * @param libDir lib directory + * @param srcPath copy jars from + */ + public static void putAllJars(Map providerResources, + SliderFileSystem sliderFileSystem, + Path tempPath, + String libDir, + String srcPath) throws IOException, SliderException { + log.info("Loading all dependencies from {}", srcPath); + if (SliderUtils.isSet(srcPath)) { + File srcFolder = new File(srcPath); + FilenameFilter 
jarFilter = createJarFilter(); + File[] listOfJars = srcFolder.listFiles(jarFilter); + if (listOfJars == null || listOfJars.length == 0) { + return; + } + for (File jarFile : listOfJars) { + LocalResource res = sliderFileSystem.submitFile(jarFile, tempPath, libDir, jarFile.getName()); + providerResources.put(libDir + "/" + jarFile.getName(), res); + } + } + } + + /** + * Accept all filenames ending with {@code .jar} + * @return a filename filter + */ + public static FilenameFilter createJarFilter() { + return new FilenameFilter() { + public boolean accept(File dir, String name) { + return name.toLowerCase(Locale.ENGLISH).endsWith(".jar"); + } + }; + } + + /** + * Submit the AM tar.gz containing all dependencies and map it + * @param providerResources provider map to build up + * @param sliderFileSystem remote fs + */ + public static void putAmTarGzipAndUpdate( + Map providerResources, + SliderFileSystem sliderFileSystem + ) throws IOException, SliderException { + log.info("Loading all dependencies from {}{}", + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_NAME, + YarnServiceConstants.DEPENDENCY_TAR_GZ_FILE_EXT); + sliderFileSystem.submitTarGzipAndUpdate(providerResources); + } + + /** + * Create a file:// path from a local file + * @param file file to point the path + * @return a new Path + */ + public static Path createLocalPath(File file) { + return new Path(file.toURI()); + } + + /** + * Build up the classpath for execution + * -behaves very differently on a mini test cluster vs a production + * production one. 
+ * + * @param sliderConfDir relative path to the dir containing slider config + * options to put on the classpath -or null + * @param libdir directory containing the JAR files + * @param usingMiniMRCluster flag to indicate the MiniMR cluster is in use + * (and hence the current classpath should be used, not anything built up) + * @return a classpath + */ + public static ClasspathConstructor buildClasspath(String sliderConfDir, + String libdir, + SliderFileSystem sliderFileSystem, + boolean usingMiniMRCluster) { + + ClasspathConstructor classpath = new ClasspathConstructor(); + classpath.append(YarnServiceConstants.YARN_SERVICE_LOG4J_FILENAME); + + // add the runtime classpath needed for tests to work + if (usingMiniMRCluster) { + // for mini cluster we pass down the java CP properties + // and nothing else + classpath.appendAll(classpath.localJVMClasspath()); + } else { + if (sliderConfDir != null) { + classpath.addClassDirectory(sliderConfDir); + } + classpath.addLibDir(libdir); + if (sliderFileSystem.isFile(sliderFileSystem.getDependencyTarGzip())) { + classpath.addLibDir(YarnServiceConstants.DEPENDENCY_LOCALIZED_DIR_LINK); + } else { + log.info( + "For faster submission of apps, upload dependencies using cmd " + + "enableFastLaunch"); + } + classpath.addRemoteClasspathEnvVar(); + classpath.append(ApplicationConstants.Environment.HADOOP_CONF_DIR.$$()); + } + return classpath; + } + + /** + * Given a source folder create a tar.gz file + * + * @param libDirs + * @param tarGzipFile + * + * @throws IOException + */ + public static void tarGzipFolder(String[] libDirs, File tarGzipFile, + FilenameFilter filter) throws IOException { + log.info("Tar-gzipping folders {} to {}", libDirs, + tarGzipFile.getAbsolutePath()); + + try(TarArchiveOutputStream taos = + new TarArchiveOutputStream(new GZIPOutputStream( + new BufferedOutputStream(new FileOutputStream(tarGzipFile))))) { + for (String libDir : libDirs) { + File srcFolder = new File(libDir); + List files = new 
ArrayList<>(); + generateFileList(files, srcFolder, srcFolder, true, filter); + for (String file : files) { + File srcFile = new File(srcFolder, file); + TarArchiveEntry tarEntry = new TarArchiveEntry( + srcFile, file); + taos.putArchiveEntry(tarEntry); + try(FileInputStream in = new FileInputStream(srcFile)) { + org.apache.commons.io.IOUtils.copy(in, taos); + } + taos.flush(); + taos.closeArchiveEntry(); + } + } + } + } + + private static void generateFileList(List fileList, File node, + File rootFolder, Boolean relative, FilenameFilter filter) { + if (node.isFile()) { + String fileFullPath = node.toString(); + if (relative) { + fileList.add(fileFullPath.substring(rootFolder.toString().length() + 1, + fileFullPath.length())); + } else { + fileList.add(fileFullPath); + } + } + + if (node.isDirectory()) { + String[] subNode = node.list(filter); + if (subNode == null || subNode.length == 0) { + return; + } + for (String filename : subNode) { + generateFileList(fileList, new File(node, filename), rootFolder, + relative, filter); + } + } + } + + public static String createNameTag(String name) { + return "Name: " + name; + } + + public static String createVersionTag(String version) { + return "Version: " + version; + } + + public static String createDescriptionTag(String description) { + return "Description: " + description; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java new file mode 100644 index 0000000..1fa07ce --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java @@ -0,0 +1,146 @@ +/* + * Licensed to the 
Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.utils; + +import com.google.common.net.HostAndPort; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.service.exceptions.BadConfigException; + +import java.util.ArrayList; +import java.util.List; + +public class ZookeeperUtils { + public static final int DEFAULT_PORT = 2181; + + public static String buildConnectionString(String zkHosts, int port) { + String zkPort = Integer.toString(port); + //parse the hosts + String[] hostlist = zkHosts.split(",", 0); + String quorum = SliderUtils.join(hostlist, ":" + zkPort + ",", false); + return quorum; + } + + /** + * Take a quorum list and split it to (trimmed) pairs + * @param hostPortQuorumList list of form h1:port, h2:port2,... + * @return a possibly empty list of values between commas. 
They may not be + * valid hostname:port pairs + */ + public static List splitToPairs(String hostPortQuorumList) { + // split an address hot + String[] strings = StringUtils.getStrings(hostPortQuorumList); + int len = 0; + if (strings != null) { + len = strings.length; + } + List tuples = new ArrayList(len); + if (strings != null) { + for (String s : strings) { + tuples.add(s.trim()); + } + } + return tuples; + } + + /** + * Split a quorum list into a list of hostnames and ports + * @param hostPortQuorumList split to a list of hosts and ports + * @return a list of values + */ + public static List splitToHostsAndPorts(String hostPortQuorumList) { + // split an address hot + String[] strings = StringUtils.getStrings(hostPortQuorumList); + int len = 0; + if (strings != null) { + len = strings.length; + } + List list = new ArrayList(len); + if (strings != null) { + for (String s : strings) { + list.add(HostAndPort.fromString(s.trim()).withDefaultPort(DEFAULT_PORT)); + } + } + return list; + } + + /** + * Build up to a hosts only list + * @param hostAndPorts + * @return a list of the hosts only + */ + public static String buildHostsOnlyList(List hostAndPorts) { + StringBuilder sb = new StringBuilder(); + for (HostAndPort hostAndPort : hostAndPorts) { + sb.append(hostAndPort.getHostText()).append(","); + } + if (sb.length() > 0) { + sb.delete(sb.length() - 1, sb.length()); + } + return sb.toString(); + } + + public static String buildQuorumEntry(HostAndPort hostAndPort, + int defaultPort) { + String s = hostAndPort.toString(); + if (hostAndPort.hasPort()) { + return s; + } else { + return s + ":" + defaultPort; + } + } + + /** + * Build a quorum list, injecting a ":defaultPort" ref if needed on + * any entry without one + * @param hostAndPorts + * @param defaultPort + * @return + */ + public static String buildQuorum(List hostAndPorts, int defaultPort) { + List entries = new ArrayList(hostAndPorts.size()); + for (HostAndPort hostAndPort : hostAndPorts) { + 
entries.add(buildQuorumEntry(hostAndPort, defaultPort)); + } + return SliderUtils.join(entries, ",", false); + } + + public static String convertToHostsOnlyList(String quorum) throws + BadConfigException { + List hostAndPorts = splitToHostsAndPortsStrictly(quorum); + return ZookeeperUtils.buildHostsOnlyList(hostAndPorts); + } + + public static List splitToHostsAndPortsStrictly(String quorum) throws + BadConfigException { + List hostAndPorts = + ZookeeperUtils.splitToHostsAndPorts(quorum); + if (hostAndPorts.isEmpty()) { + throw new BadConfigException("empty zookeeper quorum"); + } + return hostAndPorts; + } + + public static int getFirstPort(String quorum, int defVal) throws + BadConfigException { + List hostAndPorts = splitToHostsAndPortsStrictly(quorum); + int port = hostAndPorts.get(0).getPortOrDefault(defVal); + return port; + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto new file mode 100644 index 0000000..0a21c24 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/proto/ClientAMProtocol.proto @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.yarn.proto"; +option java_outer_classname = "ClientAMProtocol"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +package hadoop.yarn; + +service ClientAMProtocolService { + rpc flexComponents(FlexComponentsRequestProto) returns (FlexComponentsResponseProto); + rpc getStatus(GetStatusRequestProto) returns (GetStatusResponseProto); + rpc stop(StopRequestProto) returns (StopResponseProto); +} + +message FlexComponentsRequestProto { + repeated ComponentCountProto components = 1; +} + +message ComponentCountProto { + optional string name = 1; + optional int64 numberOfContainers = 2; +} + +message FlexComponentsResponseProto{ +} + +message GetStatusRequestProto { + +} +message GetStatusResponseProto { + optional string status = 1; +} + +message StopRequestProto { + +} + +message StopResponseProto { + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java new file mode 100644 index 0000000..d343a03 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation 
(ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import com.google.common.base.Supplier; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.client.api.NMClient; +import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; +import org.apache.hadoop.yarn.client.api.async.NMClientAsync; +import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl; +import org.apache.hadoop.yarn.exceptions.YarnException; +import 
org.apache.hadoop.yarn.proto.ClientAMProtocol; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.component.Component; +import org.apache.hadoop.yarn.service.component.ComponentState; +import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException; +import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; + +import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeoutException; + +import static org.mockito.Mockito.mock; + +public class MockServiceAM extends ServiceMaster { + + Service service; + // The list of containers fed by tests to be returned on + // AMRMClientCallBackHandler#onContainersAllocated + final List feedContainers = + Collections.synchronizedList(new LinkedList<>()); + + public MockServiceAM(Service service) { + super(service.getName()); + this.service = service; + } + + + @Override + protected ContainerId getAMContainerId() + throws BadClusterStateException { + return ContainerId.newContainerId(ApplicationAttemptId + .newInstance(ApplicationId.fromString(service.getId()), 1), 1); + } + + @Override + protected Path getAppDir() { + Path path = new Path(new Path("target", "apps"), service.getName()); + System.out.println("Service path: " + path); + return path; + } + + @Override + protected ServiceScheduler createServiceScheduler(ServiceContext context) + throws IOException, YarnException { + return new ServiceScheduler(context) { + + @Override + protected YarnRegistryViewForProviders createYarnRegistryOperations( + ServiceContext context, RegistryOperations registryClient) { + return mock(YarnRegistryViewForProviders.class); + } + + @Override + protected AMRMClientAsync createAMRMClient() { + AMRMClientImpl client1 = new AMRMClientImpl() { + @Override public AllocateResponse allocate(float 
progressIndicator) + throws YarnException, IOException { + + AllocateResponse.AllocateResponseBuilder builder = + AllocateResponse.newBuilder(); + synchronized (feedContainers) { + if (feedContainers.isEmpty()) { + System.out.println("Allocating........ no containers"); + return builder.build(); + } else { + // The AMRMClient will return containers for compoenent that are + // at FLEXING state + List allocatedContainers = new LinkedList<>(); + Iterator itor = feedContainers.iterator(); + while (itor.hasNext()) { + Container c = itor.next(); + org.apache.hadoop.yarn.service.component.Component component = + componentsById.get(c.getAllocationRequestId()); + if (component.getState() == ComponentState.FLEXING) { + System.out.println("Allocated container " + c.getId()); + allocatedContainers.add(c); + itor.remove(); + } + } + return builder.allocatedContainers(allocatedContainers).build(); + } + } + } + + @Override + public RegisterApplicationMasterResponse registerApplicationMaster( + String appHostName, int appHostPort, String appTrackingUrl) { + return mock(RegisterApplicationMasterResponse.class); + } + + @Override public void unregisterApplicationMaster( + FinalApplicationStatus appStatus, String appMessage, + String appTrackingUrl) { + // DO nothing + } + }; + + return AMRMClientAsync + .createAMRMClientAsync(client1, 1000, + this.new AMRMClientCallback()); + } + + @Override + public NMClientAsync createNMClient() { + NMClientAsync nmClientAsync = super.createNMClient(); + nmClientAsync.setClient(mock(NMClient.class)); + return nmClientAsync; + } + }; + } + + @Override protected void loadApplicationJson(ServiceContext context, + SliderFileSystem fs) throws IOException { + context.service = service; + } + + /** + * + * @param service The service for the component + * @param id The id for the container + * @param compName The component to which the container is fed + * @return + */ + public Container feedContainerToComp(Service service, int id, + String compName) { 
+ ApplicationId applicationId = ApplicationId.fromString(service.getId()); + ContainerId containerId = ContainerId + .newContainerId(ApplicationAttemptId.newInstance(applicationId, 1), id); + NodeId nodeId = NodeId.newInstance("localhost", 1234); + Container container = Container + .newInstance(containerId, nodeId, "localhost", + Resource.newInstance(100, 1), Priority.newInstance(0), null); + + long allocateId = + context.scheduler.getAllComponents().get(compName).getAllocateId(); + container.setAllocationRequestId(allocateId); + synchronized (feedContainers) { + feedContainers.add(container); + } + return container; + } + + public void flexComponent(String compName, long numberOfContainers) + throws IOException { + ClientAMProtocol.ComponentCountProto componentCountProto = + ClientAMProtocol.ComponentCountProto.newBuilder().setName(compName) + .setNumberOfContainers(numberOfContainers).build(); + ClientAMProtocol.FlexComponentsRequestProto requestProto = + ClientAMProtocol.FlexComponentsRequestProto.newBuilder() + .addComponents(componentCountProto).build(); + context.clientAMService.flexComponents(requestProto); + } + + public Component getComponent(String compName) { + return context.scheduler.getAllComponents().get(compName); + } + + public void waitForDependenciesSatisfied(String compName) + throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor(new Supplier() { + @Override public Boolean get() { + return context.scheduler.getAllComponents().get(compName) + .areDependenciesReady(); + } + }, 1000, 20000); + } + + public void waitForNumDesiredContainers(String compName, + int numDesiredContainers) throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor(new Supplier() { + @Override public Boolean get() { + return context.scheduler.getAllComponents().get(compName) + .getNumDesiredInstances() == numDesiredContainers; + } + }, 1000, 20000); + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java new file mode 100644 index 0000000..19a5177 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java @@ -0,0 +1,258 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.commons.io.FileUtils; +import org.apache.curator.test.TestingCluster; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.MiniYARNCluster; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.utils.JsonSerDeser; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin; +import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree; +import org.codehaus.jackson.map.PropertyNamingStrategy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.net.URL; + +import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_PMEM_CHECK_ENABLED; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.NM_VMEM_CHECK_ENABLED; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_ENABLED; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.AM_RESOURCE_MEM; +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH; +import static org.mockito.Matchers.anyObject; +import static 
org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ServiceTestUtils { + + private static final Logger LOG = + LoggerFactory.getLogger(ServiceTestUtils.class); + + private MiniYARNCluster yarnCluster = null; + private MiniDFSCluster hdfsCluster = null; + private FileSystem fs = null; + private Configuration conf = null; + public static final int NUM_NMS = 1; + private File basedir; + + public static final JsonSerDeser JSON_SER_DESER = + new JsonSerDeser<>(Service.class, + PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES); + + // Example service definition + // 2 components, each of which has 2 containers. + protected Service createExampleApplication() { + Service exampleApp = new Service(); + exampleApp.setName("example-app"); + exampleApp.addComponent(createComponent("compa")); + exampleApp.addComponent(createComponent("compb")); + return exampleApp; + } + + public static Component createComponent(String name) { + return createComponent(name, 2L, "sleep 1000"); + } + + protected static Component createComponent(String name, long numContainers, + String command) { + Component comp1 = new Component(); + comp1.setNumberOfContainers(numContainers); + comp1.setLaunchCommand(command); + comp1.setName(name); + Resource resource = new Resource(); + comp1.setResource(resource); + resource.setMemory("128"); + resource.setCpus(1); + return comp1; + } + + public static SliderFileSystem initMockFs() throws IOException { + return initMockFs(null); + } + + public static SliderFileSystem initMockFs(Service ext) throws IOException { + SliderFileSystem sfs = mock(SliderFileSystem.class); + FileSystem mockFs = mock(FileSystem.class); + JsonSerDeser jsonSerDeser = mock(JsonSerDeser.class); + when(sfs.getFileSystem()).thenReturn(mockFs); + when(sfs.buildClusterDirPath(anyObject())).thenReturn( + new Path("cluster_dir_path")); + if (ext != null) { + when(jsonSerDeser.load(anyObject(), anyObject())).thenReturn(ext); + } + 
ServiceApiUtil.setJsonSerDeser(jsonSerDeser); + return sfs; + } + + protected void setConf(YarnConfiguration conf) { + this.conf = conf; + } + + protected Configuration getConf() { + return conf; + } + + protected FileSystem getFS() { + return fs; + } + + protected void setupInternal(int numNodeManager) + throws Exception { + LOG.info("Starting up YARN cluster"); +// Logger rootLogger = LogManager.getRootLogger(); +// rootLogger.setLevel(Level.DEBUG); + setConf(new YarnConfiguration()); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128); + // reduce the teardown waiting time + conf.setLong(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 1000); + conf.set("yarn.log.dir", "target"); + // mark if we need to launch the v1 timeline server + // disable aux-service based timeline aggregators + conf.set(YarnConfiguration.NM_AUX_SERVICES, ""); + conf.set(YarnConfiguration.NM_VMEM_PMEM_RATIO, "8"); + // Enable ContainersMonitorImpl + conf.set(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, + LinuxResourceCalculatorPlugin.class.getName()); + conf.set(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, + ProcfsBasedProcessTree.class.getName()); + conf.setBoolean( + YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING, true); + conf.setBoolean(TIMELINE_SERVICE_ENABLED, false); + conf.setInt(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 100); + conf.setLong(DEBUG_NM_DELETE_DELAY_SEC, 60000); + conf.setLong(AM_RESOURCE_MEM, 526); + conf.setLong(YarnServiceConf.READINESS_CHECK_INTERVAL, 5); + // Disable vmem check to disallow NM killing the container + conf.setBoolean(NM_VMEM_CHECK_ENABLED, false); + conf.setBoolean(NM_PMEM_CHECK_ENABLED, false); + // setup zk cluster + TestingCluster zkCluster; + zkCluster = new TestingCluster(1); + zkCluster.start(); + conf.set(YarnConfiguration.RM_ZK_ADDRESS, zkCluster.getConnectString()); + conf.set(KEY_REGISTRY_ZK_QUORUM, zkCluster.getConnectString()); + LOG.info("ZK cluster: " + 
zkCluster.getConnectString()); + + fs = FileSystem.get(conf); + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + + conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath()); + + if (yarnCluster == null) { + yarnCluster = + new MiniYARNCluster(TestYarnNativeServices.class.getSimpleName(), 1, + numNodeManager, 1, 1); + yarnCluster.init(conf); + yarnCluster.start(); + + waitForNMsToRegister(); + + URL url = Thread.currentThread().getContextClassLoader() + .getResource("yarn-site.xml"); + if (url == null) { + throw new RuntimeException( + "Could not find 'yarn-site.xml' dummy file in classpath"); + } + Configuration yarnClusterConfig = yarnCluster.getConfig(); + yarnClusterConfig.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, + new File(url.getPath()).getParent()); + //write the document to a buffer (not directly to the file, as that + //can cause the file being written to get read -which will then fail. + ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); + yarnClusterConfig.writeXml(bytesOut); + bytesOut.close(); + //write the bytes to the file in the classpath + OutputStream os = new FileOutputStream(new File(url.getPath())); + os.write(bytesOut.toByteArray()); + os.close(); + LOG.info("Write yarn-site.xml configs to: " + url); + } + if (hdfsCluster == null) { + HdfsConfiguration hdfsConfig = new HdfsConfiguration(); + hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig) + .numDataNodes(1).build(); + } + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + LOG.info("setup thread sleep interrupted. 
message=" + e.getMessage()); + } + } + + public void shutdown() throws IOException { + if (yarnCluster != null) { + try { + yarnCluster.stop(); + } finally { + yarnCluster = null; + } + } + if (hdfsCluster != null) { + try { + hdfsCluster.shutdown(); + } finally { + hdfsCluster = null; + } + } + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + SliderFileSystem sfs = new SliderFileSystem(conf); + Path appDir = sfs.getBaseApplicationPath(); + sfs.getFileSystem().delete(appDir, true); + } + + private void waitForNMsToRegister() throws Exception { + int sec = 60; + while (sec >= 0) { + if (yarnCluster.getResourceManager().getRMContext().getRMNodes().size() + >= NUM_NMS) { + break; + } + Thread.sleep(1000); + sec--; + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java new file mode 100644 index 0000000..55c096e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java @@ -0,0 +1,480 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import static org.apache.hadoop.yarn.service.conf.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME; +import static org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +/** + * Test for ServiceApiUtil helper methods. 
 */
public class TestServiceApiUtil {
  private static final Logger LOG = LoggerFactory
      .getLogger(TestServiceApiUtil.class);
  // Message prefixes used when a test fails because an expected exception
  // was / was not thrown.
  private static final String EXCEPTION_PREFIX = "Should have thrown " +
      "exception: ";
  private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " +
      "exception: ";

  // Exactly MAX_FQDN_LABEL_LENGTH + 1 characters (verified in
  // testResourceValidation), i.e. one char too long for a DNS label.
  private static final String LEN_64_STR =
      "abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01";

  private static final YarnConfiguration CONF_DEFAULT_DNS = new
      YarnConfiguration();
  private static final YarnConfiguration CONF_DNS_ENABLED = new
      YarnConfiguration();

  @BeforeClass
  public static void init() {
    // Registry DNS is enabled so that name-length/format validation kicks in.
    CONF_DNS_ENABLED.setBoolean(RegistryConstants.KEY_DNS_ENABLED, true);
  }

  /**
   * Walks a Service spec through validateAndResolveService, checking that
   * each missing/invalid field (name, launch command, memory, cpus,
   * container count, resource profile) is rejected with the expected message.
   */
  @Test(timeout = 90000)
  public void testResourceValidation() throws Exception {
    assertEquals(RegistryConstants.MAX_FQDN_LABEL_LENGTH + 1, LEN_64_STR
        .length());

    SliderFileSystem sfs = ServiceTestUtils.initMockFs();

    Service app = new Service();

    // no name
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with no name");
    } catch (IllegalArgumentException e) {
      assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage());
    }

    // bad format name
    String[] badNames = {"4finance", "Finance", "finance@home", LEN_64_STR};
    for (String badName : badNames) {
      app.setName(badName);
      try {
        ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
        Assert.fail(EXCEPTION_PREFIX + "service with bad name " + badName);
      } catch (IllegalArgumentException e) {
        // expected; message content varies per bad name, so not asserted
      }
    }

    // launch command not specified; LEN_64_STR is acceptable as a name when
    // DNS is disabled, so validation proceeds to the launch-command check
    app.setName(LEN_64_STR);
    Component comp = new Component().name("comp1");
    app.addComponent(comp);
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DEFAULT_DNS);
      Assert.fail(EXCEPTION_PREFIX + "service with no launch command");
    } catch (IllegalArgumentException e) {
      assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND,
          e.getMessage());
    }

    // launch command not specified (name shortened so it passes the
    // DNS-enabled name check)
    app.setName(LEN_64_STR.substring(0, RegistryConstants
        .MAX_FQDN_LABEL_LENGTH));
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with no launch command");
    } catch (IllegalArgumentException e) {
      assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND,
          e.getMessage());
    }

    // memory not specified
    comp.setLaunchCommand("sleep 1");
    Resource res = new Resource();
    app.setResource(res);
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with no memory");
    } catch (IllegalArgumentException e) {
      assertEquals(String.format(
          RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID,
          comp.getName()), e.getMessage());
    }

    // invalid no of cpus
    res.setMemory("100mb");
    res.setCpus(-2);
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(
          EXCEPTION_PREFIX + "service with invalid no of cpus");
    } catch (IllegalArgumentException e) {
      assertEquals(String.format(
          RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE,
          comp.getName()), e.getMessage());
    }

    // number of containers not specified
    res.setCpus(2);
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with no container count");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue(e.getMessage()
          .contains(ERROR_CONTAINERS_COUNT_INVALID));
    }

    // specifying profile along with cpus/memory raises exception
    res.setProfile("hbase_finance_large");
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX +
          "service with resource profile along with cpus/memory");
    } catch (IllegalArgumentException e) {
      assertEquals(String.format(RestApiErrorMessages
              .ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED,
          comp.getName()),
          e.getMessage());
    }

    // currently resource profile alone is not supported.
    // TODO: remove the next test once resource profile alone is supported.
    res.setCpus(null);
    res.setMemory(null);
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with resource profile only");
    } catch (IllegalArgumentException e) {
      assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET,
          e.getMessage());
    }

    // unset profile here and add cpus/memory back
    res.setProfile(null);
    res.setCpus(2);
    res.setMemory("2gb");

    // null number of containers
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "null number of containers");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue(e.getMessage()
          .startsWith(ERROR_CONTAINERS_COUNT_INVALID));
    }
  }

  /**
   * Verifies artifact validation: an artifact id is required for every
   * artifact type, and a fully-specified DOCKER artifact passes.
   */
  @Test
  public void testArtifacts() throws IOException {
    SliderFileSystem sfs = ServiceTestUtils.initMockFs();

    Service app = new Service();
    app.setName("service1");
    Resource res = new Resource();
    app.setResource(res);
    res.setMemory("512M");

    // no artifact id fails with default type
    Artifact artifact = new Artifact();
    app.setArtifact(artifact);
    Component comp = ServiceTestUtils.createComponent("comp1");

    app.setComponents(Collections.singletonList(comp));
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with no artifact id");
    } catch (IllegalArgumentException e) {
      assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
    }

    // no artifact id fails with SERVICE type
    artifact.setType(Artifact.TypeEnum.SERVICE);
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with no artifact id");
    } catch (IllegalArgumentException e) {
      assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
    }

    // no artifact id fails with TARBALL type
    artifact.setType(Artifact.TypeEnum.TARBALL);
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with no artifact id");
    } catch (IllegalArgumentException e) {
      assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage());
    }

    // everything valid here
    artifact.setType(Artifact.TypeEnum.DOCKER);
    artifact.setId("docker.io/centos:centos7");
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
    } catch (IllegalArgumentException e) {
      LOG.error("service attributes specified should be valid here", e);
      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
    }

    // NOTE(review): arguments are reversed relative to the JUnit convention
    // (expected value should come first) — harmless, but failure messages
    // will be misleading.
    assertEquals(app.getLifetime(), DEFAULT_UNLIMITED_LIFETIME);
  }

  /** Builds a minimal valid Resource (memory only). */
  private static Resource createValidResource() {
    Resource res = new Resource();
    res.setMemory("512M");
    return res;
  }

  /** Builds a component that passes validation (resource + launch command). */
  private static Component createValidComponent(String compName) {
    Component comp = new Component();
    comp.setName(compName);
    comp.setResource(createValidResource());
    comp.setNumberOfContainers(1L);
    comp.setLaunchCommand("sleep 1");
    return comp;
  }

  /**
   * Builds a valid Service named "name"; adds one valid component when
   * compName is non-null.
   */
  private static Service createValidApplication(String compName) {
    Service app = new Service();
    app.setName("name");
    app.setResource(createValidResource());
    if (compName != null) {
      app.addComponent(createValidComponent(compName));
    }
    return app;
  }

  /**
   * A service-level SERVICE artifact pointing at an external app resolves
   * without inheriting the artifact id into the local component.
   */
  @Test
  public void testExternalApplication() throws IOException {
    Service ext = createValidApplication("comp1");
    SliderFileSystem sfs = ServiceTestUtils.initMockFs(ext);

    Service app = createValidApplication(null);

    Artifact artifact = new Artifact();
    artifact.setType(Artifact.TypeEnum.SERVICE);
    artifact.setId("id");
    app.setArtifact(artifact);
    app.addComponent(ServiceTestUtils.createComponent("comp2"));
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
    } catch (IllegalArgumentException e) {
      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
    }

    assertEquals(1, app.getComponents().size());
    assertNotNull(app.getComponent("comp2"));
  }

  /** Two local components with the same name must be rejected. */
  @Test
  public void testDuplicateComponents() throws IOException {
    SliderFileSystem sfs = ServiceTestUtils.initMockFs();

    String compName = "comp1";
    Service app = createValidApplication(compName);
    app.addComponent(createValidComponent(compName));

    // duplicate component name fails
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with component collision");
    } catch (IllegalArgumentException e) {
      assertEquals("Component name collision: " + compName, e.getMessage());
    }
  }

  /**
   * A name collision with an external SERVICE-type component is allowed —
   * the external component replaces the local one during resolution.
   */
  @Test
  public void testExternalDuplicateComponent() throws IOException {
    Service ext = createValidApplication("comp1");
    SliderFileSystem sfs = ServiceTestUtils.initMockFs(ext);

    Service app = createValidApplication("comp1");
    Artifact artifact = new Artifact();
    artifact.setType(Artifact.TypeEnum.SERVICE);
    artifact.setId("id");
    app.getComponent("comp1").setArtifact(artifact);

    // duplicate component name okay in the case of SERVICE component
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
    } catch (IllegalArgumentException e) {
      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
    }
  }

  /**
   * Resolving a SERVICE artifact (service-level, then component-level)
   * replaces the local component with the external one.
   */
  @Test
  public void testExternalComponent() throws IOException {
    Service ext = createValidApplication("comp1");
    SliderFileSystem sfs = ServiceTestUtils.initMockFs(ext);

    Service app = createValidApplication("comp2");
    Artifact artifact = new Artifact();
    artifact.setType(Artifact.TypeEnum.SERVICE);
    artifact.setId("id");
    app.setArtifact(artifact);

    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
    } catch (IllegalArgumentException e) {
      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
    }

    assertEquals(1, app.getComponents().size());
    // artifact ID not inherited from global
    assertNotNull(app.getComponent("comp2"));

    // set SERVICE artifact id on component
    app.getComponent("comp2").setArtifact(artifact);

    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
    } catch (IllegalArgumentException e) {
      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
    }

    assertEquals(1, app.getComponents().size());
    // original component replaced by external component
    assertNotNull(app.getComponent("comp1"));
  }

  /**
   * Asserts that sortByDependencies returns the components in exactly the
   * expected order.
   */
  public static void verifyDependencySorting(List<Component> components,
      Component... expectedSorting) {
    Collection<Component> actualSorting = ServiceApiUtil.sortByDependencies(
        components);
    assertEquals(expectedSorting.length, actualSorting.size());
    int i = 0;
    for (Component component : actualSorting) {
      assertEquals(expectedSorting[i++], component);
    }
  }

  /**
   * Dependency-sorting happy paths, a dependency cycle, and a dependency on
   * a component absent from the service.
   */
  @Test
  public void testDependencySorting() throws IOException {
    Component a = ServiceTestUtils.createComponent("a");
    Component b = ServiceTestUtils.createComponent("b");
    Component c = ServiceTestUtils.createComponent("c");
    Component d =
        ServiceTestUtils.createComponent("d").dependencies(Arrays.asList("c"));
    Component e = ServiceTestUtils.createComponent("e")
        .dependencies(Arrays.asList("b", "d"));

    verifyDependencySorting(Arrays.asList(a, b, c), a, b, c);
    verifyDependencySorting(Arrays.asList(c, a, b), c, a, b);
    verifyDependencySorting(Arrays.asList(a, b, c, d, e), a, b, c, d, e);
    verifyDependencySorting(Arrays.asList(e, d, c, b, a), c, b, a, d, e);

    // c -> e -> d -> c forms a cycle
    c.setDependencies(Arrays.asList("e"));
    try {
      verifyDependencySorting(Arrays.asList(a, b, c, d, e));
      Assert.fail(EXCEPTION_PREFIX + "components with dependency cycle");
    } catch (IllegalArgumentException ex) {
      assertEquals(String.format(
          RestApiErrorMessages.ERROR_DEPENDENCY_CYCLE, Arrays.asList(c, d,
              e)),
          ex.getMessage());
    }

    // e depends on b, which is not part of the service
    SliderFileSystem sfs = ServiceTestUtils.initMockFs();
    Service service = createValidApplication(null);
    service.setComponents(Arrays.asList(c, d, e));
    try {
      ServiceApiUtil.validateAndResolveService(service, sfs,
          CONF_DEFAULT_DNS);
      Assert.fail(EXCEPTION_PREFIX + "components with bad dependencies");
    } catch (IllegalArgumentException ex) {
      assertEquals(String.format(
          RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, "b", "e"), ex
          .getMessage());
    }
  }

  @Test
  public void testInvalidComponent() throws IOException {
    SliderFileSystem sfs = ServiceTestUtils.initMockFs();
    testComponent(sfs);
  }

  /** Component/service names with uppercase or underscores are rejected. */
  @Test
  public void testValidateCompName() {
    String[] invalidNames = {
        "EXAMPLE", // UPPER case not allowed
        "example_app" // underscore not allowed.
    };
    for (String name : invalidNames) {
      try {
        ServiceApiUtil.validateNameFormat(name, new Configuration());
        Assert.fail();
      } catch (IllegalArgumentException ex) {
        // expected for each invalid name
        ex.printStackTrace();
      }
    }
  }

  /**
   * Component name length checks: the limit reserves room for the container
   * index suffix (up to Long.MAX_VALUE digits), and only applies when
   * registry DNS is enabled.
   */
  private static void testComponent(SliderFileSystem sfs)
      throws IOException {
    int maxLen = RegistryConstants.MAX_FQDN_LABEL_LENGTH;
    assertEquals(19, Long.toString(Long.MAX_VALUE).length());
    maxLen = maxLen - Long.toString(Long.MAX_VALUE).length();

    String compName = LEN_64_STR.substring(0, maxLen + 1);
    Service app = createValidApplication(null);
    app.addComponent(createValidComponent(compName));

    // invalid component name fails if dns is enabled
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
      Assert.fail(EXCEPTION_PREFIX + "service with invalid component name");
    } catch (IllegalArgumentException e) {
      assertEquals(String.format(RestApiErrorMessages
          .ERROR_COMPONENT_NAME_INVALID, maxLen, compName), e.getMessage());
    }

    // does not fail if dns is disabled
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DEFAULT_DNS);
    } catch (IllegalArgumentException e) {
      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
    }

    compName = LEN_64_STR.substring(0, maxLen);
    app = createValidApplication(null);
    app.addComponent(createValidComponent(compName));

    // does not fail
    try {
      ServiceApiUtil.validateAndResolveService(app, sfs, CONF_DNS_ENABLED);
    } catch (IllegalArgumentException e) {
      Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
    }
  }
}

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Container; +import org.apache.hadoop.yarn.service.api.records.ContainerState; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.service.exceptions.SliderException; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeSet; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.yarn.api.records.YarnApplicationState.FINISHED; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.*; + +/** + * End to end tests to test deploying services with MiniYarnCluster and a in-JVM + * ZK testing cluster. 
 */
public class TestYarnNativeServices extends ServiceTestUtils {

  private static final Logger LOG =
      LoggerFactory.getLogger(TestYarnNativeServices.class);

  @Rule
  public TemporaryFolder tmpFolder = new TemporaryFolder();

  @Before
  public void setup() throws Exception {
    // NUM_NMS is inherited from ServiceTestUtils (mini-cluster NM count).
    setupInternal(NUM_NMS);
  }

  @After
  public void tearDown() throws IOException {
    shutdown();
  }

  // End-to-end test to use ServiceClient to deploy a service.
  // 1. Create a service with 2 components, each of which has 2 containers
  // 2. Flex up each component to 3 containers and check the component instance names
  // 3. Flex down each component to 1 container and check the component instance names
  // 4. Flex up each component to 2 containers and check the component instance names
  // 5. Stop the service
  // 6. Destroy the service
  @Test (timeout = 200000)
  public void testCreateFlexStopDestroyService() throws Exception {
    ServiceClient client = createClient();
    Service exampleApp = createExampleApplication();
    client.actionCreate(exampleApp);
    SliderFileSystem fileSystem = new SliderFileSystem(getConf());
    Path appDir = fileSystem.buildClusterDirPath(exampleApp.getName());
    // check app.json is persisted.
    Assert.assertTrue(
        getFS().exists(new Path(appDir, exampleApp.getName() + ".json")));
    waitForAllCompToBeReady(client, exampleApp);

    // Flex two components, each from 2 container to 3 containers.
    flexComponents(client, exampleApp, 3L);
    // wait for flex to be completed, increase from 2 to 3 containers.
    waitForAllCompToBeReady(client, exampleApp);
    // check all instances name for each component are in sequential order.
    checkCompInstancesInOrder(client, exampleApp);

    // flex down to 1
    flexComponents(client, exampleApp, 1L);
    waitForAllCompToBeReady(client, exampleApp);
    checkCompInstancesInOrder(client, exampleApp);

    // check component dir and registry are cleaned up.
    // TODO: not implemented — see checkRegistryAndCompDirDeleted() below.

    // flex up again to 2
    flexComponents(client, exampleApp, 2L);
    waitForAllCompToBeReady(client, exampleApp);
    checkCompInstancesInOrder(client, exampleApp);

    // stop the service
    LOG.info("Stop the service");
    client.actionStop(exampleApp.getName(), true);
    ApplicationReport report = client.getYarnClient()
        .getApplicationReport(ApplicationId.fromString(exampleApp.getId()));
    // AM unregisters with RM successfully
    Assert.assertEquals(FINISHED, report.getYarnApplicationState());
    Assert.assertEquals(FinalApplicationStatus.ENDED,
        report.getFinalApplicationStatus());

    LOG.info("Destroy the service");
    //destroy the service and check the app dir is deleted from fs.
    client.actionDestroy(exampleApp.getName());
    // check the service dir on hdfs (in this case, local fs) are deleted.
    Assert.assertFalse(getFS().exists(appDir));
  }

  // Create compa with 2 containers
  // Create compb with 2 containers which depends on compa
  // Check containers for compa started before containers for compb
  @Test (timeout = 200000)
  public void testComponentStartOrder() throws Exception {
    ServiceClient client = createClient();
    Service exampleApp = new Service();
    exampleApp.setName("teststartorder");
    exampleApp.addComponent(createComponent("compa", 2, "sleep 1000"));
    Component compb = createComponent("compb", 2, "sleep 1000");

    // Let compb depend on compa;
    compb.setDependencies(Collections.singletonList("compa"));
    exampleApp.addComponent(compb);

    client.actionCreate(exampleApp);
    waitForAllCompToBeReady(client, exampleApp);

    // check that containers for compa are launched before containers for compb
    checkContainerLaunchDependencies(client, exampleApp, "compa", "compb");

    client.actionStop(exampleApp.getName(), true);
    client.actionDestroy(exampleApp.getName());
  }

  // Check containers launched are in dependency order
  // Get all containers into a list and sort based on container launch time e.g.
  // compa-c1, compa-c2, compb-c1, compb-c2;
  // check that the container's launch time are align with the dependencies.
  private void checkContainerLaunchDependencies(ServiceClient client,
      Service exampleApp, String... compOrder)
      throws IOException, YarnException {
    Service retrievedApp = client.getStatus(exampleApp.getName());
    List<Container> containerList = new ArrayList<>();
    for (Component component : retrievedApp.getComponents()) {
      containerList.addAll(component.getContainers());
    }
    // sort based on launchTime
    containerList
        .sort((o1, o2) -> o1.getLaunchTime().compareTo(o2.getLaunchTime()));
    LOG.info("containerList: " + containerList);
    // check the containers are in the dependency order.
    int index = 0;
    for (String comp : compOrder) {
      long num = retrievedApp.getComponent(comp).getNumberOfContainers();
      for (int i = 0; i < num; i++) {
        String compInstanceName = containerList.get(index).getComponentInstanceName();
        // instance names look like "<comp>-<n>"; strip the trailing index
        String compName =
            compInstanceName.substring(0, compInstanceName.lastIndexOf('-'));
        Assert.assertEquals(comp, compName);
        index++;
      }
    }
  }


  /**
   * Flexes both compa and compb to {@code count} containers via the REST
   * flex API, keeping the local spec in sync. Returns the flex request map.
   */
  private Map<String, Long> flexComponents(ServiceClient client,
      Service exampleApp, long count) throws YarnException, IOException {
    Map<String, Long> compCounts = new HashMap<>();
    compCounts.put("compa", count);
    compCounts.put("compb", count);
    // flex will update the persisted conf to reflect latest number of containers.
    exampleApp.getComponent("compa").setNumberOfContainers(count);
    exampleApp.getComponent("compb").setNumberOfContainers(count);
    client.flexByRestService(exampleApp.getName(), compCounts);
    return compCounts;
  }

  // Check each component's comp instances name are in sequential order.
  // E.g. If there are two instances compA-1 and compA-2
  // When flex up to 4 instances, it should be compA-1 , compA-2, compA-3, compA-4
  // When flex down to 3 instances, it should be compA-1 , compA-2, compA-3.
  private void checkCompInstancesInOrder(ServiceClient client,
      Service exampleApp) throws IOException, YarnException {
    Service service = client.getStatus(exampleApp.getName());
    for (Component comp : service.getComponents()) {
      checkEachCompInstancesInOrder(comp);
    }
  }

  // NOTE(review): dead placeholder — never called and has an empty body;
  // intended for the registry/component-dir cleanup check referenced in
  // testCreateFlexStopDestroyService.
  private void checkRegistryAndCompDirDeleted() {

  }

  /**
   * Asserts the component has the expected number of containers and that
   * their instance names are "<comp>-0" .. "<comp>-(n-1)" with no gaps.
   */
  private void checkEachCompInstancesInOrder(Component component) {
    long expectedNumInstances = component.getNumberOfContainers();
    Assert.assertEquals(expectedNumInstances, component.getContainers().size());
    // TreeSet sorts the names so the index check below sees them in order
    TreeSet<String> instances = new TreeSet<>();
    for (Container container : component.getContainers()) {
      instances.add(container.getComponentInstanceName());
    }

    int i = 0;
    for (String s : instances) {
      Assert.assertEquals(component.getName() + "-" + i, s);
      i++;
    }
  }

  // NOTE(review): currently unused; kept for tests that wait on a single
  // component rather than the whole service.
  private void waitForOneCompToBeReady(ServiceClient client,
      Service exampleApp, String readyComp)
      throws TimeoutException, InterruptedException {
    long numExpectedContainers =
        exampleApp.getComponent(readyComp).getNumberOfContainers();
    GenericTestUtils.waitFor(() -> {
      try {
        Service retrievedApp = client.getStatus(exampleApp.getName());
        Component retrievedComp = retrievedApp.getComponent(readyComp);

        if (retrievedComp.getContainers() != null
            && retrievedComp.getContainers().size() == numExpectedContainers) {
          LOG.info(readyComp + " found " + numExpectedContainers
              + " containers running");
          return true;
        } else {
          LOG.info(" Waiting for " + readyComp + "'s containers to be running");
          return false;
        }
      } catch (Exception e) {
        e.printStackTrace();
        return false;
      }
    }, 2000, 200000);
  }

  // wait until all the containers for all components become ready state
  private void waitForAllCompToBeReady(ServiceClient client,
      Service exampleApp) throws TimeoutException, InterruptedException {
    int expectedTotalContainers = countTotalContainers(exampleApp);
    GenericTestUtils.waitFor(() -> {
      try {
        Service retrievedApp = client.getStatus(exampleApp.getName());
        int totalReadyContainers = 0;
        LOG.info("Num Components " + retrievedApp.getComponents().size());
        for (Component component : retrievedApp.getComponents()) {
          LOG.info("looking for  " + component.getName());
          LOG.info(component.toString());
          if (component.getContainers() != null) {
            if (component.getContainers().size() == exampleApp
                .getComponent(component.getName()).getNumberOfContainers()) {
              for (Container container : component.getContainers()) {
                LOG.info(
                    "Container state " + container.getState() + ", component "
                        + component.getName());
                if (container.getState() == ContainerState.READY) {
                  totalReadyContainers++;
                  LOG.info("Found 1 ready container " + container.getId());
                }
              }
            } else {
              LOG.info(component.getName() + " Expected number of containers "
                  + exampleApp.getComponent(component.getName())
                  .getNumberOfContainers() + ", current = " + component
                  .getContainers());
            }
          }
        }
        LOG.info("Exit loop, totalReadyContainers= " + totalReadyContainers
            + " expected = " + expectedTotalContainers);
        return totalReadyContainers == expectedTotalContainers;
      } catch (Exception e) {
        e.printStackTrace();
        return false;
      }
    }, 2000, 200000);
  }

  /**
   * Builds a started ServiceClient whose AM jar upload is skipped — the
   * mini-cluster test uses local jars.
   */
  private ServiceClient createClient() throws Exception {
    ServiceClient client = new ServiceClient() {
      @Override protected Path addJarResource(String appName,
          Map<String, LocalResource> localResources)
          throws IOException, SliderException {
        // do nothing, the Unit test will use local jars
        return null;
      }
    };
    client.init(getConf());
    client.start();
    return client;
  }


  /** Sums the declared container counts over all components. */
  private int countTotalContainers(Service service) {
    int totalContainers = 0;
    for (Component component : service.getComponents()) {
      totalContainers += component.getNumberOfContainers();
    }
    return totalContainers;
  }
}
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java new file mode 100644 index 0000000..1f4581e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */
package org.apache.hadoop.yarn.service.client;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.AppAdminClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.conf.ExampleAppJson;
import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH;

/**
 * Test for building / resolving components of type SERVICE.
 */
public class TestBuildExternalComponents {

  protected Configuration conf = new YarnConfiguration();
  // per-test scratch dir used as the service base path on the local fs
  private File basedir;

  // Check component names match with expected
  private static void checkComponentNames(List<Component> components,
      Set<String> expectedComponents) {
    Assert.assertEquals(expectedComponents.size(), components.size());
    for (Component comp : components) {
      Assert.assertTrue(expectedComponents.contains(comp.getName()));
    }
  }

  // 1. Build the def file and store on fs
  // 2. check component names
  private void buildAndCheckComponents(String appName, String appDef,
      SliderFileSystem sfs, Set<String> names) throws Throwable {
    AppAdminClient client = AppAdminClient.createAppAdminClient(AppAdminClient
        .DEFAULT_TYPE, conf);
    // actionSave persists the resolved service definition under basedir
    client.actionSave(ExampleAppJson.resourceName(appDef), null, null,
        null);

    // verify generated conf
    List<Component> components =
        ServiceApiUtil.getComponents(sfs, appName);
    checkComponentNames(components, names);
  }

  @Before
  public void setup() throws IOException {
    basedir = new File("target", "apps");
    // NOTE(review): when basedir already exists it is deleted but NOT
    // recreated before use — presumably the fs layer creates it on first
    // write; confirm before relying on it.
    if (basedir.exists()) {
      FileUtils.deleteDirectory(basedir);
    } else {
      basedir.mkdirs();
    }
    conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath());
  }

  @After
  public void tearDown() throws IOException {
    if (basedir != null) {
      FileUtils.deleteDirectory(basedir);
    }
  }

  // Test applications defining external components(SERVICE type)
  // can be resolved correctly
  @Test
  public void testExternalComponentBuild() throws Throwable {
    SliderFileSystem sfs = new SliderFileSystem(conf);

    Set<String> nameSet = new HashSet<>();
    nameSet.add("simple");
    nameSet.add("master");
    nameSet.add("worker");

    // app-1 has 3 components: simple, master, worker
    buildAndCheckComponents("app-1", ExampleAppJson.APP_JSON, sfs, nameSet);
    buildAndCheckComponents("external-0", ExampleAppJson.EXTERNAL_JSON_0, sfs,
        nameSet);

    nameSet.add("other");

    // external1 has 3 components: simple(SERVICE - app1), master and other
    buildAndCheckComponents("external-1", ExampleAppJson.EXTERNAL_JSON_1, sfs,
        nameSet);

    nameSet.add("another");

    // external2 has 2 components: ext(SERVICE - external1), another
    buildAndCheckComponents("external-2", ExampleAppJson.EXTERNAL_JSON_2, sfs,
        nameSet);
  }
}
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java new file mode 100644 index 0000000..df4b1df --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.client; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.ToolRunner; +import org.apache.hadoop.yarn.client.cli.ApplicationCLI; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.conf.ExampleAppJson; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.YARN_SERVICE_BASE_PATH; + +public class TestServiceCLI { + private static final Logger LOG = LoggerFactory.getLogger(TestServiceCLI + .class); + + private Configuration conf = new YarnConfiguration(); + private File basedir; + private SliderFileSystem fs; + private String basedirProp; + + private void runCLI(String[] args) throws Exception { + LOG.info("running CLI: yarn {}", Arrays.asList(args)); + ApplicationCLI cli = new ApplicationCLI(); + cli.setSysOutPrintStream(System.out); + cli.setSysErrPrintStream(System.err); + int res = ToolRunner.run(cli, ApplicationCLI.preProcessArgs(args)); + cli.stop(); + } + + private void buildApp(String serviceName, String appDef) throws Throwable { + String[] args = {"app", "-D", basedirProp, "-save", serviceName, + ExampleAppJson.resourceName(appDef)}; + runCLI(args); + } + + private void buildApp(String serviceName, String appDef, String lifetime, + String queue) throws Throwable { + String[] args = {"app", "-D", basedirProp, "-save", serviceName, + ExampleAppJson.resourceName(appDef), "-updateLifetime", 
lifetime, + "-changeQueue", queue}; + runCLI(args); + } + + @Before + public void setup() throws Throwable { + basedir = new File("target", "apps"); + basedirProp = YARN_SERVICE_BASE_PATH + "=" + basedir.getAbsolutePath(); + conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath()); + fs = new SliderFileSystem(conf); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + @Test + public void testFlexComponents() throws Throwable { + // currently can only test building apps, since that is the only + // operation that doesn't require an RM + // TODO: expand CLI test to try other commands + String serviceName = "app-1"; + buildApp(serviceName, ExampleAppJson.APP_JSON); + checkApp(serviceName, "master", 1L, 3600L, null); + + serviceName = "app-2"; + buildApp(serviceName, ExampleAppJson.APP_JSON, "1000", "qname"); + checkApp(serviceName, "master", 1L, 1000L, "qname"); + } + + private void checkApp(String serviceName, String compName, long count, Long + lifetime, String queue) throws IOException { + Service service = ServiceApiUtil.loadService(fs, serviceName); + Assert.assertEquals(serviceName, service.getName()); + Assert.assertEquals(lifetime, service.getLifetime()); + Assert.assertEquals(queue, service.getQueue()); + List components = service.getComponents(); + for (Component component : components) { + if (component.getName().equals(compName)) { + Assert.assertEquals(count, component.getNumberOfContainers() + .longValue()); + return; + } + } + Assert.fail(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java new file mode 100644 index 0000000..5fdd2ab --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/ExampleAppJson.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + + +import org.apache.hadoop.yarn.service.api.records.Service; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hadoop.yarn.service.ServiceTestUtils.JSON_SER_DESER; + +/** + * Names of the example configs. 
+ */ +public final class ExampleAppJson { + + public static final String APP_JSON = "app.json"; + public static final String OVERRIDE_JSON = "app-override.json"; + public static final String DEFAULT_JSON = "default.json"; + public static final String EXTERNAL_JSON_0 = "external0.json"; + public static final String EXTERNAL_JSON_1 = "external1.json"; + public static final String EXTERNAL_JSON_2 = "external2.json"; + + public static final String PACKAGE = "/org/apache/hadoop/yarn/service/conf/examples/"; + + + private static final String[] ALL_EXAMPLES = {APP_JSON, OVERRIDE_JSON, + DEFAULT_JSON}; + + public static final List ALL_EXAMPLE_RESOURCES = new ArrayList<>(); + static { + for (String example : ALL_EXAMPLES) { + ALL_EXAMPLE_RESOURCES.add(PACKAGE + example); + } + } + + private ExampleAppJson() { + } + + public static Service loadResource(String name) throws IOException { + return JSON_SER_DESER.fromResource(PACKAGE + name); + } + + public static String resourceName(String name) { + return "target/test-classes" + PACKAGE + name; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java new file mode 100644 index 0000000..8739382 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestAppJsonResolve.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.ServiceTestUtils; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.api.records.Configuration; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.conf.ExampleAppJson.*; + +/** + * Test global configuration resolution. 
+ */ +public class TestAppJsonResolve extends Assert { + protected static final Logger LOG = + LoggerFactory.getLogger(TestAppJsonResolve.class); + + @Test + public void testOverride() throws Throwable { + Service orig = ExampleAppJson.loadResource(OVERRIDE_JSON); + + Configuration global = orig.getConfiguration(); + assertEquals("a", global.getProperty("g1")); + assertEquals("b", global.getProperty("g2")); + assertEquals(2, global.getFiles().size()); + + Configuration simple = orig.getComponent("simple").getConfiguration(); + assertEquals(0, simple.getProperties().size()); + assertEquals(1, simple.getFiles().size()); + + Configuration master = orig.getComponent("master").getConfiguration(); + assertEquals("m", master.getProperty("name")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals(0, master.getFiles().size()); + + Configuration worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(3, worker.getProperties().size()); + assertEquals(0, worker.getFiles().size()); + + assertEquals("worker", worker.getProperty("name")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertNull(worker.getProperty("g2")); + assertEquals("1000", worker.getProperty("timeout")); + + // here is the resolution + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + ServiceApiUtil.validateAndResolveService(orig, sfs, new + YarnConfiguration()); + + global = orig.getConfiguration(); + LOG.info("global = {}", global); + assertEquals("a", global.getProperty("g1")); + assertEquals("b", global.getProperty("g2")); + assertEquals(2, global.getFiles().size()); + + simple = orig.getComponent("simple").getConfiguration(); + assertEquals(2, simple.getProperties().size()); + assertEquals("a", simple.getProperty("g1")); + assertEquals("b", simple.getProperty("g2")); + assertEquals(2, simple.getFiles().size()); + + Set files = new HashSet<>(); + Map props = new HashMap<>(); + props.put("k1", "overridden"); 
+ props.put("k2", "v2"); + files.add(new ConfigFile().destFile("file1").type(ConfigFile.TypeEnum + .PROPERTIES).props(props)); + files.add(new ConfigFile().destFile("file2").type(ConfigFile.TypeEnum + .XML).props(Collections.singletonMap("k3", "v3"))); + assertTrue(files.contains(simple.getFiles().get(0))); + assertTrue(files.contains(simple.getFiles().get(1))); + + master = orig.getComponent("master").getConfiguration(); + LOG.info("master = {}", master); + assertEquals(3, master.getProperties().size()); + assertEquals("m", master.getProperty("name")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals("b", master.getProperty("g2")); + assertEquals(2, master.getFiles().size()); + + props.put("k1", "v1"); + files.clear(); + files.add(new ConfigFile().destFile("file1").type(ConfigFile.TypeEnum + .PROPERTIES).props(props)); + files.add(new ConfigFile().destFile("file2").type(ConfigFile.TypeEnum + .XML).props(Collections.singletonMap("k3", "v3"))); + + assertTrue(files.contains(master.getFiles().get(0))); + assertTrue(files.contains(master.getFiles().get(1))); + + worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(4, worker.getProperties().size()); + + assertEquals("worker", worker.getProperty("name")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertEquals("b", worker.getProperty("g2")); + assertEquals("1000", worker.getProperty("timeout")); + assertEquals(2, worker.getFiles().size()); + + assertTrue(files.contains(worker.getFiles().get(0))); + assertTrue(files.contains(worker.getFiles().get(1))); + } + + @Test + public void testOverrideExternalConfiguration() throws IOException { + Service orig = ExampleAppJson.loadResource(EXTERNAL_JSON_1); + + Configuration global = orig.getConfiguration(); + assertEquals(0, global.getProperties().size()); + + assertEquals(3, orig.getComponents().size()); + + Configuration simple = orig.getComponent("simple").getConfiguration(); + 
assertEquals(0, simple.getProperties().size()); + + Configuration master = orig.getComponent("master").getConfiguration(); + assertEquals(1, master.getProperties().size()); + assertEquals("is-overridden", master.getProperty("g3")); + + Configuration other = orig.getComponent("other").getConfiguration(); + assertEquals(0, other.getProperties().size()); + + // load the external service + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + Service ext = ExampleAppJson.loadResource(APP_JSON); + ServiceApiUtil.validateAndResolveService(ext, sfs, new + YarnConfiguration()); + + // perform the resolution on original service + sfs = ServiceTestUtils.initMockFs(ext); + ServiceApiUtil.validateAndResolveService(orig, sfs, new + YarnConfiguration()); + + global = orig.getConfiguration(); + assertEquals(0, global.getProperties().size()); + + assertEquals(4, orig.getComponents().size()); + + simple = orig.getComponent("simple").getConfiguration(); + assertEquals(3, simple.getProperties().size()); + assertEquals("a", simple.getProperty("g1")); + assertEquals("b", simple.getProperty("g2")); + assertEquals("60", + simple.getProperty("yarn.service.failure-count-reset.window")); + + master = orig.getComponent("master").getConfiguration(); + assertEquals(5, master.getProperties().size()); + assertEquals("512M", master.getProperty("jvm.heapsize")); + assertEquals("overridden", master.getProperty("g1")); + assertEquals("b", master.getProperty("g2")); + assertEquals("is-overridden", master.getProperty("g3")); + assertEquals("60", + simple.getProperty("yarn.service.failure-count-reset.window")); + + Configuration worker = orig.getComponent("worker").getConfiguration(); + LOG.info("worker = {}", worker); + assertEquals(4, worker.getProperties().size()); + assertEquals("512M", worker.getProperty("jvm.heapsize")); + assertEquals("overridden-by-worker", worker.getProperty("g1")); + assertEquals("b", worker.getProperty("g2")); + assertEquals("60", + 
worker.getProperty("yarn.service.failure-count-reset.window")); + + other = orig.getComponent("other").getConfiguration(); + assertEquals(0, other.getProperties().size()); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java new file mode 100644 index 0000000..a813da3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestLoadExampleAppJson.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.ServiceTestUtils; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderFileSystem; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.Arrays; +import java.util.Collection; + +import static org.apache.hadoop.yarn.service.ServiceTestUtils.JSON_SER_DESER; + +/** + * Test loading example resources. + */ +@RunWith(value = Parameterized.class) +public class TestLoadExampleAppJson extends Assert { + private String resource; + + public TestLoadExampleAppJson(String resource) { + this.resource = resource; + } + + @Parameterized.Parameters + public static Collection filenames() { + String[][] stringArray = new String[ExampleAppJson + .ALL_EXAMPLE_RESOURCES.size()][1]; + int i = 0; + for (String s : ExampleAppJson.ALL_EXAMPLE_RESOURCES) { + stringArray[i++][0] = s; + } + return Arrays.asList(stringArray); + } + + @Test + public void testLoadResource() throws Throwable { + try { + Service service = JSON_SER_DESER.fromResource(resource); + + SliderFileSystem sfs = ServiceTestUtils.initMockFs(); + + ServiceApiUtil.validateAndResolveService(service, sfs, + new YarnConfiguration()); + } catch (Exception e) { + throw new Exception("exception loading " + resource + ":" + e.toString()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java new file mode 100644 index 0000000..6159215 --- 
/dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/conf/TestValidateServiceNames.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.conf; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.apache.hadoop.yarn.service.utils.SliderUtils; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +/** + * Test cluster name validation. 
+ */ +public class TestValidateServiceNames { + + void assertValidName(String name) { + ServiceApiUtil.validateNameFormat(name, new Configuration()); + } + + void assertInvalidName(String name) { + try { + ServiceApiUtil.validateNameFormat(name, new Configuration()); + Assert.fail(); + } catch (IllegalArgumentException e) { + // + } + } + + void assertInvalid(List names) { + for (String name : names) { + assertInvalidName(name); + } + } + + void assertValid(List names) { + for (String name : names) { + assertValidName(name); + } + } + + @Test + public void testEmptyName() throws Throwable { + assertInvalidName(""); + } + + @Test + public void testSpaceName() throws Throwable { + assertInvalidName(" "); + } + + + @Test + public void testLeadingHyphen() throws Throwable { + assertInvalidName("-hyphen"); + } + + @Test + public void testTitleLetters() throws Throwable { + assertInvalidName("Title"); + } + + @Test + public void testCapitalLetters() throws Throwable { + assertInvalidName("UPPER-CASE-CLUSTER"); + } + + @Test + public void testInnerBraced() throws Throwable { + assertInvalidName("a[a"); + } + + @Test + public void testLeadingBrace() throws Throwable { + assertInvalidName("["); + } + + @Test + public void testNonalphaLeadingChars() throws Throwable { + assertInvalid(Arrays.asList( + "[a", "#", "@", "=", "*", "." 
+ )); + } + + @Test + public void testNonalphaInnerChars() throws Throwable { + assertInvalid(Arrays.asList( + "a[a", "b#", "c@", "d=", "e*", "f.", "g ", "h i" + )); + } + + @Test + public void testClusterValid() throws Throwable { + assertValidName("cluster"); + } + + @Test + public void testValidNames() throws Throwable { + assertValid(Arrays.asList( + "cluster", + "cluster1", + "very-very-very-long-cluster-name", + "c1234567890" + )); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java new file mode 100644 index 0000000..0e03a2c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/TestServiceMonitor.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +package org.apache.hadoop.yarn.service.monitor; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.MockServiceAM; +import org.apache.hadoop.yarn.service.ServiceTestUtils; + +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.conf.YarnServiceConf; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.Collections; + +public class TestServiceMonitor extends ServiceTestUtils { + + private File basedir; + YarnConfiguration conf = new YarnConfiguration(); + + @Before + public void setup() throws Exception { + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + conf.setLong(YarnServiceConf.READINESS_CHECK_INTERVAL, 2); + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + // Create compa with 1 container + // Create compb with 1 container + // Verify compb dependency satisfied + // Increase compa to 2 containers + // Verify compb dependency becomes unsatisfied. 
+ @Test + public void testComponentDependency() throws Exception{ + ApplicationId applicationId = ApplicationId.newInstance(123456, 1); + Service exampleApp = new Service(); + exampleApp.setId(applicationId.toString()); + exampleApp.setName("testComponentDependency"); + exampleApp.addComponent(createComponent("compa", 1, "sleep 1000")); + Component compb = createComponent("compb", 1, "sleep 1000"); + + // Let compb depends on compa; + compb.setDependencies(Collections.singletonList("compa")); + exampleApp.addComponent(compb); + + MockServiceAM am = new MockServiceAM(exampleApp); + am.init(conf); + am.start(); + + // compa ready + Assert.assertTrue(am.getComponent("compa").areDependenciesReady()); + //compb not ready + Assert.assertFalse(am.getComponent("compb").areDependenciesReady()); + + // feed 1 container to compa, + am.feedContainerToComp(exampleApp, 1, "compa"); + // waiting for compb's dependencies are satisfied + am.waitForDependenciesSatisfied("compb"); + + // feed 1 container to compb + am.feedContainerToComp(exampleApp, 2, "compb"); + am.flexComponent("compa", 2); + am.waitForNumDesiredContainers("compa", 2); + + // compb dependencies not satisfied again. 
+ Assert.assertFalse(am.getComponent("compb").areDependenciesReady()); + am.stop(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java new file mode 100644 index 0000000..79406e9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestAbstractClientProvider.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service.providers; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.ConfigFile; +import org.apache.hadoop.yarn.service.provider.AbstractClientProvider; +import org.junit.Assert; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test the AbstractClientProvider shared methods. + */ +public class TestAbstractClientProvider { + private static final String EXCEPTION_PREFIX = "Should have thrown " + + "exception: "; + private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " + + "exception: "; + + private static class ClientProvider extends AbstractClientProvider { + @Override + public void validateArtifact(Artifact artifact, FileSystem fileSystem) + throws IOException { + } + + @Override + protected void validateConfigFile(ConfigFile configFile, + FileSystem fileSystem) throws IOException { + } + } + + @Test + public void testConfigFiles() throws IOException { + ClientProvider clientProvider = new ClientProvider(); + FileSystem mockFs = mock(FileSystem.class); + when(mockFs.exists(anyObject())).thenReturn(true); + + ConfigFile configFile = new ConfigFile(); + List configFiles = new ArrayList<>(); + configFiles.add(configFile); + + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "null file type"); + } catch (IllegalArgumentException e) { + } + + configFile.setType(ConfigFile.TypeEnum.TEMPLATE); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "empty src_file for type template"); + } catch (IllegalArgumentException e) { + } + + configFile.setSrcFile("srcfile"); + try { + clientProvider.validateConfigFiles(configFiles, 
mockFs); + Assert.fail(EXCEPTION_PREFIX + "empty dest file"); + } catch (IllegalArgumentException e) { + } + + configFile.setDestFile("destfile"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + configFile = new ConfigFile(); + configFile.setType(ConfigFile.TypeEnum.JSON); + configFile.setSrcFile(null); + configFile.setDestFile("path/destfile2"); + configFiles.add(configFile); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "dest file with multiple path elements"); + } catch (IllegalArgumentException e) { + } + + configFile.setDestFile("/path/destfile2"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + configFile.setDestFile("destfile"); + try { + clientProvider.validateConfigFiles(configFiles, mockFs); + Assert.fail(EXCEPTION_PREFIX + "duplicate dest file"); + } catch (IllegalArgumentException e) { + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java new file mode 100644 index 0000000..56f4555 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/providers/TestProviderFactory.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service.providers; + +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum; +import org.apache.hadoop.yarn.service.provider.ProviderFactory; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultClientProvider; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderFactory; +import org.apache.hadoop.yarn.service.provider.defaultImpl.DefaultProviderService; +import org.apache.hadoop.yarn.service.provider.docker.DockerClientProvider; +import org.apache.hadoop.yarn.service.provider.docker.DockerProviderFactory; +import org.apache.hadoop.yarn.service.provider.docker.DockerProviderService; +import org.apache.hadoop.yarn.service.provider.tarball.TarballClientProvider; +import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderFactory; +import org.apache.hadoop.yarn.service.provider.tarball.TarballProviderService; + +import org.junit.Test; + +import static org.junit.Assert.assertTrue; + +/** + * Test provider factories. 
+ */ +public class TestProviderFactory { + @Test + public void testDockerFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(new Artifact().type(TypeEnum.DOCKER)); + assertTrue(factory instanceof DockerProviderFactory); + assertTrue(factory.createClientProvider() instanceof DockerClientProvider); + assertTrue(factory.createServerProvider() instanceof DockerProviderService); + assertTrue(ProviderFactory.getProviderService(new Artifact() + .type(TypeEnum.DOCKER)) instanceof DockerProviderService); + } + + @Test + public void testTarballFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(new Artifact().type(TypeEnum.TARBALL)); + assertTrue(factory instanceof TarballProviderFactory); + assertTrue(factory.createClientProvider() instanceof TarballClientProvider); + assertTrue(factory.createServerProvider() instanceof + TarballProviderService); + assertTrue(ProviderFactory.getProviderService(new Artifact() + .type(TypeEnum.TARBALL)) instanceof TarballProviderService); + } + + @Test + public void testDefaultFactory() throws Throwable { + ProviderFactory factory = ProviderFactory + .createServiceProviderFactory(null); + assertTrue(factory instanceof DefaultProviderFactory); + assertTrue(factory.createClientProvider() instanceof DefaultClientProvider); + assertTrue(factory.createServerProvider() instanceof DefaultProviderService); + assertTrue(ProviderFactory.getProviderService(null) instanceof + DefaultProviderService); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/timelineservice/TestServiceTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/timelineservice/TestServiceTimelinePublisher.java new file mode 100644 index 
0000000..e7c7600 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/timelineservice/TestServiceTimelinePublisher.java @@ -0,0 +1,291 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.service.timelineservice; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; +import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier; +import org.apache.hadoop.yarn.client.api.TimelineV2Client; +import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.service.ServiceContext; +import org.apache.hadoop.yarn.service.api.records.Service; +import org.apache.hadoop.yarn.service.api.records.ServiceState; +import org.apache.hadoop.yarn.service.api.records.Artifact; +import org.apache.hadoop.yarn.service.api.records.Component; +import org.apache.hadoop.yarn.service.api.records.Container; +import org.apache.hadoop.yarn.service.api.records.ContainerState; +import org.apache.hadoop.yarn.service.api.records.PlacementPolicy; +import org.apache.hadoop.yarn.service.api.records.Resource; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstance; +import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test class for ServiceTimelinePublisher. 
+ */ +public class TestServiceTimelinePublisher { + private TimelineV2Client timelineClient; + private Configuration config; + private ServiceTimelinePublisher serviceTimelinePublisher; + private static String SERVICE_NAME = "HBASE"; + private static String SERVICEID = "application_1490093646524_0005"; + private static String ARTIFACTID = "ARTIFACTID"; + private static String COMPONENT_NAME = "DEFAULT"; + private static String CONTAINER_ID = + "container_e02_1490093646524_0005_01_000001"; + private static String CONTAINER_IP = + "localhost"; + private static String CONTAINER_HOSTNAME = + "cnl124-localhost.site"; + private static String CONTAINER_BAREHOST = + "localhost.com"; + + @Before + public void setUp() throws Exception { + config = new Configuration(); + config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f); + timelineClient = + new DummyTimelineClient(ApplicationId.fromString(SERVICEID)); + serviceTimelinePublisher = new ServiceTimelinePublisher(timelineClient); + serviceTimelinePublisher.init(config); + serviceTimelinePublisher.start(); + } + + @After + public void tearDown() throws Exception { + if (serviceTimelinePublisher != null) { + serviceTimelinePublisher.stop(); + } + if (timelineClient != null) { + timelineClient.stop(); + } + } + + @Test + public void testServiceAttemptEntity() { + Service service = createMockApplication(); + serviceTimelinePublisher + .serviceAttemptRegistered(service, new YarnConfiguration()); + + Collection lastPublishedEntities = + ((DummyTimelineClient) timelineClient).getLastPublishedEntities(); + // 2 entities because during registration component also registered. 
+ assertEquals(2, lastPublishedEntities.size()); + for (TimelineEntity timelineEntity : lastPublishedEntities) { + if (timelineEntity.getType() == ServiceTimelineEntityType.COMPONENT + .toString()) { + verifyComponentTimelineEntity(timelineEntity); + } else { + verifyServiceAttemptTimelineEntity(timelineEntity, null, true); + } + } + + ServiceContext context = new ServiceContext(); + context.attemptId = ApplicationAttemptId + .newInstance(ApplicationId.fromString(service.getId()), 1); + String exitDiags = "service killed"; + serviceTimelinePublisher.serviceAttemptUnregistered(context, exitDiags); + lastPublishedEntities = + ((DummyTimelineClient) timelineClient).getLastPublishedEntities(); + for (TimelineEntity timelineEntity : lastPublishedEntities) { + if (timelineEntity.getType() == ServiceTimelineEntityType.SERVICE_ATTEMPT + .toString()) { + verifyServiceAttemptTimelineEntity(timelineEntity, exitDiags, + false); + } + } + } + + @Test + public void testComponentInstanceEntity() { + Container container = new Container(); + container.id(CONTAINER_ID).ip(CONTAINER_IP).bareHost(CONTAINER_BAREHOST) + .hostname(CONTAINER_HOSTNAME).state(ContainerState.RUNNING_BUT_UNREADY) + .launchTime(new Date()); + ComponentInstanceId id = new ComponentInstanceId(0, COMPONENT_NAME); + ComponentInstance instance = mock(ComponentInstance.class); + when(instance.getCompName()).thenReturn(COMPONENT_NAME); + when(instance.getCompInstanceName()).thenReturn("comp_instance_name"); + serviceTimelinePublisher.componentInstanceStarted(container, + instance); + + Collection lastPublishedEntities = + ((DummyTimelineClient) timelineClient).getLastPublishedEntities(); + assertEquals(1, lastPublishedEntities.size()); + TimelineEntity entity = lastPublishedEntities.iterator().next(); + + assertEquals(1, entity.getEvents().size()); + assertEquals(CONTAINER_ID, entity.getId()); + assertEquals(CONTAINER_BAREHOST, + entity.getInfo().get(ServiceTimelineMetricsConstants.BARE_HOST)); + 
assertEquals(COMPONENT_NAME, + entity.getInfo().get(ServiceTimelineMetricsConstants.COMPONENT_NAME)); + assertEquals(ContainerState.RUNNING_BUT_UNREADY.toString(), + entity.getInfo().get(ServiceTimelineMetricsConstants.STATE)); + + // updated container state + container.setState(ContainerState.READY); + serviceTimelinePublisher.componentInstanceUpdated(container); + lastPublishedEntities = + ((DummyTimelineClient) timelineClient).getLastPublishedEntities(); + assertEquals(1, lastPublishedEntities.size()); + entity = lastPublishedEntities.iterator().next(); + assertEquals(2, entity.getEvents().size()); + assertEquals(ContainerState.READY.toString(), + entity.getInfo().get(ServiceTimelineMetricsConstants.STATE)); + + } + + private void verifyServiceAttemptTimelineEntity(TimelineEntity timelineEntity, + String message, boolean isRegistedEntity) { + assertEquals(SERVICEID, timelineEntity.getId()); + assertEquals(SERVICE_NAME, + timelineEntity.getInfo().get(ServiceTimelineMetricsConstants.NAME)); + if (isRegistedEntity) { + assertEquals(ServiceState.STARTED.toString(), + timelineEntity.getInfo().get(ServiceTimelineMetricsConstants.STATE)); + assertEquals(ServiceTimelineEvent.SERVICE_ATTEMPT_REGISTERED.toString(), + timelineEntity.getEvents().iterator().next().getId()); + } else { + assertEquals("ENDED", + timelineEntity.getInfo().get(ServiceTimelineMetricsConstants.STATE).toString()); + assertEquals(message, timelineEntity.getInfo() + .get(ServiceTimelineMetricsConstants.DIAGNOSTICS_INFO)); + assertEquals(2, timelineEntity.getEvents().size()); + assertEquals(ServiceTimelineEvent.SERVICE_ATTEMPT_UNREGISTERED.toString(), + timelineEntity.getEvents().iterator().next().getId()); + } + } + + private void verifyComponentTimelineEntity(TimelineEntity entity) { + Map info = entity.getInfo(); + assertEquals("DEFAULT", entity.getId()); + assertEquals(ARTIFACTID, + info.get(ServiceTimelineMetricsConstants.ARTIFACT_ID)); + assertEquals("DOCKER", + 
info.get(ServiceTimelineMetricsConstants.ARTIFACT_TYPE)); + assertEquals("medium", + info.get(ServiceTimelineMetricsConstants.RESOURCE_PROFILE)); + assertEquals(1, info.get(ServiceTimelineMetricsConstants.RESOURCE_CPU)); + assertEquals("1024", + info.get(ServiceTimelineMetricsConstants.RESOURCE_MEMORY)); + assertEquals("sleep 1", + info.get(ServiceTimelineMetricsConstants.LAUNCH_COMMAND)); + assertEquals("false", + info.get(ServiceTimelineMetricsConstants.RUN_PRIVILEGED_CONTAINER)); + assertEquals("label", + info.get(ServiceTimelineMetricsConstants.PLACEMENT_POLICY)); + } + + private static Service createMockApplication() { + Service service = mock(Service.class); + + when(service.getId()).thenReturn(SERVICEID); + when(service.getLaunchTime()).thenReturn(new Date()); + when(service.getState()).thenReturn(ServiceState.STARTED); + when(service.getName()).thenReturn(SERVICE_NAME); + when(service.getConfiguration()).thenReturn( + new org.apache.hadoop.yarn.service.api.records.Configuration()); + + Component component = mock(Component.class); + Artifact artifact = new Artifact(); + artifact.setId(ARTIFACTID); + Resource resource = new Resource(); + resource.setCpus(1); + resource.setMemory(1024 + ""); + resource.setProfile("medium"); + when(component.getArtifact()).thenReturn(artifact); + when(component.getName()).thenReturn(COMPONENT_NAME); + when(component.getResource()).thenReturn(resource); + when(component.getLaunchCommand()).thenReturn("sleep 1"); + PlacementPolicy placementPolicy = new PlacementPolicy(); + placementPolicy.setLabel("label"); + when(component.getPlacementPolicy()).thenReturn(placementPolicy); + when(component.getConfiguration()).thenReturn( + new org.apache.hadoop.yarn.service.api.records.Configuration()); + List components = new ArrayList(); + components.add(component); + + when(service.getComponents()).thenReturn(components); + return service; + } + + protected static class DummyTimelineClient extends TimelineV2ClientImpl { + private Map 
lastPublishedEntities = + new HashMap<>(); + + public DummyTimelineClient(ApplicationId appId) { + super(appId); + } + + @Override + public void putEntitiesAsync(TimelineEntity... entities) + throws IOException, YarnException { + putEntities(entities); + } + + @Override + public void putEntities(TimelineEntity... entities) + throws IOException, YarnException { + for (TimelineEntity timelineEntity : entities) { + TimelineEntity entity = + lastPublishedEntities.get(timelineEntity.getIdentifier()); + if (entity == null) { + lastPublishedEntities.put(timelineEntity.getIdentifier(), + timelineEntity); + } else { + entity.addMetrics(timelineEntity.getMetrics()); + entity.addEvents(timelineEntity.getEvents()); + entity.addInfo(timelineEntity.getInfo()); + entity.addConfigs(timelineEntity.getConfigs()); + entity.addRelatesToEntities(timelineEntity.getRelatesToEntities()); + entity + .addIsRelatedToEntities(timelineEntity.getIsRelatedToEntities()); + } + } + } + + public Collection getLastPublishedEntities() { + return lastPublishedEntities.values(); + } + + public void reset() { + lastPublishedEntities = null; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/example-app.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/example-app.json new file mode 100644 index 0000000..5dfbd64 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/example-app.json @@ -0,0 +1,15 @@ +{ + "name": "example-app", + "components" : + [ + { + "name": "simple", + "number_of_containers": 1, + "launch_command": "sleep 2", + "resource": { + "cpus": 1, + "memory": "128" + } + } + ] +} \ No newline at end of file diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app-override.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app-override.json new file mode 100644 index 0000000..753a9cd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app-override.json @@ -0,0 +1,76 @@ +{ + "name": "app-1", + "lifetime": "3600", + "configuration": { + "properties": { + "g1": "a", + "g2": "b" + }, + "files": [ + { + "type": "PROPERTIES", + "dest_file": "file1", + "props": { + "k1": "v1", + "k2": "v2" + } + }, + { + "type": "XML", + "dest_file": "file2", + "props": { + "k3": "v3" + } + } + ] + }, + "resource": { + "cpus": 1, + "memory": "512" + }, + "components": [ + { + "name": "simple", + "launch_command": "sleep 3600", + "number_of_containers": 2, + "configuration": { + "files": [ + { + "type": "PROPERTIES", + "dest_file": "file1", + "props": { + "k1": "overridden" + } + } + ] + } + }, + { + "name": "master", + "launch_command": "sleep 3600", + "number_of_containers": 2, + "configuration": { + "properties": { + "name": "m", + "g1": "overridden" + } + } + }, + { + "name": "worker", + "number_of_containers": 2, + "launch_command": "sleep 3600", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "properties": { + "name": "worker", + "g1": "overridden-by-worker", + "timeout": "1000" + } + } + } + ] +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json new file mode 100644 index 0000000..2eb477f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/app.json @@ -0,0 +1,50 @@ +{ + "name": "app-1", + "id" : "application_1503358878042_0011", + "lifetime": "3600", + "configuration": { + "properties": { + "g1": "a", + "g2": "b", + "yarn.service.failure-count-reset.window": "60" + } + }, + "resource": { + "cpus": 1, + "memory": "512" + }, + "components": [ + { + "name": "simple", + "number_of_containers": 2, + "launch_command": "sleep 3600" + }, + { + "name": "master", + "number_of_containers": 1, + "launch_command": "sleep 3600", + "configuration": { + "properties": { + "g1": "overridden", + "g3": "will-be-overridden", + "jvm.heapsize": "512M" + } + } + }, + { + "name": "worker", + "number_of_containers": 5, + "launch_command": "sleep 3600", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "properties": { + "g1": "overridden-by-worker", + "jvm.heapsize": "512M" + } + } + } + ] +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/default.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/default.json new file mode 100644 index 0000000..73d4e7b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/default.json @@ -0,0 +1,16 @@ +{ + "name": "default-app-1", + "lifetime": "3600", 
+ "components" : + [ + { + "name": "sleep", + "number_of_containers": 1, + "launch_command": "sleep 3600", + "resource": { + "cpus": 2, + "memory": "256" + } + } + ] +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external0.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external0.json new file mode 100644 index 0000000..f0163bc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external0.json @@ -0,0 +1,15 @@ +{ + "name": "external-0", + "lifetime": "3600", + + "components" : [ + { + "name" : "comp1", + "artifact": { + "type": "SERVICE", + "id": "app-1" + } + } + ] + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external1.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external1.json new file mode 100644 index 0000000..4afdb8b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external1.json @@ -0,0 +1,30 @@ +{ + "name": "external-1", + "lifetime": "3600", + "components": [ + { + "name": "simple", + "artifact": { + "type": "SERVICE", + "id": "app-1" + } + }, + { + "name": "master", + "configuration": { + "properties": { + "g3": "is-overridden" + } + } + }, + { + "name": "other", + "launch_command": "sleep 3600", + "number_of_containers": 2, + "resource": { + "cpus": 1, + 
"memory": "512" + } + } + ] +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external2.json new file mode 100644 index 0000000..0df8e0a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/org/apache/hadoop/yarn/service/conf/examples/external2.json @@ -0,0 +1,22 @@ +{ + "name": "external-2", + "lifetime": "3600", + "components": [ + { + "name": "ext", + "artifact": { + "type": "SERVICE", + "id": "external-1" + } + }, + { + "name": "another", + "launch_command": "sleep 3600", + "number_of_containers": 1, + "resource": { + "cpus": 1, + "memory": "512" + } + } + ] +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/yarn-site.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/yarn-site.xml new file mode 100644 index 0000000..266caa9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/resources/yarn-site.xml @@ -0,0 +1,19 @@ + + + + + + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml new file mode 100644 index 0000000..716fdb7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/pom.xml @@ -0,0 +1,38 @@ + + + + + hadoop-yarn-applications + org.apache.hadoop + 3.1.0-SNAPSHOT + + 4.0.0 + hadoop-yarn-services + Apache Hadoop YARN Services + pom + 
+ + ${basedir}/../../../../hadoop-common-project/hadoop-common/target + + + + + + hadoop-yarn-services-core + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml index 644a1dc..4fb579c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml @@ -36,6 +36,8 @@ hadoop-yarn-applications-distributedshell hadoop-yarn-applications-unmanaged-am-launcher + hadoop-yarn-services + hadoop-yarn-services-api diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java new file mode 100644 index 0000000..6310178 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AppAdminClient.java @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.client.api; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; + +import java.io.IOException; +import java.util.Map; + +/** + * Client for managing applications. + */ +@Public +@Unstable +public abstract class AppAdminClient extends CompositeService { + public static final String YARN_APP_ADMIN_CLIENT_PREFIX = "yarn" + + ".application.admin.client.class."; + public static final String DEFAULT_TYPE = "yarn-service"; + public static final String DEFAULT_CLASS_NAME = "org.apache.hadoop.yarn" + + ".service.client.ServiceClient"; + + @Private + protected AppAdminClient() { + super(AppAdminClient.class.getName()); + } + + /** + *

+ * Create a new instance of AppAdminClient. + *

+ * + * @param appType application type + * @param conf configuration + * @return app admin client + */ + @Public + @Unstable + public static AppAdminClient createAppAdminClient(String appType, + Configuration conf) { + Map clientClassMap = + conf.getPropsWithPrefix(YARN_APP_ADMIN_CLIENT_PREFIX); + if (!clientClassMap.containsKey(DEFAULT_TYPE)) { + clientClassMap.put(DEFAULT_TYPE, DEFAULT_CLASS_NAME); + } + if (!clientClassMap.containsKey(appType)) { + throw new IllegalArgumentException("App admin client class name not " + + "specified for type " + appType); + } + String clientClassName = clientClassMap.get(appType); + Class clientClass; + try { + clientClass = (Class) Class.forName( + clientClassName); + } catch (ClassNotFoundException e) { + throw new YarnRuntimeException("Invalid app admin client class", e); + } + + AppAdminClient appAdminClient = ReflectionUtils.newInstance(clientClass, + conf); + appAdminClient.init(conf); + appAdminClient.start(); + return appAdminClient; + } + + /** + *

+ * Launch a new YARN application. + *

+ * + * @param fileName specification of application + * @param appName name of the application + * @param lifetime lifetime of the application + * @param queue queue of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionLaunch(String fileName, String appName, Long + lifetime, String queue) throws IOException, YarnException; + + /** + *

+ * Stop a YARN application (attempt to stop gracefully before killing the + * application). In the case of a long-running service, the service may be + * restarted later. + *

+ * + * @param appName the name of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionStop(String appName) throws IOException, + YarnException; + + /** + *

+ * Start a YARN application from a previously saved specification. In the + * case of a long-running service, the service must have been previously + * launched/started and then stopped, or previously saved but not started. + *

+ * + * @param appName the name of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionStart(String appName) throws IOException, + YarnException; + + /** + *

+ * Save the specification for a YARN application / long-running service. + * The application may be started later. + *

+ * + * @param fileName specification of application to save + * @param appName name of the application + * @param lifetime lifetime of the application + * @param queue queue of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionSave(String fileName, String appName, Long + lifetime, String queue) throws IOException, YarnException; + + /** + *

+ * Remove the specification and all application data for a YARN application. + * The application cannot be running. + *

+ * + * @param appName the name of the application + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionDestroy(String appName) throws IOException, + YarnException; + + /** + *

+ * Change the number of running containers for a component of a YARN + * application / long-running service. + *

+ * + * @param appName the name of the application + * @param componentCounts map of component name to new component count or + * amount to change existing component count (e.g. + * 5, +5, -5) + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int actionFlex(String appName, Map + componentCounts) throws IOException, YarnException; + + /** + *

+ * Upload AM dependencies to HDFS. This makes future application launches + * faster since the dependencies do not have to be uploaded on each launch. + *

+ * + * @return exit code + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract int enableFastLaunch() throws IOException, + YarnException; + + /** + *

+ * Get detailed status string for a YARN application. + *

+ * + * @param applicationId application id + * @return status string + * @throws IOException IOException + * @throws YarnException exception in client or server + */ + @Public + @Unstable + public abstract String getStatusString(String applicationId) throws + IOException, YarnException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index 2a9b3bc..fb08fcd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -23,12 +23,7 @@ import java.io.PrintWriter; import java.nio.charset.Charset; import java.text.DecimalFormat; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; @@ -54,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.client.api.AppAdminClient; import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; @@ -85,6 +81,7 @@ "%30s\t%20s\t%20s\t%20s\t%20s\t%20s\t%35s" + System.getProperty("line.separator"); + public static final String APP = "app"; public static final String APPLICATION = "application"; public static final String APPLICATION_ATTEMPT = "applicationattempt"; public static final String CONTAINER = 
"container"; @@ -93,22 +90,52 @@ public static final String UPDATE_LIFETIME = "updateLifetime"; public static final String CHANGE_APPLICATION_QUEUE = "changeQueue"; + // app admin options + public static final String LAUNCH_CMD = "launch"; + public static final String STOP_CMD = "stop"; + public static final String START_CMD = "start"; + public static final String SAVE_CMD = "save"; + public static final String DESTROY_CMD = "destroy"; + public static final String FLEX_CMD = "flex"; + public static final String COMPONENT = "component"; + public static final String ENABLE_FAST_LAUNCH = "enableFastLaunch"; + + private static String firstArg = null; + private boolean allAppStates; public static void main(String[] args) throws Exception { ApplicationCLI cli = new ApplicationCLI(); cli.setSysOutPrintStream(System.out); cli.setSysErrPrintStream(System.err); - int res = ToolRunner.run(cli, args); + int res = ToolRunner.run(cli, preProcessArgs(args)); cli.stop(); System.exit(res); } + @VisibleForTesting + public static String[] preProcessArgs(String[] args) { + if (args.length > 0) { + // first argument (app|application|applicationattempt|container) must + // be stripped off for GenericOptionsParser to work + firstArg = args[0]; + return Arrays.copyOfRange(args, 1, args.length); + } else { + return args; + } + } + @Override public int run(String[] args) throws Exception { Options opts = new Options(); String title = null; - if (args.length > 0 && args[0].equalsIgnoreCase(APPLICATION)) { + if (firstArg != null) { + title = firstArg; + } else if (args.length > 0) { + title = args[0]; + } + if (title != null && (title.equalsIgnoreCase(APPLICATION) || title + .equalsIgnoreCase(APP))) { title = APPLICATION; opts.addOption(STATUS_CMD, true, "Prints the status of the application."); @@ -168,8 +195,52 @@ public int run(String[] args) throws Exception { opts.getOption(UPDATE_PRIORITY).setArgName("Priority"); opts.getOption(UPDATE_LIFETIME).setArgName("Timeout"); 
opts.getOption(CHANGE_APPLICATION_QUEUE).setArgName("Queue Name"); - } else if (args.length > 0 && args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { - title = APPLICATION_ATTEMPT; + opts.addOption(LAUNCH_CMD, true, "Launches application from " + + "specification file (saves specification and starts application). " + + "Options -updateLifetime and -changeQueue can be specified to alter" + + " the values provided in the file. Supports -appTypes option to " + + "specify which client implementation to use."); + opts.addOption(STOP_CMD, true, "Stops application gracefully (may be " + + "started again later). If name is provided, appType must be " + + "provided unless it is the default yarn-service. If ID is provided," + + " the appType will be looked up. Supports -appTypes option to " + + "specify which client implementation to use."); + opts.addOption(START_CMD, true, "Starts a previously saved " + + "application. Supports -appTypes option to specify which client " + + "implementation to use."); + opts.addOption(SAVE_CMD, true, "Saves specification file for " + + "an application. Options -updateLifetime and -changeQueue can be " + + "specified to alter the values provided in the file. Supports " + + "-appTypes option to specify which client implementation to use."); + opts.addOption(DESTROY_CMD, true, "Destroys a saved application " + + "specification and removes all application data permanently. " + + "Supports -appTypes option to specify which client implementation " + + "to use."); + opts.addOption(FLEX_CMD, true, "Changes number of " + + "running containers for a component of an application / " + + "long-running service. Requires -component option. If name is " + + "provided, appType must be provided unless it is the default " + + "yarn-service. If ID is provided, the appType will be looked up. 
" + + "Supports -appTypes option to specify which client implementation " + + "to use."); + opts.addOption(COMPONENT, true, "Works with -flex option to change " + + "the number of components/containers running for an application / " + + "long-running service. Supports absolute or relative changes, such " + + "as +1, 2, or -3."); + opts.addOption(ENABLE_FAST_LAUNCH, false, "Uploads AM dependencies " + + "to HDFS to make future launches faster. Supports -appTypes option" + + " to specify which client implementation to use."); + opts.getOption(LAUNCH_CMD).setArgName("Application Name> 0 && args[0].equalsIgnoreCase(CONTAINER)) { - title = CONTAINER; + } else if (title != null && title.equalsIgnoreCase(CONTAINER)) { opts.addOption(STATUS_CMD, true, "Prints the status of the container."); opts.addOption(LIST_CMD, true, @@ -205,23 +275,53 @@ public int run(String[] args) throws Exception { printUsage(title, opts); return exitCode; } + String[] unparsedArgs = cliParser.getArgs(); + if (firstArg == null) { + if (unparsedArgs.length != 1) { + printUsage(title, opts); + return exitCode; + } + } else { + if (unparsedArgs.length != 0) { + printUsage(title, opts); + return exitCode; + } + } if (cliParser.hasOption(STATUS_CMD)) { - if (args.length != 3) { + if (hasAnyOtherCLIOptions(cliParser, opts, STATUS_CMD)) { printUsage(title, opts); return exitCode; } - if (args[0].equalsIgnoreCase(APPLICATION)) { - exitCode = printApplicationReport(cliParser.getOptionValue(STATUS_CMD)); - } else if (args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { + if (title.equalsIgnoreCase(APPLICATION) || + title.equalsIgnoreCase(APP)) { + ApplicationReport report = printApplicationReport(cliParser + .getOptionValue(STATUS_CMD)); + if (report == null) { + exitCode = -1; + } else { + exitCode = 0; + String appType = report.getApplicationType(); + try { + AppAdminClient client = AppAdminClient.createAppAdminClient(appType, + getConf()); + sysout.println("Detailed Application Status :"); + 
sysout.println(client.getStatusString(cliParser.getOptionValue( + STATUS_CMD))); + } catch (IllegalArgumentException e) { + // app type does not have app admin client implementation + } + } + } else if (title.equalsIgnoreCase(APPLICATION_ATTEMPT)) { exitCode = printApplicationAttemptReport(cliParser .getOptionValue(STATUS_CMD)); - } else if (args[0].equalsIgnoreCase(CONTAINER)) { + } else if (title.equalsIgnoreCase(CONTAINER)) { exitCode = printContainerReport(cliParser.getOptionValue(STATUS_CMD)); } return exitCode; } else if (cliParser.hasOption(LIST_CMD)) { - if (args[0].equalsIgnoreCase(APPLICATION)) { + if (title.equalsIgnoreCase(APPLICATION) || + title.equalsIgnoreCase(APP)) { allAppStates = false; Set appTypes = new HashSet(); if (cliParser.hasOption(APP_TYPE_CMD)) { @@ -272,21 +372,21 @@ public int run(String[] args) throws Exception { } } listApplications(appTypes, appStates, appTags); - } else if (args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { - if (args.length != 3) { + } else if (title.equalsIgnoreCase(APPLICATION_ATTEMPT)) { + if (hasAnyOtherCLIOptions(cliParser, opts, LIST_CMD)) { printUsage(title, opts); return exitCode; } listApplicationAttempts(cliParser.getOptionValue(LIST_CMD)); - } else if (args[0].equalsIgnoreCase(CONTAINER)) { - if (args.length != 3) { + } else if (title.equalsIgnoreCase(CONTAINER)) { + if (hasAnyOtherCLIOptions(cliParser, opts, LIST_CMD)) { printUsage(title, opts); return exitCode; } listContainers(cliParser.getOptionValue(LIST_CMD)); } } else if (cliParser.hasOption(KILL_CMD)) { - if (args.length < 3 || hasAnyOtherCLIOptions(cliParser, opts, KILL_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, KILL_CMD)) { printUsage(title, opts); return exitCode; } @@ -299,7 +399,7 @@ public int run(String[] args) throws Exception { moveApplicationAcrossQueues(cliParser.getOptionValue(MOVE_TO_QUEUE_CMD), cliParser.getOptionValue(QUEUE_CMD)); } else if (cliParser.hasOption(FAIL_CMD)) { - if 
(!args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { + if (!title.equalsIgnoreCase(APPLICATION_ATTEMPT)) { printUsage(title, opts); return exitCode; } @@ -314,6 +414,103 @@ public int run(String[] args) throws Exception { } updateApplicationPriority(cliParser.getOptionValue(APP_ID), cliParser.getOptionValue(UPDATE_PRIORITY)); + } else if (cliParser.hasOption(SIGNAL_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, SIGNAL_CMD)) { + printUsage(title, opts); + return exitCode; + } + final String[] signalArgs = cliParser.getOptionValues(SIGNAL_CMD); + final String containerId = signalArgs[0]; + SignalContainerCommand command = + SignalContainerCommand.OUTPUT_THREAD_DUMP; + if (signalArgs.length == 2) { + command = SignalContainerCommand.valueOf(signalArgs[1]); + } + signalToContainer(containerId, command); + } else if (cliParser.hasOption(LAUNCH_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, LAUNCH_CMD, APP_TYPE_CMD, + UPDATE_LIFETIME, CHANGE_APPLICATION_QUEUE)) { + printUsage(title, opts); + return exitCode; + } + String appType = getSingleAppTypeFromCLI(cliParser); + Long lifetime = null; + if (cliParser.hasOption(UPDATE_LIFETIME)) { + lifetime = Long.parseLong(cliParser.getOptionValue(UPDATE_LIFETIME)); + } + String queue = null; + if (cliParser.hasOption(CHANGE_APPLICATION_QUEUE)) { + queue = cliParser.getOptionValue(CHANGE_APPLICATION_QUEUE); + } + String[] nameAndFile = cliParser.getOptionValues(LAUNCH_CMD); + return AppAdminClient.createAppAdminClient(appType, getConf()) + .actionLaunch(nameAndFile[1], nameAndFile[0], lifetime, queue); + } else if (cliParser.hasOption(STOP_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, STOP_CMD, APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + String[] appNameAndType = getAppNameAndType(cliParser, STOP_CMD); + return AppAdminClient.createAppAdminClient(appNameAndType[1], getConf()) + .actionStop(appNameAndType[0]); + } else if (cliParser.hasOption(START_CMD)) { + if 
(hasAnyOtherCLIOptions(cliParser, opts, START_CMD, APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + String appType = getSingleAppTypeFromCLI(cliParser); + return AppAdminClient.createAppAdminClient(appType, getConf()) + .actionStart(cliParser.getOptionValue(START_CMD)); + } else if (cliParser.hasOption(SAVE_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, SAVE_CMD, APP_TYPE_CMD, + UPDATE_LIFETIME, CHANGE_APPLICATION_QUEUE)) { + printUsage(title, opts); + return exitCode; + } + String appType = getSingleAppTypeFromCLI(cliParser); + Long lifetime = null; + if (cliParser.hasOption(UPDATE_LIFETIME)) { + lifetime = Long.parseLong(cliParser.getOptionValue(UPDATE_LIFETIME)); + } + String queue = null; + if (cliParser.hasOption(CHANGE_APPLICATION_QUEUE)) { + queue = cliParser.getOptionValue(CHANGE_APPLICATION_QUEUE); + } + String[] nameAndFile = cliParser.getOptionValues(SAVE_CMD); + return AppAdminClient.createAppAdminClient(appType, getConf()) + .actionSave(nameAndFile[1], nameAndFile[0], lifetime, queue); + } else if (cliParser.hasOption(DESTROY_CMD)) { + if (hasAnyOtherCLIOptions(cliParser, opts, DESTROY_CMD, APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + String appType = getSingleAppTypeFromCLI(cliParser); + return AppAdminClient.createAppAdminClient(appType, getConf()) + .actionDestroy(cliParser.getOptionValue(DESTROY_CMD)); + } else if (cliParser.hasOption(FLEX_CMD)) { + if (!cliParser.hasOption(COMPONENT) || + hasAnyOtherCLIOptions(cliParser, opts, FLEX_CMD, COMPONENT, + APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + String[] rawCounts = cliParser.getOptionValues(COMPONENT); + Map counts = new HashMap<>(rawCounts.length/2); + for (int i = 0; i < rawCounts.length - 1; i+=2) { + counts.put(rawCounts[i], rawCounts[i+1]); + } + String[] appNameAndType = getAppNameAndType(cliParser, FLEX_CMD); + return AppAdminClient.createAppAdminClient(appNameAndType[1], getConf()) + .actionFlex(appNameAndType[0], 
counts); + } else if (cliParser.hasOption(ENABLE_FAST_LAUNCH)) { + String appType = getSingleAppTypeFromCLI(cliParser); + if (hasAnyOtherCLIOptions(cliParser, opts, ENABLE_FAST_LAUNCH, + APP_TYPE_CMD)) { + printUsage(title, opts); + return exitCode; + } + return AppAdminClient.createAppAdminClient(appType, getConf()) + .enableFastLaunch(); } else if (cliParser.hasOption(UPDATE_LIFETIME)) { if (!cliParser.hasOption(APP_ID)) { printUsage(title, opts); @@ -332,19 +529,6 @@ public int run(String[] args) throws Exception { } moveApplicationAcrossQueues(cliParser.getOptionValue(APP_ID), cliParser.getOptionValue(CHANGE_APPLICATION_QUEUE)); - } else if (cliParser.hasOption(SIGNAL_CMD)) { - if (args.length < 3 || args.length > 4) { - printUsage(title, opts); - return exitCode; - } - final String[] signalArgs = cliParser.getOptionValues(SIGNAL_CMD); - final String containerId = signalArgs[0]; - SignalContainerCommand command = - SignalContainerCommand.OUTPUT_THREAD_DUMP; - if (signalArgs.length == 2) { - command = SignalContainerCommand.valueOf(signalArgs[1]); - } - signalToContainer(containerId, command); } else { syserr.println("Invalid Command Usage : "); printUsage(title, opts); @@ -352,6 +536,47 @@ public int run(String[] args) throws Exception { return 0; } + private ApplicationReport getApplicationReport(ApplicationId applicationId) + throws IOException, YarnException { + ApplicationReport appReport = null; + try { + appReport = client.getApplicationReport(applicationId); + } catch (ApplicationNotFoundException e) { + throw new YarnException("Application with id '" + applicationId + + "' doesn't exist in RM or Timeline Server."); + } + return appReport; + } + + private String[] getAppNameAndType(CommandLine cliParser, String option) + throws IOException, YarnException { + String applicationIdOrName = cliParser.getOptionValue(option); + try { + ApplicationId id = ApplicationId.fromString(applicationIdOrName); + ApplicationReport report = getApplicationReport(id); + 
return new String[]{report.getName(), report.getApplicationType()}; + } catch (IllegalArgumentException e) { + // assume CLI option provided the app name + // and read appType from command line since id wasn't provided + String appType = getSingleAppTypeFromCLI(cliParser); + return new String[]{applicationIdOrName, appType}; + } + } + + private static String getSingleAppTypeFromCLI(CommandLine cliParser) { + if (cliParser.hasOption(APP_TYPE_CMD)) { + String[] types = cliParser.getOptionValues(APP_TYPE_CMD); + if (types != null) { + for (String type : types) { + if (!type.trim().isEmpty()) { + return StringUtils.toLowerCase(type).trim(); + } + } + } + } + return AppAdminClient.DEFAULT_TYPE; + } + private void updateApplicationTimeout(String applicationId, ApplicationTimeoutType timeoutType, long timeoutInSec) throws YarnException, IOException { @@ -572,7 +797,7 @@ private void listApplications(Set appTypes, /** * Kills applications with the application id as appId * - * @param Array of applicationIds + * @param applicationIds Array of applicationIds * @return errorCode * @throws YarnException * @throws IOException @@ -663,10 +888,10 @@ private void failApplicationAttempt(String attemptId) throws YarnException, * Prints the application report for an application id. * * @param applicationId - * @return exitCode + * @return ApplicationReport * @throws YarnException */ - private int printApplicationReport(String applicationId) + private ApplicationReport printApplicationReport(String applicationId) throws YarnException, IOException { ApplicationReport appReport = null; try { @@ -675,7 +900,7 @@ private int printApplicationReport(String applicationId) } catch (ApplicationNotFoundException e) { sysout.println("Application with id '" + applicationId + "' doesn't exist in RM or Timeline Server."); - return -1; + return null; } // Use PrintWriter.println, which uses correct platform line ending. 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); @@ -739,11 +964,11 @@ private int printApplicationReport(String applicationId) + "' doesn't exist in RM."); appReportStr.close(); sysout.println(baos.toString("UTF-8")); - return -1; + return null; } appReportStr.close(); sysout.println(baos.toString("UTF-8")); - return 0; + return appReport; } private void printResourceUsage(PrintWriter appReportStr, @@ -856,11 +1081,12 @@ private void updateApplicationPriority(String applicationId, String priority) @SuppressWarnings("unchecked") private boolean hasAnyOtherCLIOptions(CommandLine cliParser, Options opts, - String excludeOption) { + String... excludeOptions) { Collection
+ org.apache.zookeeper + zookeeper + + + + org.apache.curator + curator-client + + + org.apache.curator curator-framework org.apache.curator - curator-test - test + curator-recipes + + + + commons-cli + commons-cli + + + + commons-daemon + commons-daemon + + + + commons-io + commons-io + + + + commons-lang + commons-lang + + + + commons-net + commons-net + + + + com.fasterxml.jackson.core + jackson-annotations + + + + com.fasterxml.jackson.core + jackson-core + + + + com.fasterxml.jackson.core + jackson-databind + + + + com.google.guava + guava + + + + dnsjava + dnsjava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperations.java new file mode 100644 index 0000000..3abfb6c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperations.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.registry.client.api; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.service.Service; + +import java.io.IOException; + +/** + * DNS Operations. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface DNSOperations extends Service { + + /** + * Register a service based on a service record. + * + * @param path the ZK path. + * @param record record providing DNS registration info. + * @throws IOException Any other IO Exception. + */ + void register(String path, ServiceRecord record) + throws IOException; + + + /** + * Delete a service's registered endpoints. + * + * If the operation returns without an error then the entry has been + * deleted. + * + * @param path the ZK path. + * @param record service record + * @throws IOException Any other IO Exception + * + */ + void delete(String path, ServiceRecord record) + throws IOException; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java new file mode 100644 index 0000000..1a8bb3e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.registry.client.api; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.registry.server.dns.RegistryDNS; + +/** + * A factory for DNS operation service instances. + */ +public final class DNSOperationsFactory implements RegistryConstants { + + /** + * DNS Implementation type. + */ + public enum DNSImplementation { + DNSJAVA + } + + private DNSOperationsFactory() { + } + + /** + * Create and initialize a DNS operations instance. + * + * @param conf configuration + * @return a DNS operations instance + */ + public static DNSOperations createInstance(Configuration conf) { + return createInstance("DNSOperations", DNSImplementation.DNSJAVA, conf); + } + + /** + * Create and initialize a registry operations instance. + * Access rights will be determined from the configuration. + * + * @param name name of the instance + * @param impl the DNS implementation. 
+ * @param conf configuration + * @return a registry operations instance + */ + public static DNSOperations createInstance(String name, + DNSImplementation impl, + Configuration conf) { + Preconditions.checkArgument(conf != null, "Null configuration"); + DNSOperations operations = null; + switch (impl) { + case DNSJAVA: + operations = new RegistryDNS(name); + break; + + default: + throw new IllegalArgumentException( + String.format("%s is not available", impl.toString())); + } + + //operations.init(conf); + return operations; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java index a6fe216..0006dfd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java @@ -44,17 +44,131 @@ String ZK_PREFIX = REGISTRY_PREFIX + "zk."; /** + * Prefix for dns-specific options: {@value} + *

+ * For clients using other protocols, these options are not supported. + */ + String DNS_PREFIX = REGISTRY_PREFIX + "dns."; + + /** * flag to indicate whether or not the registry should - * be enabled in the RM: {@value} + * be enabled in the RM: {@value}. */ String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled"; /** - * Defaut value for enabling the registry in the RM: {@value} + * Defaut value for enabling the registry in the RM: {@value}. */ boolean DEFAULT_REGISTRY_ENABLED = false; /** + * flag to indicate whether or not the registry should + * be enabled in the RM: {@value}. + */ + String KEY_DNS_ENABLED = DNS_PREFIX + "enabled"; + + /** + * Defaut value for enabling the DNS in the Registry: {@value}. + */ + boolean DEFAULT_DNS_ENABLED = false; + + /** + * DNS domain name key. + */ + String KEY_DNS_DOMAIN = DNS_PREFIX + "domain-name"; + + /** + * Max length of a label (node delimited by a dot in the FQDN). + */ + int MAX_FQDN_LABEL_LENGTH = 63; + + /** + * DNS bind address. + */ + String KEY_DNS_BIND_ADDRESS = DNS_PREFIX + "bind-address"; + + /** + * DNS port number key. + */ + String KEY_DNS_PORT = DNS_PREFIX + "bind-port"; + + /** + * Default DNS port number. + */ + int DEFAULT_DNS_PORT = 5353; + + /** + * DNSSEC Enabled? + */ + String KEY_DNSSEC_ENABLED = DNS_PREFIX + "dnssec.enabled"; + + /** + * DNSSEC Enabled? + */ + String KEY_DNSSEC_PUBLIC_KEY = DNS_PREFIX + "public-key"; + + /** + * DNSSEC private key file. + */ + String KEY_DNSSEC_PRIVATE_KEY_FILE = DNS_PREFIX + "private-key-file"; + + /** + * Default DNSSEC private key file path. + */ + String DEFAULT_DNSSEC_PRIVATE_KEY_FILE = + "/etc/hadoop/conf/registryDNS.private"; + + /** + * Zone subnet. + */ + String KEY_DNS_ZONE_SUBNET = DNS_PREFIX + "zone-subnet"; + + /** + * Zone subnet mask. + */ + String KEY_DNS_ZONE_MASK = DNS_PREFIX + "zone-mask"; + + /** + * Zone subnet IP min. + */ + String KEY_DNS_ZONE_IP_MIN = DNS_PREFIX + "zone-ip-min"; + + /** + * Zone subnet IP max. 
+ */ + String KEY_DNS_ZONE_IP_MAX = DNS_PREFIX + "zone-ip-max"; + + /** + * DNS Record TTL. + */ + String KEY_DNS_TTL = DNS_PREFIX + "dns-ttl"; + + /** + * DNS Record TTL. + */ + String KEY_DNS_ZONES_DIR = DNS_PREFIX + "zones-dir"; + + /** + * Split Reverse Zone. + * It may be necessary to spit large reverse zone subnets + * into multiple zones to handle existing hosts collocated + * with containers. + */ + String KEY_DNS_SPLIT_REVERSE_ZONE = DNS_PREFIX + "split-reverse-zone"; + + /** + * Default value for splitting the reverse zone. + */ + boolean DEFAULT_DNS_SPLIT_REVERSE_ZONE = false; + + /** + * Split Reverse Zone IP Range. + * How many IPs should be part of each reverse zone split + */ + String KEY_DNS_SPLIT_REVERSE_ZONE_RANGE = DNS_PREFIX + + "split-reverse-zone-range"; + + /** * Key to set if the registry is secure: {@value}. * Turning it on changes the permissions policy from "open access" * to restrictions on kerberos with the option of @@ -69,12 +183,12 @@ boolean DEFAULT_REGISTRY_SECURE = false; /** - * Root path in the ZK tree for the registry: {@value} + * Root path in the ZK tree for the registry: {@value}. */ String KEY_REGISTRY_ZK_ROOT = ZK_PREFIX + "root"; /** - * Default root of the yarn registry: {@value} + * Default root of the yarn registry: {@value}. */ String DEFAULT_ZK_REGISTRY_ROOT = "/registry"; @@ -92,7 +206,7 @@ /** * Registry client uses Kerberos: authentication is automatic from - * logged in user + * logged in user. */ String REGISTRY_CLIENT_AUTH_KERBEROS = "kerberos"; @@ -104,12 +218,12 @@ String REGISTRY_CLIENT_AUTH_DIGEST = "digest"; /** - * No authentication; client is anonymous + * No authentication; client is anonymous. */ String REGISTRY_CLIENT_AUTH_ANONYMOUS = ""; /** - * Registry client authentication ID + * Registry client authentication ID. *

* This is only used in secure clusters with * {@link #KEY_REGISTRY_CLIENT_AUTH} set to @@ -134,17 +248,17 @@ /** * List of hostname:port pairs defining the - * zookeeper quorum binding for the registry {@value} + * zookeeper quorum binding for the registry {@value}. */ String KEY_REGISTRY_ZK_QUORUM = ZK_PREFIX + "quorum"; /** - * The default zookeeper quorum binding for the registry: {@value} + * The default zookeeper quorum binding for the registry: {@value}. */ String DEFAULT_REGISTRY_ZK_QUORUM = "localhost:2181"; /** - * Zookeeper session timeout in milliseconds: {@value} + * Zookeeper session timeout in milliseconds: {@value}. */ String KEY_REGISTRY_ZK_SESSION_TIMEOUT = ZK_PREFIX + "session.timeout.ms"; @@ -259,7 +373,7 @@ String KEY_REGISTRY_CLIENT_JAAS_CONTEXT = REGISTRY_PREFIX + "jaas.context"; /** - * default client-side registry JAAS context: {@value} + * default client-side registry JAAS context: {@value}. */ String DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT = "Client"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java index 443654d..704b097 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java @@ -67,6 +67,14 @@ public static RegistryOperations createInstance(String name, Configuration conf) return operations; } + public static RegistryOperationsClient createClient(String name, + Configuration conf) { + Preconditions.checkArgument(conf != null, "Null configuration"); + RegistryOperationsClient operations = new RegistryOperationsClient(name); + operations.init(conf); + return operations; + } + 
/** * Create and initialize an anonymous read/write registry operations instance. * In a secure cluster, this instance will only have read access to the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java index ec59d59..05df325 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java @@ -22,7 +22,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.registry.client.exceptions.InvalidRecordException; -import static org.apache.hadoop.registry.client.types.AddressTypes.*; import org.apache.hadoop.registry.client.types.Endpoint; import org.apache.hadoop.registry.client.types.ProtocolTypes; import org.apache.hadoop.registry.client.types.ServiceRecord; @@ -36,6 +35,8 @@ import java.util.List; import java.util.Map; +import static org.apache.hadoop.registry.client.types.AddressTypes.*; + /** * Static methods to work with registry types —primarily endpoints and the * list representation of addresses. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java index 7f35c3f..8713920 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java @@ -28,6 +28,9 @@ import org.apache.curator.framework.api.CreateBuilder; import org.apache.curator.framework.api.DeleteBuilder; import org.apache.curator.framework.api.GetChildrenBuilder; +import org.apache.curator.framework.recipes.cache.TreeCache; +import org.apache.curator.framework.recipes.cache.TreeCacheEvent; +import org.apache.curator.framework.recipes.cache.TreeCacheListener; import org.apache.curator.retry.BoundedExponentialBackoffRetry; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -36,14 +39,14 @@ import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.service.CompositeService; -import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.registry.client.binding.RegistryPathUtils; import org.apache.hadoop.registry.client.exceptions.AuthenticationFailedException; import org.apache.hadoop.registry.client.exceptions.NoChildrenForEphemeralsException; import org.apache.hadoop.registry.client.exceptions.NoPathPermissionsException; import org.apache.hadoop.registry.client.exceptions.RegistryIOException; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.service.ServiceStateException; import 
org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooDefs; @@ -69,12 +72,12 @@ LoggerFactory.getLogger(CuratorService.class); /** - * the Curator binding + * the Curator binding. */ private CuratorFramework curator; /** - * Path to the registry root + * Path to the registry root. */ private String registryRoot; @@ -85,17 +88,17 @@ private final RegistryBindingSource bindingSource; /** - * Security service + * Security service. */ private RegistrySecurity registrySecurity; /** - * the connection binding text for messages + * the connection binding text for messages. */ private String connectionDescription; /** - * Security connection diagnostics + * Security connection diagnostics. */ private String securityConnectionDiagnostics = ""; @@ -106,10 +109,16 @@ private EnsembleProvider ensembleProvider; /** + * Registry tree cache. + */ + private TreeCache treeCache; + + /** * Construct the service. - * @param name service name + * + * @param name service name * @param bindingSource source of binding information. - * If null: use this instance + * If null: use this instance */ public CuratorService(String name, RegistryBindingSource bindingSource) { super(name); @@ -122,7 +131,8 @@ public CuratorService(String name, RegistryBindingSource bindingSource) { /** * Create an instance using this service as the binding source (i.e. read - * configuration options from the registry) + * configuration options from the registry). + * * @param name service name */ public CuratorService(String name) { @@ -131,7 +141,8 @@ public CuratorService(String name) { /** * Init the service. - * This is where the security bindings are set up + * This is where the security bindings are set up. + * * @param conf configuration of the service * @throws Exception */ @@ -155,6 +166,7 @@ protected void serviceInit(Configuration conf) throws Exception { /** * Start the service. * This is where the curator instance is started. 
+ * * @throws Exception */ @Override @@ -167,29 +179,35 @@ protected void serviceStart() throws Exception { } /** - * Close the ZK connection if it is open + * Close the ZK connection if it is open. */ @Override protected void serviceStop() throws Exception { IOUtils.closeStream(curator); + + if (treeCache != null) { + treeCache.close(); + } super.serviceStop(); } /** - * Internal check that a service is in the live state + * Internal check that a service is in the live state. + * * @throws ServiceStateException if not */ private void checkServiceLive() throws ServiceStateException { if (!isInState(STATE.STARTED)) { throw new ServiceStateException( "Service " + getName() + " is in wrong state: " - + getServiceState()); + + getServiceState()); } } /** * Flag to indicate whether or not the registry is secure. * Valid once the service is inited. + * * @return service security policy */ public boolean isSecure() { @@ -197,7 +215,8 @@ public boolean isSecure() { } /** - * Get the registry security helper + * Get the registry security helper. + * * @return the registry security helper */ protected RegistrySecurity getRegistrySecurity() { @@ -205,7 +224,8 @@ protected RegistrySecurity getRegistrySecurity() { } /** - * Build the security diagnostics string + * Build the security diagnostics string. + * * @return a string for diagnostics */ protected String buildSecurityDiagnostics() { @@ -224,6 +244,7 @@ protected String buildSecurityDiagnostics() { * Create a new curator instance off the root path; using configuration * options provided in the service configuration to set timeouts and * retry policy. 
+ * * @return the newly created creator */ private CuratorFramework createCurator() throws IOException { @@ -240,24 +261,24 @@ private CuratorFramework createCurator() throws IOException { int retryCeiling = conf.getInt(KEY_REGISTRY_ZK_RETRY_CEILING, DEFAULT_ZK_RETRY_CEILING); - if (LOG.isDebugEnabled()) { - LOG.debug("Creating CuratorService with connection {}", + LOG.info("Creating CuratorService with connection {}", connectionDescription); - } + CuratorFramework framework; synchronized (CuratorService.class) { // set the security options // build up the curator itself - CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder(); + CuratorFrameworkFactory.Builder builder = + CuratorFrameworkFactory.builder(); builder.ensembleProvider(ensembleProvider) - .connectionTimeoutMs(connectionTimeout) - .sessionTimeoutMs(sessionTimeout) + .connectionTimeoutMs(connectionTimeout) + .sessionTimeoutMs(sessionTimeout) - .retryPolicy(new BoundedExponentialBackoffRetry(retryInterval, - retryCeiling, - retryTimes)); + .retryPolicy(new BoundedExponentialBackoffRetry(retryInterval, + retryCeiling, + retryTimes)); // set up the builder AND any JVM context registrySecurity.applySecurityEnvironment(builder); @@ -273,21 +294,23 @@ private CuratorFramework createCurator() throws IOException { @Override public String toString() { return super.toString() - + " " + bindingDiagnosticDetails(); + + " " + bindingDiagnosticDetails(); } /** - * Get the binding diagnostics + * Get the binding diagnostics. + * * @return a diagnostics string valid after the service is started. */ public String bindingDiagnosticDetails() { return " Connection=\"" + connectionDescription + "\"" - + " root=\"" + registryRoot + "\"" - + " " + securityConnectionDiagnostics; + + " root=\"" + registryRoot + "\"" + + " " + securityConnectionDiagnostics; } /** - * Create a full path from the registry root and the supplied subdir + * Create a full path from the registry root and the supplied subdir. 
+ * * @param path path of operation * @return an absolute path * @throws IllegalArgumentException if the path is invalide @@ -299,6 +322,7 @@ protected String createFullPath(String path) throws IOException { /** * Get the registry binding source ... this can be used to * create new ensemble providers + * * @return the registry binding source in use */ public RegistryBindingSource getBindingSource() { @@ -308,23 +332,23 @@ public RegistryBindingSource getBindingSource() { /** * Create the ensemble provider for this registry, by invoking * {@link RegistryBindingSource#supplyBindingInformation()} on - * the provider stored in {@link #bindingSource} + * the provider stored in {@link #bindingSource}. * Sets {@link #ensembleProvider} to that value; * sets {@link #connectionDescription} to the binding info * for use in toString and logging; - * */ protected void createEnsembleProvider() { BindingInformation binding = bindingSource.supplyBindingInformation(); connectionDescription = binding.description - + " " + securityConnectionDiagnostics; + + " " + securityConnectionDiagnostics; ensembleProvider = binding.ensembleProvider; } /** * Supply the binding information. * This implementation returns a fixed ensemble bonded to - * the quorum supplied by {@link #buildConnectionString()} + * the quorum supplied by {@link #buildConnectionString()}. + * * @return the binding information */ @Override @@ -339,17 +363,19 @@ public BindingInformation supplyBindingInformation() { /** * Override point: get the connection string used to connect to - * the ZK service + * the ZK service. + * * @return a registry quorum */ protected String buildConnectionString() { return getConfig().getTrimmed(KEY_REGISTRY_ZK_QUORUM, - DEFAULT_REGISTRY_ZK_QUORUM); + DEFAULT_REGISTRY_ZK_QUORUM); } /** - * Create an IOE when an operation fails - * @param path path of operation + * Create an IOE when an operation fails. 
+ * + * @param path path of operation * @param operation operation attempted * @param exception caught the exception caught * @return an IOE to throw that contains the path and operation details. @@ -361,8 +387,9 @@ protected IOException operationFailure(String path, } /** - * Create an IOE when an operation fails - * @param path path of operation + * Create an IOE when an operation fails. + * + * @param path path of operation * @param operation operation attempted * @param exception caught the exception caught * @return an IOE to throw that contains the path and operation details. @@ -385,9 +412,10 @@ protected IOException operationFailure(String path, } else if (exception instanceof KeeperException.AuthFailedException) { ioe = new AuthenticationFailedException(path, "Authentication Failed: " + exception - + "; " + securityConnectionDiagnostics, + + "; " + securityConnectionDiagnostics, exception); - } else if (exception instanceof KeeperException.NoChildrenForEphemeralsException) { + } else if (exception instanceof + KeeperException.NoChildrenForEphemeralsException) { ioe = new NoChildrenForEphemeralsException(path, "Cannot create a path under an ephemeral node: " + exception, exception); @@ -402,7 +430,7 @@ protected IOException operationFailure(String path, } else { ioe = new RegistryIOException(path, "Failure of " + operation + " on " + path + ": " + - exception.toString(), + exception.toString(), exception); } if (ioe.getCause() == null) { @@ -417,8 +445,8 @@ protected IOException operationFailure(String path, * may create the same path before the create() operation is executed/ * propagated to the ZK node polled. 
* - * @param path path to create - * @param acl ACL for path -used when creating a new entry + * @param path path to create + * @param acl ACL for path -used when creating a new entry * @param createParents flag to trigger parent creation * @return true iff the path was created * @throws IOException @@ -432,10 +460,11 @@ public boolean maybeCreate(String path, } /** - * Stat the file + * Stat the file. + * * @param path path of operation * @return a curator stat entry - * @throws IOException on a failure + * @throws IOException on a failure * @throws PathNotFoundException if the path was not found */ public Stat zkStat(String path) throws IOException { @@ -457,7 +486,8 @@ public Stat zkStat(String path) throws IOException { } /** - * Get the ACLs of a path + * Get the ACLs of a path. + * * @param path path of operation * @return a possibly empty list of ACLs * @throws IOException @@ -481,12 +511,13 @@ public Stat zkStat(String path) throws IOException { } /** - * Probe for a path existing + * Probe for a path existing. + * * @param path path of operation * @return true if the path was visible from the ZK server * queried. * @throws IOException on any exception other than - * {@link PathNotFoundException} + * {@link PathNotFoundException} */ public boolean zkPathExists(String path) throws IOException { checkServiceLive(); @@ -503,7 +534,8 @@ public boolean zkPathExists(String path) throws IOException { } /** - * Verify a path exists + * Verify a path exists. + * * @param path path of operation * @throws PathNotFoundException if the path is absent * @throws IOException @@ -514,11 +546,12 @@ public String zkPathMustExist(String path) throws IOException { } /** - * Create a directory. It is not an error if it already exists - * @param path path to create - * @param mode mode for path + * Create a directory. It is not an error if it already exists. 
+ * + * @param path path to create + * @param mode mode for path * @param createParents flag to trigger parent creation - * @param acls ACL for path + * @param acls ACL for path * @throws IOException any problem */ public boolean zkMkPath(String path, @@ -558,9 +591,10 @@ public boolean zkMkPath(String path, } /** - * Recursively make a path + * Recursively make a path. + * * @param path path to create - * @param acl ACL for path + * @param acl ACL for path * @throws IOException any problem */ public void zkMkParentPath(String path, @@ -574,7 +608,8 @@ public void zkMkParentPath(String path, /** * Create a path with given data. byte[0] is used for a path - * without data + * without data. + * * @param path path of operation * @param data initial data * @param acls @@ -600,7 +635,8 @@ public void zkCreate(String path, } /** - * Update the data for a path + * Update the data for a path. + * * @param path path of operation * @param data new data * @throws IOException @@ -620,13 +656,14 @@ public void zkUpdate(String path, byte[] data) throws IOException { } /** - * Create or update an entry - * @param path path - * @param data data - * @param acl ACL for path -used when creating a new entry + * Create or update an entry. + * + * @param path path + * @param data data + * @param acl ACL for path -used when creating a new entry * @param overwrite enable overwrite - * @throws IOException * @return true if the entry was created, false if it was simply updated. + * @throws IOException */ public boolean zkSet(String path, CreateMode mode, @@ -649,12 +686,13 @@ public boolean zkSet(String path, /** * Delete a directory/directory tree. - * It is not an error to delete a path that does not exist - * @param path path of operation - * @param recursive flag to trigger recursive deletion + * It is not an error to delete a path that does not exist. 
+ * + * @param path path of operation + * @param recursive flag to trigger recursive deletion * @param backgroundCallback callback; this being set converts the operation - * into an async/background operation. - * task + * into an async/background operation. + * task * @throws IOException on problems other than no-such-path */ public void zkDelete(String path, @@ -682,7 +720,8 @@ public void zkDelete(String path, } /** - * List all children of a path + * List all children of a path. + * * @param path path of operation * @return a possibly empty list of children * @throws IOException @@ -703,7 +742,8 @@ public void zkDelete(String path, } /** - * Read data on a path + * Read data on a path. + * * @param path path of operation * @return the data * @throws IOException read failure @@ -724,9 +764,10 @@ public void zkDelete(String path, /** * Return a path dumper instance which can do a full dump * of the registry tree in its toString() - * operation - * @return a class to dump the registry + * operation. + * * @param verbose verbose flag - includes more details (such as ACLs) + * @return a class to dump the registry */ public ZKPathDumper dumpPath(boolean verbose) { return new ZKPathDumper(curator, registryRoot, verbose); @@ -734,7 +775,8 @@ public ZKPathDumper dumpPath(boolean verbose) { /** * Add a new write access entry for all future write operations. - * @param id ID to use + * + * @param id ID to use * @param pass password * @throws IOException on any failure to build the digest */ @@ -746,16 +788,16 @@ public boolean addWriteAccessor(String id, String pass) throws IOException { } /** - * Clear all write accessors + * Clear all write accessors. */ public void clearWriteAccessors() { getRegistrySecurity().resetDigestACLs(); } - /** * Diagnostics method to dump a registry robustly. - * Any exception raised is swallowed + * Any exception raised is swallowed. 
+ * * @param verbose verbose path dump * @return the registry tree */ @@ -769,4 +811,79 @@ protected String dumpRegistryRobustly(boolean verbose) { } return ""; } + + /** + * Registers a listener to path related events. + * + * @param listener the listener. + * @return a handle allowing for the management of the listener. + * @throws Exception if registration fails due to error. + */ + public ListenerHandle registerPathListener(final PathListener listener) + throws Exception { + + final TreeCacheListener pathChildrenCacheListener = + new TreeCacheListener() { + + public void childEvent(CuratorFramework curatorFramework, + TreeCacheEvent event) + throws Exception { + String path = null; + if (event != null && event.getData() != null) { + path = event.getData().getPath(); + } + assert event != null; + switch (event.getType()) { + case NODE_ADDED: + LOG.info("Informing listener of added node {}", path); + listener.nodeAdded(path); + + break; + + case NODE_REMOVED: + LOG.info("Informing listener of removed node {}", path); + listener.nodeRemoved(path); + + break; + + case NODE_UPDATED: + LOG.info("Informing listener of updated node {}", path); + listener.nodeAdded(path); + + break; + + default: + // do nothing + break; + + } + } + }; + treeCache.getListenable().addListener(pathChildrenCacheListener); + + return new ListenerHandle() { + @Override + public void remove() { + treeCache.getListenable().removeListener(pathChildrenCacheListener); + } + }; + + } + + // TODO: should caches be stopped and then restarted if need be? + + /** + * Create the tree cache that monitors the registry for node addition, update, + * and deletion. + * + * @throws Exception if any issue arises during monitoring. 
+ */ + public void monitorRegistryEntries() + throws Exception { + String registryPath = + getConfig().get(RegistryConstants.KEY_REGISTRY_ZK_ROOT, + RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); + treeCache = new TreeCache(curator, registryPath); + treeCache.start(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ListenerHandle.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ListenerHandle.java new file mode 100644 index 0000000..e43dbbe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ListenerHandle.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.registry.client.impl.zk; + +/** + * + */ +public interface ListenerHandle { + void remove(); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/PathListener.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/PathListener.java new file mode 100644 index 0000000..db1e509 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/PathListener.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.registry.client.impl.zk; + +import java.io.IOException; + +/** + * + */ +public interface PathListener { + + void nodeAdded(String path) throws IOException; + + void nodeRemoved(String path) throws IOException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java index 271ab25..4c911da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java @@ -107,8 +107,10 @@ public void bind(String path, validatePath(path); // validate the record before putting it RegistryTypeUtils.validateServiceRecord(path, record); - LOG.info("Bound at {} : {}", path, record); + if (LOG.isDebugEnabled()) { + LOG.debug("Bound at {} : ServiceRecord = {}", path, record); + } CreateMode mode = CreateMode.PERSISTENT; byte[] bytes = serviceRecordMarshal.toBytes(record); zkSet(path, mode, bytes, getClientAcls(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java index 7b78932..5eaa9c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java @@ -19,13 +19,23 @@ package org.apache.hadoop.registry.client.types.yarn; 
/** - * YARN specific attributes in the registry + * YARN specific attributes in the registry. */ -public class YarnRegistryAttributes { +public final class YarnRegistryAttributes { /** - * ID. For containers: container ID. For application instances, application ID. + * Hidden constructor. + */ + private YarnRegistryAttributes() { + } + + /** + * ID. For containers: container ID. For application instances, + * application ID. */ public static final String YARN_ID = "yarn:id"; public static final String YARN_PERSISTENCE = "yarn:persistence"; + public static final String YARN_PATH = "yarn:path"; + public static final String YARN_HOSTNAME = "yarn:hostname"; + public static final String YARN_IP = "yarn:ip"; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ApplicationServiceRecordProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ApplicationServiceRecordProcessor.java new file mode 100644 index 0000000..0b5f724 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ApplicationServiceRecordProcessor.java @@ -0,0 +1,366 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import org.apache.hadoop.registry.client.types.Endpoint; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.xbill.DNS.Name; +import org.xbill.DNS.Type; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.List; + +/** + * A processor for generating application DNS records from registry service + * records. + */ +public class ApplicationServiceRecordProcessor extends + BaseServiceRecordProcessor { + private static final Logger LOG = + LoggerFactory.getLogger(ApplicationServiceRecordProcessor.class); + /** + * Create an application service record processor. + * + * @param record the service record + * @param path the service record registry node path + * @param domain the DNS zone/domain name + * @param zoneSelector returns the zone associated with the provided name. + * @throws Exception if an issue is generated during instantiation. + */ + public ApplicationServiceRecordProcessor( + ServiceRecord record, String path, String domain, + ZoneSelector zoneSelector) throws Exception { + super(record, path, domain, zoneSelector); + } + + /** + * Initializes the DNS record type to descriptor mapping based on the + * provided service record. + * + * @param serviceRecord the registry service record. + * @throws Exception if an issue is encountered. 
+ */ + @Override public void initTypeToInfoMapping(ServiceRecord serviceRecord) + throws Exception { + if (serviceRecord.external.isEmpty()) { + LOG.info(serviceRecord.description + ": No external endpoints defined."); + return; + } + for (int type : getRecordTypes()) { + switch (type) { + case Type.A: + createAInfo(serviceRecord); + break; + case Type.AAAA: + createAAAAInfo(serviceRecord); + break; + case Type.TXT: + createTXTInfo(serviceRecord); + break; + case Type.CNAME: + createCNAMEInfo(serviceRecord); + break; + case Type.SRV: + createSRVInfo(serviceRecord); + break; + default: + throw new IllegalArgumentException("Unknown type " + type); + + } + } + } + + /** + * Create an application TXT record descriptor. + * + * @param serviceRecord the service record. + * @throws Exception if there is an issue during descriptor creation. + */ + protected void createTXTInfo(ServiceRecord serviceRecord) throws Exception { + List endpoints = serviceRecord.external; + List recordDescriptors = new ArrayList<>(); + TXTApplicationRecordDescriptor txtInfo; + for (Endpoint endpoint : endpoints) { + txtInfo = new TXTApplicationRecordDescriptor( + serviceRecord, endpoint); + recordDescriptors.add(txtInfo); + } + registerRecordDescriptor(Type.TXT, recordDescriptors); + } + + /** + * Create an application SRV record descriptor. + * + * @param serviceRecord the service record. + * @throws Exception if there is an issue during descriptor creation. + */ + protected void createSRVInfo(ServiceRecord serviceRecord) throws Exception { + List endpoints = serviceRecord.external; + List recordDescriptors = new ArrayList<>(); + SRVApplicationRecordDescriptor srvInfo; + for (Endpoint endpoint : endpoints) { + srvInfo = new SRVApplicationRecordDescriptor( + serviceRecord, endpoint); + recordDescriptors.add(srvInfo); + } + registerRecordDescriptor(Type.SRV, recordDescriptors); + } + + /** + * Create an application CNAME record descriptor. + * + * @param serviceRecord the service record. 
+ * @throws Exception if there is an issue during descriptor creation. + */ + protected void createCNAMEInfo(ServiceRecord serviceRecord) throws Exception { + List endpoints = serviceRecord.external; + List recordDescriptors = new ArrayList<>(); + CNAMEApplicationRecordDescriptor cnameInfo; + for (Endpoint endpoint : endpoints) { + cnameInfo = new CNAMEApplicationRecordDescriptor( + serviceRecord, endpoint); + recordDescriptors.add(cnameInfo); + } + registerRecordDescriptor(Type.CNAME, recordDescriptors); + } + + /** + * Create an application AAAA record descriptor. + * + * @param record the service record. + * @throws Exception if there is an issue during descriptor creation. + */ + protected void createAAAAInfo(ServiceRecord record) + throws Exception { + AAAAApplicationRecordDescriptor + recordInfo = new AAAAApplicationRecordDescriptor( + getPath(), record); + registerRecordDescriptor(Type.AAAA, recordInfo); + } + + /** + * Create an application A record descriptor. + * + * @param record the service record. + * @throws Exception if there is an issue during descriptor creation. + */ + protected void createAInfo(ServiceRecord record) throws Exception { + AApplicationRecordDescriptor recordInfo = new AApplicationRecordDescriptor( + getPath(), record); + registerRecordDescriptor(Type.A, recordInfo); + } + + /** + * Returns the record types associated with a container service record. + * + * @return the record type array + */ + @Override public int[] getRecordTypes() { + return new int[] {Type.A, Type.AAAA, Type.CNAME, Type.SRV, Type.TXT}; + } + + /** + * An application TXT record descriptor. + */ + class TXTApplicationRecordDescriptor + extends ApplicationRecordDescriptor> { + + /** + * Creates an application TXT record descriptor. 
+ * + * @param record service record + * @throws Exception + */ + public TXTApplicationRecordDescriptor(ServiceRecord record, + Endpoint endpoint) throws Exception { + super(record, endpoint); + } + + /** + * Initializes the descriptor parameters. + * + * @param serviceRecord the service record. + */ + @Override protected void init(ServiceRecord serviceRecord) + throws Exception { + if (getEndpoint() != null) { + this.setNames(new Name[] {getServiceName(), getEndpointName()}); + this.setTarget(getTextRecords(getEndpoint())); + } + } + + } + + /** + * An application SRV record descriptor. + */ + class SRVApplicationRecordDescriptor extends + ApplicationRecordDescriptor { + + /** + * Creates an application SRV record descriptor. + * + * @param record service record + * @throws Exception + */ + public SRVApplicationRecordDescriptor(ServiceRecord record, + Endpoint endpoint) throws Exception { + super(record, endpoint); + } + + /** + * Initializes the descriptor parameters. + * + * @param serviceRecord the service record. + */ + @Override protected void init(ServiceRecord serviceRecord) + throws Exception { + if (getEndpoint() != null) { + this.setNames(new Name[] {getServiceName(), getEndpointName()}); + this.setTarget(new RecordCreatorFactory.HostPortInfo( + Name.fromString(getHost(getEndpoint()) + "."), getPort( + getEndpoint()))); + } + } + + } + + /** + * An application CNAME record descriptor. + */ + class CNAMEApplicationRecordDescriptor extends + ApplicationRecordDescriptor { + + /** + * Creates an application CNAME record descriptor. + * + * @param path registry path for service record + * @param record service record + * @throws Exception + */ + public CNAMEApplicationRecordDescriptor(String path, + ServiceRecord record) throws Exception { + super(record); + } + + /** + * Creates an application CNAME record descriptor. This descriptor is the + * source for API related CNAME records. 
+ * + * @param record service record + * @param endpoint the API endpoint + * @throws Exception + */ + public CNAMEApplicationRecordDescriptor(ServiceRecord record, + Endpoint endpoint) throws Exception { + super(record, endpoint); + } + + /** + * Initializes the descriptor parameters. + * + * @param serviceRecord the service record. + */ + @Override protected void init(ServiceRecord serviceRecord) + throws Exception { + if (getEndpoint() != null) { + this.setNames(new Name[] {getEndpointName()}); + this.setTarget(getServiceName()); + } + } + + } + + /** + * An application A record descriptor. + */ + class AApplicationRecordDescriptor + extends ApplicationRecordDescriptor { + + /** + * Creates an application A record descriptor. + * + * @param path registry path for service record + * @param record service record + * @throws Exception + */ + public AApplicationRecordDescriptor(String path, + ServiceRecord record) throws Exception { + super(record); + } + + /** + * Initializes the descriptor parameters. + * + * @param serviceRecord the service record. + */ + @Override protected void init(ServiceRecord serviceRecord) + throws Exception { + this.setNames(new Name[] {getServiceName()}); + List endpoints = serviceRecord.external; + if (endpoints.isEmpty()) { + return; + } + // TODO: do we need a "hostname" attribute for an application record or + // can we rely on the first endpoint record. + this.setTarget(InetAddress.getByName( + getHost(endpoints.get(0)))); + } + + } + + /** + * An application AAAA record descriptor. + */ + class AAAAApplicationRecordDescriptor extends AApplicationRecordDescriptor { + + /** + * Creates an application AAAA record descriptor. + * + * @param path registry path for service record + * @param record service record + * @throws Exception + */ + public AAAAApplicationRecordDescriptor(String path, + ServiceRecord record) throws Exception { + super(path, record); + } + + /** + * Initializes the descriptor parameters. 
+ * + * @param serviceRecord the service record. + */ + @Override protected void init(ServiceRecord serviceRecord) + throws Exception { + super.init(serviceRecord); + if (getTarget() == null) { + return; + } + try { + this.setTarget(getIpv6Address(getTarget())); + } catch (UnknownHostException e) { + throw new IllegalStateException(e); + } + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java new file mode 100644 index 0000000..fd5c74f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/BaseServiceRecordProcessor.java @@ -0,0 +1,468 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.registry.server.dns; + +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.registry.client.types.AddressTypes; +import org.apache.hadoop.registry.client.types.Endpoint; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.xbill.DNS.Name; +import org.xbill.DNS.ReverseMap; +import org.xbill.DNS.TextParseException; + +import java.io.IOException; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Provides common service record processing logic. + */ +public abstract class BaseServiceRecordProcessor + implements ServiceRecordProcessor { + + private final ZoneSelector zoneSelctor; + private Map> typeToDescriptorMap = + new HashMap<>(); + private String path; + private String domain; + + private static final Pattern USER_NAME = Pattern.compile("/users/(\\w*)/?"); + private static final String YARN_SERVICE_API_PREFIX = + "classpath:org.apache.hadoop.yarn.service."; + private static final String HTTP_API_TYPE = "http://"; + + /** + * Creates a service record processor. + * + * @param record the service record. + * @param path the node path for the record in the registry. + * @param domain the target DNS domain for the service record + * associated DNS records. + * @param zoneSelector A selector of the best zone for a given DNS name. + * @throws Exception if an issue is generated during instantiation. 
+ */ + public BaseServiceRecordProcessor(ServiceRecord record, String path, + String domain, ZoneSelector zoneSelector) + throws Exception { + this.setPath(path); + this.domain = domain; + this.zoneSelctor = zoneSelector; + initTypeToInfoMapping(record); + } + + /** + * Return the username found in the ZK path. + * + * @param recPath the ZK recPath. + * @return the user name. + */ + protected String getUsername(String recPath) { + String user = "anonymous"; + Matcher matcher = USER_NAME.matcher(recPath); + if (matcher.find()) { + user = matcher.group(1); + } + return user; + } + + /** + * Return the IPv6 mapped address for the provided IPv4 address. Utilized + * to create corresponding AAAA records. + * + * @param address the IPv4 address. + * @return the mapped IPv6 address. + * @throws UnknownHostException + */ + static InetAddress getIpv6Address(InetAddress address) + throws UnknownHostException { + String[] octets = address.getHostAddress().split("\\."); + byte[] octetBytes = new byte[4]; + for (int i = 0; i < 4; ++i) { + octetBytes[i] = (byte) Integer.parseInt(octets[i]); + } + + byte[] ipv4asIpV6addr = new byte[16]; + ipv4asIpV6addr[10] = (byte) 0xff; + ipv4asIpV6addr[11] = (byte) 0xff; + ipv4asIpV6addr[12] = octetBytes[0]; + ipv4asIpV6addr[13] = octetBytes[1]; + ipv4asIpV6addr[14] = octetBytes[2]; + ipv4asIpV6addr[15] = octetBytes[3]; + + return Inet6Address.getByAddress(null, ipv4asIpV6addr, 0); + } + + /** + * Reverse the string representation of the input IP address. + * + * @param ip the string representation of the IP address. + * @return the reversed IP address. + * @throws UnknownHostException if the ip is unknown. + */ + protected Name reverseIP(String ip) throws UnknownHostException { + return ReverseMap.fromAddress(ip); + } + + /** + * Manages the creation and registration of service record generated DNS + * records. + * + * @param command the DNS registration command object (e.g. 
add_record, + * remove record) + * @throws IOException if the creation or registration generates an issue. + */ + @SuppressWarnings({"unchecked"}) + public void manageDNSRecords(RegistryDNS.RegistryCommand command) + throws IOException { + for (Map.Entry> entry : + typeToDescriptorMap.entrySet()) { + for (RecordDescriptor recordDescriptor : entry.getValue()) { + for (Name name : recordDescriptor.getNames()) { + RecordCreatorFactory.RecordCreator recordCreator = + RecordCreatorFactory.getRecordCreator(entry.getKey()); + command.exec(zoneSelctor.findBestZone(name), + recordCreator.create(name, recordDescriptor.getTarget())); + } + } + } + } + + /** + * Add the DNS record descriptor object to the record type to descriptor + * mapping. + * + * @param type the DNS record type. + * @param recordDescriptor the DNS record descriptor + */ + protected void registerRecordDescriptor(int type, + RecordDescriptor recordDescriptor) { + List infos = new ArrayList<>(); + infos.add(recordDescriptor); + typeToDescriptorMap.put(type, infos); + } + + /** + * Add the DNS record descriptor objects to the record type to descriptor + * mapping. + * + * @param type the DNS record type. + * @param recordDescriptors the DNS record descriptors + */ + protected void registerRecordDescriptor(int type, + List recordDescriptors) { + typeToDescriptorMap.put(type, recordDescriptors); + } + + /** + * Return the path associated with the record. + * @return the path. + */ + protected String getPath() { + return path; + } + + /** + * Set the path associated with the record. + * @param path the path. + */ + protected void setPath(String path) { + this.path = path; + } + + /** + * A descriptor container the information to be populated into a DNS record. + * + * @param the DNS record type/class. + */ + abstract class RecordDescriptor { + private final ServiceRecord record; + private Name[] names; + private T target; + + /** + * Creates a DNS record descriptor. 
+ * + * @param record the associated service record. + */ + public RecordDescriptor(ServiceRecord record) { + this.record = record; + } + + /** + * Returns the DNS names associated with the record type and information. + * + * @return the array of names. + */ + public Name[] getNames() { + return names; + } + + /** + * Return the target object for the DNS record. + * + * @return the DNS record target. + */ + public T getTarget() { + return target; + } + + /** + * Initializes the names and information for this DNS record descriptor. + * + * @param serviceRecord the service record. + * @throws Exception + */ + protected abstract void init(ServiceRecord serviceRecord) throws Exception; + + /** + * Returns the service record. + * @return the service record. + */ + public ServiceRecord getRecord() { + return record; + } + + /** + * Sets the names associated with the record type and information. + * @param names the names. + */ + public void setNames(Name[] names) { + this.names = names; + } + + /** + * Sets the target object associated with the record. + * @param target the target. + */ + public void setTarget(T target) { + this.target = target; + } + } + + /** + * A container-based DNS record descriptor. + * + * @param the DNS record type/class. + */ + abstract class ContainerRecordDescriptor extends RecordDescriptor { + + public ContainerRecordDescriptor(String path, ServiceRecord record) + throws Exception { + super(record); + init(record); + } + + /** + * Returns the DNS name constructed from the YARN container ID. + * + * @return the container ID name. + * @throws TextParseException + */ + protected Name getContainerIDName() throws TextParseException { + String containerID = RegistryPathUtils.lastPathEntry(getPath()); + return Name.fromString(String.format("%s.%s", containerID, domain)); + } + + /** + * Returns the DNS name constructed from the container role/component name. + * + * @return the DNS naem. 
+ * @throws PathNotFoundException + * @throws TextParseException + */ + protected Name getContainerName() + throws PathNotFoundException, TextParseException { + String service = RegistryPathUtils.lastPathEntry( + RegistryPathUtils.parentOf(RegistryPathUtils.parentOf(getPath()))); + String description = getRecord().description.toLowerCase(); + String user = getUsername(getPath()); + return Name.fromString(MessageFormat.format("{0}.{1}.{2}.{3}", + description, + service, + user, + domain)); + } + + } + + /** + * An application-based DNS record descriptor. + * + * @param the DNS record type/class. + */ + abstract class ApplicationRecordDescriptor extends RecordDescriptor { + + private Endpoint srEndpoint; + + /** + * Creates an application associated DNS record descriptor. + * + * @param record the service record. + * @throws Exception + */ + public ApplicationRecordDescriptor(ServiceRecord record) + throws Exception { + this(record, null); + } + + /** + * Creates an application associated DNS record descriptor. The endpoint + * is leverated to create an associated application API record. + * + * @param record the service record. + * @param endpoint an API endpoint. + * @throws Exception + */ + public ApplicationRecordDescriptor(ServiceRecord record, + Endpoint endpoint) throws Exception { + super(record); + this.setEndpoint(endpoint); + init(record); + } + + /** + * Get the service's DNS name for registration. + * + * @return the service DNS name. + * @throws TextParseException + */ + protected Name getServiceName() throws TextParseException { + String user = getUsername(getPath()); + String service = + String.format("%s.%s.%s", + RegistryPathUtils.lastPathEntry(getPath()), + user, + domain); + return Name.fromString(service); + } + + /** + * Get the host from the provided endpoint record. + * + * @param endpoint the endpoint info. + * @return the host name. 
+ */ + protected String getHost(Endpoint endpoint) { + String host = null; + // assume one address for now + Map address = endpoint.addresses.get(0); + if (endpoint.addressType.equals(AddressTypes.ADDRESS_HOSTNAME_AND_PORT)) { + host = address.get(AddressTypes.ADDRESS_HOSTNAME_FIELD); + } else if (endpoint.addressType.equals(AddressTypes.ADDRESS_URI)) { + URI uri = URI.create(address.get("uri")); + host = uri.getHost(); + } + return host; + } + + /** + * Get the post from the provided endpoint record. + * + * @param endpoint the endpoint info. + * @return the port. + */ + protected int getPort(Endpoint endpoint) { + int port = -1; + // assume one address for now + Map address = endpoint.addresses.get(0); + if (endpoint.addressType.equals(AddressTypes.ADDRESS_HOSTNAME_AND_PORT)) { + port = Integer.parseInt(address.get(AddressTypes.ADDRESS_PORT_FIELD)); + } else if (endpoint.addressType.equals(AddressTypes.ADDRESS_URI)) { + URI uri = URI.create(address.get("uri")); + port = uri.getPort(); + } + return port; + } + + /** + * Get the list of strings that can be related in a TXT record for the given + * endpoint. + * + * @param endpoint the endpoint information. + * @return the list of strings relating endpoint info. + */ + protected List getTextRecords(Endpoint endpoint) { + Map address = endpoint.addresses.get(0); + List txtRecs = new ArrayList(); + txtRecs.add("api=" + getDNSApiFragment(endpoint.api)); + if (endpoint.addressType.equals(AddressTypes.ADDRESS_URI)) { + URI uri = URI.create(address.get("uri")); + txtRecs.add("path=" + uri.getPath()); + } + return txtRecs; + } + + /** + * Get an API name that is compatible with DNS standards (and shortened). + * + * @param api the api indicator. + * @return the shortened and compatible api name. 
+ */ + protected String getDNSApiFragment(String api) { + String dnsApi = null; + if (api.startsWith(YARN_SERVICE_API_PREFIX)) { + dnsApi = api.substring(YARN_SERVICE_API_PREFIX.length()); + } else if (api.startsWith(HTTP_API_TYPE)) { + dnsApi = "http"; + } + assert dnsApi != null; + dnsApi = dnsApi.replace('.', '-'); + return dnsApi; + } + + /** + * Return the DNS name associated with the API endpoint. + * + * @return the name. + * @throws TextParseException + */ + protected Name getEndpointName() throws TextParseException { + return Name.fromString(String.format("%s-api.%s", + getDNSApiFragment( + getEndpoint().api), + getServiceName())); + } + + /** + * Returns the endpoint. + * @return the endpoint. + */ + public Endpoint getEndpoint() { + return srEndpoint; + } + + /** + * Sets the endpoint. + * @param endpoint the endpoint. + */ + public void setEndpoint( + Endpoint endpoint) { + this.srEndpoint = endpoint; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java new file mode 100644 index 0000000..2e95f54 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ContainerServiceRecordProcessor.java @@ -0,0 +1,283 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; +import org.xbill.DNS.Name; +import org.xbill.DNS.TextParseException; +import org.xbill.DNS.Type; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.List; + +/** + * A processor for generating container DNS records from registry service + * records. + */ +public class ContainerServiceRecordProcessor extends + BaseServiceRecordProcessor { + + /** + * Create a container service record processor. + * @param record the service record + * @param path the service record registry node path + * @param domain the DNS zone/domain name + * @param zoneSelector returns the zone associated with the provided name. + * @throws Exception if an issue is generated during instantiation. + */ + public ContainerServiceRecordProcessor( + ServiceRecord record, String path, String domain, + ZoneSelector zoneSelector) throws Exception { + super(record, path, domain, zoneSelector); + } + + /** + * Initializes the DNS record type to descriptor mapping based on the + * provided service record. + * @param serviceRecord the registry service record. + * @throws Exception if an issue arises. 
+ */ + @Override public void initTypeToInfoMapping(ServiceRecord serviceRecord) + throws Exception { + if (serviceRecord.get(YarnRegistryAttributes.YARN_IP) != null) { + for (int type : getRecordTypes()) { + switch (type) { + case Type.A: + createAInfo(serviceRecord); + break; + case Type.AAAA: + createAAAAInfo(serviceRecord); + break; + case Type.PTR: + createPTRInfo(serviceRecord); + break; + case Type.TXT: + createTXTInfo(serviceRecord); + break; + default: + throw new IllegalArgumentException("Unknown type " + type); + + } + } + } + } + + /** + * Create a container TXT record descriptor. + * @param serviceRecord the service record. + * @throws Exception if the descriptor creation yields an issue. + */ + protected void createTXTInfo(ServiceRecord serviceRecord) throws Exception { + TXTContainerRecordDescriptor txtInfo = + new TXTContainerRecordDescriptor(getPath(), serviceRecord); + registerRecordDescriptor(Type.TXT, txtInfo); + } + + /** + * Creates a container PTR record descriptor. + * @param record the service record. + * @throws Exception if the descriptor creation yields an issue. + */ + protected void createPTRInfo(ServiceRecord record) throws Exception { + PTRContainerRecordDescriptor + ptrInfo = new PTRContainerRecordDescriptor(getPath(), record); + registerRecordDescriptor(Type.PTR, ptrInfo); + } + + /** + * Creates a container AAAA (IPv6) record descriptor. + * @param record the service record + * @throws Exception if the descriptor creation yields an issue. + */ + protected void createAAAAInfo(ServiceRecord record) + throws Exception { + AAAAContainerRecordDescriptor + recordInfo = new AAAAContainerRecordDescriptor( + getPath(), record); + registerRecordDescriptor(Type.AAAA, recordInfo); + } + + /** + * Creates a container A (IPv4) record descriptor. + * @param record service record. + * @throws Exception if the descriptor creation yields an issue. 
+ */ + protected void createAInfo(ServiceRecord record) throws Exception { + AContainerRecordDescriptor recordInfo = new AContainerRecordDescriptor( + getPath(), record); + registerRecordDescriptor(Type.A, recordInfo); + } + + /** + * Returns the record types associated with a container service record. + * @return the record type array + */ + @Override public int[] getRecordTypes() { + return new int[] {Type.A, Type.AAAA, Type.PTR, Type.TXT}; + } + + /** + * A container TXT record descriptor. + */ + class TXTContainerRecordDescriptor + extends ContainerRecordDescriptor> { + + /** + * Creates a container TXT record descriptor. + * @param path registry path for service record + * @param record service record + * @throws Exception + */ + public TXTContainerRecordDescriptor(String path, + ServiceRecord record) throws Exception { + super(path, record); + } + + /** + * Initializes the descriptor parameters. + * @param serviceRecord the service record. + */ + @Override protected void init(ServiceRecord serviceRecord) { + try { + this.setNames(new Name[] {getContainerName()}); + } catch (TextParseException e) { + // log + } catch (PathNotFoundException e) { + // log + } + List txts = new ArrayList<>(); + txts.add("id=" + serviceRecord.get(YarnRegistryAttributes.YARN_ID)); + this.setTarget(txts); + } + + } + + /** + * A container PTR record descriptor. + */ + class PTRContainerRecordDescriptor extends ContainerRecordDescriptor { + + /** + * Creates a container PTR record descriptor. + * @param path registry path for service record + * @param record service record + * @throws Exception + */ + public PTRContainerRecordDescriptor(String path, + ServiceRecord record) throws Exception { + super(path, record); + } + + /** + * Initializes the descriptor parameters. + * @param serviceRecord the service record. 
+ */ + @Override protected void init(ServiceRecord serviceRecord) { + String host = serviceRecord.get(YarnRegistryAttributes.YARN_HOSTNAME); + String ip = serviceRecord.get(YarnRegistryAttributes.YARN_IP); + Name reverseLookupName = null; + if (host != null && ip != null) { + try { + reverseLookupName = reverseIP(ip); + } catch (UnknownHostException e) { + //LOG + } + } + this.setNames(new Name[] {reverseLookupName}); + try { + this.setTarget(getContainerName()); + } catch (TextParseException e) { + //LOG + } catch (PathNotFoundException e) { + //LOG + } + } + + } + + + /** + * A container A record descriptor. + */ + class AContainerRecordDescriptor + extends ContainerRecordDescriptor { + + /** + * Creates a container A record descriptor. + * @param path registry path for service record + * @param record service record + * @throws Exception + */ + public AContainerRecordDescriptor(String path, + ServiceRecord record) throws Exception { + super(path, record); + } + + /** + * Initializes the descriptor parameters. + * @param serviceRecord the service record. + */ + @Override protected void init(ServiceRecord serviceRecord) { + String ip = serviceRecord.get(YarnRegistryAttributes.YARN_IP); + if (ip == null) { + throw new IllegalArgumentException("No IP specified"); + } + try { + this.setTarget(InetAddress.getByName(ip)); + this.setNames(new Name[] {getContainerName(), getContainerIDName()}); + } catch (Exception e) { + throw new IllegalStateException(e); + } + + } + + } + + /** + * A container AAAA record descriptor. + */ + class AAAAContainerRecordDescriptor extends AContainerRecordDescriptor { + + /** + * Creates a container AAAA record descriptor. + * @param path registry path for service record + * @param record service record + * @throws Exception + */ + public AAAAContainerRecordDescriptor(String path, + ServiceRecord record) throws Exception { + super(path, record); + } + + /** + * Initializes the descriptor parameters. 
+ * @param serviceRecord the service record. + */ + @Override protected void init(ServiceRecord serviceRecord) { + super.init(serviceRecord); + try { + this.setTarget(getIpv6Address(getTarget())); + } catch (UnknownHostException e) { + throw new IllegalStateException(e); + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/PrivilegedRegistryDNSStarter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/PrivilegedRegistryDNSStarter.java new file mode 100644 index 0000000..dd4e1b8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/PrivilegedRegistryDNSStarter.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.registry.server.dns; + +import org.apache.commons.daemon.Daemon; +import org.apache.commons.daemon.DaemonContext; +import org.apache.hadoop.registry.client.api.DNSOperationsFactory; +import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.registry.client.api.RegistryConstants.DEFAULT_DNS_PORT; +import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_DNS_PORT; + +/** + * This class is used to allow the RegistryDNSServer to run on a privileged + * port (e.g. 53). + */ +public class PrivilegedRegistryDNSStarter implements Daemon { + private static final Logger LOG = + LoggerFactory.getLogger(PrivilegedRegistryDNSStarter.class); + + private YarnConfiguration conf; + private RegistryDNS registryDNS; + private RegistryDNSServer registryDNSServer; + + @Override + public void init(DaemonContext context) throws Exception { + String[] args = context.getArguments(); + StringUtils.startupShutdownMessage(RegistryDNSServer.class, args, LOG); + conf = new YarnConfiguration(); + new GenericOptionsParser(conf, args); + + int port = conf.getInt(KEY_DNS_PORT, DEFAULT_DNS_PORT); + if (port < 1 || port > 1023) { + throw new RuntimeException("Must start privileged registry DNS server " + + "with '" + KEY_DNS_PORT + "' configured to a privileged port."); + } + + try { + registryDNS = (RegistryDNS) DNSOperationsFactory.createInstance(conf); + registryDNS.initializeChannels(conf); + } catch (Exception e) { + LOG.error("Error initializing Registry DNS", e); + throw e; + } + } + + @Override + public void start() throws Exception { + registryDNSServer = RegistryDNSServer.launchDNSServer(conf, registryDNS); + } + + @Override + public void stop() throws Exception { + } + + @Override + public void destroy() { + registryDNSServer.stop(); + } + +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RecordCreatorFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RecordCreatorFactory.java new file mode 100644 index 0000000..23f9501 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RecordCreatorFactory.java @@ -0,0 +1,275 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import org.xbill.DNS.AAAARecord; +import org.xbill.DNS.ARecord; +import org.xbill.DNS.CNAMERecord; +import org.xbill.DNS.DClass; +import org.xbill.DNS.Name; +import org.xbill.DNS.PTRRecord; +import org.xbill.DNS.Record; +import org.xbill.DNS.SRVRecord; +import org.xbill.DNS.TXTRecord; + +import java.net.InetAddress; +import java.util.List; + +import static org.xbill.DNS.Type.*; + +/** + * A factory for creating DNS records. + */ +public final class RecordCreatorFactory { + private static long ttl; + + /** + * Private constructor. + */ + private RecordCreatorFactory() { + } + + /** + * Returns the DNS record creator for the provided type. 
+ * + * @param type the DNS record type. + * @return the record creator. + */ + static RecordCreator getRecordCreator(int type) { + switch (type) { + case A: + return new ARecordCreator(); + case CNAME: + return new CNAMERecordCreator(); + case TXT: + return new TXTRecordCreator(); + case AAAA: + return new AAAARecordCreator(); + case PTR: + return new PTRRecordCreator(); + case SRV: + return new SRVRecordCreator(); + default: + throw new IllegalArgumentException("No type " + type); + + } + } + + /** + * Set the TTL value for the records created by the factory. + * + * @param ttl the ttl value, in seconds. + */ + public static void setTtl(long ttl) { + RecordCreatorFactory.ttl = ttl; + } + + /** + * A DNS Record creator. + * + * @param the record type + * @param the record's target type + */ + public interface RecordCreator { + R create(Name name, T target); + } + + /** + * An A Record creator. + */ + static class ARecordCreator implements RecordCreator { + /** + * Creates an A record creator. + */ + public ARecordCreator() { + } + + /** + * Creates a DNS A record. + * + * @param name the record name. + * @param target the record target/value. + * @return an A record. + */ + @Override public ARecord create(Name name, InetAddress target) { + return new ARecord(name, DClass.IN, ttl, target); + } + } + + /** + * An AAAA Record creator. + */ + static class AAAARecordCreator + implements RecordCreator { + /** + * Creates an AAAA record creator. + */ + public AAAARecordCreator() { + } + + /** + * Creates a DNS AAAA record. + * + * @param name the record name. + * @param target the record target/value. + * @return an A record. + */ + @Override public AAAARecord create(Name name, InetAddress target) { + return new AAAARecord(name, DClass.IN, ttl, target); + } + } + + static class CNAMERecordCreator implements RecordCreator { + /** + * Creates a CNAME record creator. + */ + public CNAMERecordCreator() { + } + + /** + * Creates a DNS CNAME record. 
+ * + * @param name the record name. + * @param target the record target/value. + * @return an A record. + */ + @Override public CNAMERecord create(Name name, Name target) { + return new CNAMERecord(name, DClass.IN, ttl, target); + } + } + + /** + * A TXT Record creator. + */ + static class TXTRecordCreator + implements RecordCreator> { + /** + * Creates a TXT record creator. + */ + public TXTRecordCreator() { + } + + /** + * Creates a DNS TXT record. + * + * @param name the record name. + * @param target the record target/value. + * @return an A record. + */ + @Override public TXTRecord create(Name name, List target) { + return new TXTRecord(name, DClass.IN, ttl, target); + } + } + + /** + * A PTR Record creator. + */ + static class PTRRecordCreator implements RecordCreator { + /** + * Creates a PTR record creator. + */ + public PTRRecordCreator() { + } + + /** + * Creates a DNS PTR record. + * + * @param name the record name. + * @param target the record target/value. + * @return an A record. + */ + @Override public PTRRecord create(Name name, Name target) { + return new PTRRecord(name, DClass.IN, ttl, target); + } + } + + /** + * A SRV Record creator. + */ + static class SRVRecordCreator + implements RecordCreator { + /** + * Creates a SRV record creator. + */ + public SRVRecordCreator() { + } + + /** + * Creates a DNS SRV record. + * + * @param name the record name. + * @param target the record target/value. + * @return an A record. + */ + @Override public SRVRecord create(Name name, HostPortInfo target) { + return new SRVRecord(name, DClass.IN, ttl, 1, 1, target.getPort(), + target.getHost()); + } + } + + /** + * An object for storing the host and port info used to generate SRV records. + */ + public static class HostPortInfo { + private Name host; + private int port; + + /** + * Creates an object with a host and port pair. 
+ * + * @param host the hostname/ip + * @param port the port value + */ + public HostPortInfo(Name host, int port) { + this.setHost(host); + this.setPort(port); + } + + /** + * Return the host name. + * @return the host name. + */ + Name getHost() { + return host; + } + + /** + * Set the host name. + * @param host the host name. + */ + void setHost(Name host) { + this.host = host; + } + + /** + * Get the port. + * @return the port. + */ + int getPort() { + return port; + } + + /** + * Set the port. + * @param port the port. + */ + void setPort(int port) { + this.port = port; + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java new file mode 100644 index 0000000..d7ebece --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java @@ -0,0 +1,1756 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.registry.server.dns; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.filefilter.IOFileFilter; +import org.apache.commons.net.util.Base64; +import org.apache.commons.net.util.SubnetUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.registry.client.api.DNSOperations; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.xbill.DNS.CNAMERecord; +import org.xbill.DNS.DClass; +import org.xbill.DNS.DNSKEYRecord; +import org.xbill.DNS.DNSSEC; +import org.xbill.DNS.DSRecord; +import org.xbill.DNS.ExtendedFlags; +import org.xbill.DNS.ExtendedResolver; +import org.xbill.DNS.Flags; +import org.xbill.DNS.Header; +import org.xbill.DNS.Lookup; +import org.xbill.DNS.Message; +import org.xbill.DNS.NSRecord; +import org.xbill.DNS.Name; +import org.xbill.DNS.NameTooLongException; +import org.xbill.DNS.OPTRecord; +import org.xbill.DNS.Opcode; +import org.xbill.DNS.RRSIGRecord; +import org.xbill.DNS.RRset; +import org.xbill.DNS.Rcode; +import org.xbill.DNS.Record; +import org.xbill.DNS.Resolver; +import org.xbill.DNS.ResolverConfig; +import org.xbill.DNS.SOARecord; +import org.xbill.DNS.Section; +import org.xbill.DNS.SetResponse; +import org.xbill.DNS.SimpleResolver; +import org.xbill.DNS.TSIG; +import org.xbill.DNS.TSIGRecord; +import org.xbill.DNS.TextParseException; +import org.xbill.DNS.Type; +import org.xbill.DNS.Zone; + +import java.io.DataOutputStream; +import java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.math.BigInteger; +import 
java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.NetworkInterface; +import java.net.Socket; +import java.net.SocketAddress; +import java.net.SocketException; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.nio.channels.DatagramChannel; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.security.KeyFactory; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.RSAPrivateKeySpec; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Collection; +import java.util.Date; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.apache.hadoop.registry.client.api.RegistryConstants.*; + +/** + * A DNS service reflecting the state of the YARN registry. Records are created + * based on service records available in the YARN ZK-based registry. 
+ */ +public class RegistryDNS extends AbstractService implements DNSOperations, + ZoneSelector { + + public static final String CONTAINER = "container"; + + static final int FLAG_DNSSECOK = 1; + static final int FLAG_SIGONLY = 2; + + private static final Logger LOG = + LoggerFactory.getLogger(RegistryDNS.class); + public static final String IN_ADDR_ARPA = "in-addr.arpa."; + public static final String ZONE_SUFFIX = ".zone"; + + private ExecutorService executor; + private ReentrantReadWriteLock zoneLock = new ReentrantReadWriteLock(); + private CloseableLock readLock = new CloseableLock(zoneLock.readLock()); + private CloseableLock writeLock = new CloseableLock(zoneLock.writeLock()); + private String domainName; + private long ttl = 0L; + + private static final Pattern USER_NAME = Pattern.compile("/users/(\\w*)/?"); + private Boolean dnssecEnabled; + private PrivateKey privateKey; + + private ConcurrentMap dnsKeyRecs = + new ConcurrentHashMap<>(); + private ConcurrentMap zones = new ConcurrentHashMap<>(); + private Name bindHost; + + private boolean channelsInitialized = false; + + /** + * Lock to update resolver only once per request. + */ + private final Object resolverUpdateLock = new Object(); + + /** + * Whether resolver update has been requested. + */ + private boolean resolverUpdateRequested = true; + + /** + * Construct the service. 
+ * + * @param name service name + */ + public RegistryDNS(String name) { + super(name); + executor = HadoopExecutors.newCachedThreadPool( + new ThreadFactory() { + private AtomicInteger counter = new AtomicInteger(1); + + @Override + public Thread newThread(Runnable r) { + return new Thread(r, + "RegistryDNS " + + counter.getAndIncrement()); + } + }); + } + + public void initializeChannels(Configuration conf) throws Exception { + if (channelsInitialized) { + return; + } + channelsInitialized = true; + int port = conf.getInt(KEY_DNS_PORT, DEFAULT_DNS_PORT); + InetAddress addr = InetAddress.getLocalHost(); + + String bindAddress = conf.get(KEY_DNS_BIND_ADDRESS); + if (bindAddress != null) { + addr = InetAddress.getByName(bindAddress); + } + + LOG.info("Opening TCP and UDP channels on {} port {}", addr, port); + addNIOUDP(addr, port); + addNIOTCP(addr, port); + } + + /** + * Initialize registryDNS to use /etc/resolv.conf values + * as default resolvers. + */ + private void updateDNSServer(Configuration conf) { + synchronized (resolverUpdateLock) { + if (!resolverUpdateRequested) { + return; + } + int port = conf.getInt(KEY_DNS_PORT, DEFAULT_DNS_PORT); + resolverUpdateRequested = false; + List list = new ArrayList(); + try { + // If resolv.conf contains the server's own IP address, + // and RegistryDNS handles the lookup. Local IP address + // must be filter out from default resolvers to prevent + // self recursive loop. + if (port != 53) { + // When registryDNS is not running on default port, + // registryDNS can utilize local DNS server as upstream lookup. 
+ throw new SocketException("Bypass filtering local DNS server."); + } + Enumeration net = + NetworkInterface.getNetworkInterfaces(); + while(net.hasMoreElements()) { + NetworkInterface n = (NetworkInterface) net.nextElement(); + Enumeration ee = n.getInetAddresses(); + while (ee.hasMoreElements()) { + InetAddress i = (InetAddress) ee.nextElement(); + list.add(i); + } + } + } catch (SocketException e) { + } + ResolverConfig.refresh(); + ExtendedResolver resolver; + try { + resolver = new ExtendedResolver(); + } catch (UnknownHostException e) { + LOG.error("Can not resolve DNS servers: ", e); + return; + } + for (Resolver check : resolver.getResolvers()) { + if (check instanceof SimpleResolver) { + InetAddress address = ((SimpleResolver) check).getAddress() + .getAddress(); + if (list.contains(address)) { + resolver.deleteResolver(check); + continue; + } else { + check.setTimeout(30); + } + } else { + LOG.error("Not simple resolver!!!?" + check); + } + } + synchronized (Lookup.class) { + Lookup.setDefaultResolver(resolver); + Lookup.setDefaultSearchPath(ResolverConfig.getCurrentConfig() + .searchPath()); + } + StringBuilder message = new StringBuilder(); + message.append("DNS servers: "); + if (ResolverConfig.getCurrentConfig().servers() != null) { + for (String server : ResolverConfig.getCurrentConfig() + .servers()) { + message.append(server); + message.append(" "); + } + } + LOG.info(message.toString()); + } + } + /** + * Initializes the registry. + * + * @param conf the hadoop configuration + * @throws Exception if there are tcp/udp issues + */ + @Override + protected void serviceInit(Configuration conf) throws Exception { + super.serviceInit(conf); + + // create the zone. 
for now create a "dummy" SOA record + try { + updateDNSServer(conf); + setDomainName(conf); + + initializeZones(conf); + + initializeChannels(conf); + } catch (IOException e) { + LOG.error("Error initializing Registry DNS Server", e); + throw e; + } + } + + /** + * Initializes the registry based on available parameters in the hadoop + * configuration. + * + * @param conf the hadoop configuration + * @return the listener port + * @throws IOException + */ + void initializeZones(Configuration conf) throws IOException { + ttl = conf.getTimeDuration(KEY_DNS_TTL, 1L, TimeUnit.SECONDS); + RecordCreatorFactory.setTtl(ttl); + + setDNSSECEnabled(conf); + + initializeZonesFromFiles(conf); + + Zone registryZone = configureZone(Name.fromString(domainName), conf); + zones.put(registryZone.getOrigin(), registryZone); + + initializeReverseLookupZone(conf); + + StringBuilder builder = new StringBuilder(); + builder.append("DNS zones: ").append(System.lineSeparator()); + for (Map.Entry entry : zones.entrySet()) { + builder.append(System.lineSeparator()).append(entry.getValue()); + } + LOG.info(builder.toString()); + } + + /** + * Signs zone records if necessary (DNSSEC enabled). Zones may not have + * their NS and SOA records signed if they were initialized from master files. + */ + private void signZones() throws IOException { + if (isDNSSECEnabled()) { + Collection zoneCollection = zones.values(); + for (Zone zone : zoneCollection) { + Iterator itor = zone.iterator(); + while (itor.hasNext()) { + RRset rRset = (RRset) itor.next(); + Iterator sigs = rRset.sigs(); + if (!sigs.hasNext()) { + try { + signSiteRecord(zone, rRset.first()); + } catch (DNSSEC.DNSSECException e) { + throw new IOException(e); + } + } + } + } + } + } + + /** + * Initializes a zone by reading any zone file by the same name in the + * designated zone file directory. + * + * @param conf the Hadoop configuration object. 
+ * @throws IOException + */ + private void initializeZonesFromFiles(Configuration conf) throws IOException { + // should this be in HDFS? + String zonesDir = conf.get(KEY_DNS_ZONES_DIR); + if (zonesDir != null) { + Iterator iterator = FileUtils.iterateFiles(new File(zonesDir), + new IOFileFilter() { + @Override + public boolean accept( + File file) { + return file.getName().endsWith( + ZONE_SUFFIX); + } + + @Override + public boolean accept( + File file, + String s) { + return s.endsWith( + ZONE_SUFFIX); + } + }, null); + while (iterator.hasNext()) { + File file = iterator.next(); + String name = file.getName(); + name = name.substring(0, name.indexOf(ZONE_SUFFIX) + 1); + Zone zone = new SecureableZone(Name.fromString(name), + file.getAbsolutePath()); + zones.putIfAbsent(zone.getOrigin(), zone); + } + } + } + + /** + * Return the number of zones in the map. + * + * @return number of zones in the map + */ + @VisibleForTesting + protected int getZoneCount() { + return zones.size(); + } + + /** + * Initializes the reverse lookup zone (mapping IP to name). + * + * @param conf the Hadoop configuration. + * @throws IOException if the DNSSEC key can not be read. + */ + private void initializeReverseLookupZone(Configuration conf) + throws IOException { + // Determine if the subnet should be split into + // multiple reverse zones, this can be necessary in + // network configurations where the hosts and containers + // are part of the same subnet (i.e. the containers only use + // part of the subnet). 
+ Boolean shouldSplitReverseZone = conf.getBoolean(KEY_DNS_SPLIT_REVERSE_ZONE, + DEFAULT_DNS_SPLIT_REVERSE_ZONE); + if (shouldSplitReverseZone) { + int subnetCount = ReverseZoneUtils.getSubnetCountForReverseZones(conf); + addSplitReverseZones(conf, subnetCount); + // Single reverse zone + } else { + Name reverseLookupZoneName = getReverseZoneName(conf); + Zone reverseLookupZone = configureZone(reverseLookupZoneName, conf); + zones.put(reverseLookupZone.getOrigin(), reverseLookupZone); + } + } + + /** + * Create the zones based on the zone count. + * + * @param conf the Hadoop configuration. + * @param subnetCount number of subnets to create reverse zones for. + * @throws IOException if the DNSSEC key can not be read. + */ + @VisibleForTesting + protected void addSplitReverseZones(Configuration conf, int subnetCount) + throws IOException { + String subnet = conf.get(KEY_DNS_ZONE_SUBNET); + String range = conf.get(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE); + + // Add the split reverse zones + for (int idx = 0; idx < subnetCount; idx++) { + Name reverseLookupZoneName = getReverseZoneName(ReverseZoneUtils + .getReverseZoneNetworkAddress(subnet, Integer.parseInt(range), idx)); + Zone reverseLookupZone = configureZone(reverseLookupZoneName, conf); + zones.put(reverseLookupZone.getOrigin(), reverseLookupZone); + } + } + + /** + * Returns the list of reverse lookup zones. + * + * @param conf the hadoop configuration. + * @return the list of reverse zone names required based on the configuration + * properties. + */ + protected Name getReverseZoneName(Configuration conf) { + Name name = null; + String zoneSubnet = getZoneSubnet(conf); + if (zoneSubnet == null) { + LOG.warn("Zone subnet is not configured. 
Reverse lookups disabled"); + } else { + // is there a netmask + String mask = conf.get(KEY_DNS_ZONE_MASK); + if (mask != null) { + // get the range of IPs + SubnetUtils utils = new SubnetUtils(zoneSubnet, mask); + name = getReverseZoneName(utils, zoneSubnet); + } else { + name = getReverseZoneName(zoneSubnet); + } + } + return name; + } + + /** + * Return the subnet for the zone. this should be a network address for the + * subnet (ends in ".0"). + * + * @param conf the hadoop configuration. + * @return the zone subnet. + */ + private String getZoneSubnet(Configuration conf) { + String subnet = conf.get(KEY_DNS_ZONE_SUBNET); + if (subnet != null) { + final String[] bytes = subnet.split("\\."); + if (bytes.length == 3) { + subnet += ".0"; + } + } + return subnet; + } + + /** + * Return the reverse zone name based on the address. + * + * @param networkAddress the network address. + * @return the reverse zone name. + */ + private Name getReverseZoneName(String networkAddress) { + return getReverseZoneName(null, networkAddress); + } + + /** + * Return the reverse zone name based on the address. + * + * @param utils subnet utils + * @param networkAddress the network address. + * @return the reverse zone name. 
+ */ + private Name getReverseZoneName(SubnetUtils utils, String networkAddress) { + Name reverseZoneName = null; + boolean isLargeNetwork = false; + if (utils != null) { + isLargeNetwork = utils.getInfo().getAddressCount() > 256; + } + final String[] bytes = networkAddress.split("\\."); + if (bytes.length == 4) { + String reverseLookupZoneName = null; + if (isLargeNetwork) { + reverseLookupZoneName = + String.format("%s.%s.%s", + bytes[1], + bytes[0], + IN_ADDR_ARPA); + } else { + reverseLookupZoneName = + String.format("%s.%s.%s.%s", + bytes[2], + bytes[1], + bytes[0], + IN_ADDR_ARPA); + } + try { + reverseZoneName = Name.fromString(reverseLookupZoneName); + } catch (TextParseException e) { + LOG.warn("Unable to convert {} to DNS name", reverseLookupZoneName); + } + } + return reverseZoneName; + } + + /** + * Create the zone and its related zone associated DNS records (NS, SOA). + * + * @param zoneName domain name of the zone + * @param conf configuration reference. + * @return the zone. + * @throws IOException + */ + private Zone configureZone(Name zoneName, Configuration conf) + throws IOException { + bindHost = Name.fromString( + InetAddress.getLocalHost().getCanonicalHostName() + "."); + SOARecord soaRecord = new SOARecord(zoneName, DClass.IN, ttl, + bindHost, + bindHost, getSerial(), 86000, 7200, + 1209600, 600); + NSRecord nsRecord = new NSRecord(zoneName, DClass.IN, ttl, bindHost); + Zone zone = zones.get(zoneName); + if (zone == null) { + zone = new SecureableZone(zoneName, new Record[] {soaRecord, nsRecord}); + } + + try { + enableDNSSECIfNecessary(zone, conf, soaRecord, nsRecord); + } catch (NoSuchAlgorithmException e) { + throw new IOException(e); + } catch (InvalidKeySpecException e) { + throw new IOException(e); + } catch (DNSSEC.DNSSECException e) { + throw new IOException(e); + } + + return zone; + } + + /** + * Return a serial number based on the current date and time. + * + * @return the serial number. 
  /**
   * Return a serial number based on the current date and time.
   *
   * @return the serial number.
   */
  private long getSerial() {
    // Conventional yyyyMMddHH zone serial (e.g. 2017120914); fits in a long.
    Date curDate = new Date();
    SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyyMMddHH");
    String serial = simpleDateFormat.format(curDate);
    return Long.parseLong(serial);
  }

  /**
   * Set the value of the DNSSEC enabled property.
   *
   * @param conf the Hadoop configuration.
   */
  @VisibleForTesting
  protected void setDNSSECEnabled(Configuration conf) {
    dnssecEnabled = conf.getBoolean(KEY_DNSSEC_ENABLED, false);
  }

  /**
   * Is DNSSEC enabled?
   *
   * @return true if enabled, false otherwise.
   */
  private boolean isDNSSECEnabled() {
    return dnssecEnabled;
  }

  /**
   * Load the required public/private keys, create the zone DNSKEY record, and
   * sign the zone level records.
   *
   * @param zone the zone.
   * @param conf the configuration.
   * @param soaRecord the SOA record.
   * @param nsRecord the NS record.
   * @throws IOException if no public key is configured or the private key
   *         file cannot be read
   * @throws NoSuchAlgorithmException if the RSA key factory is unavailable
   * @throws InvalidKeySpecException if the private key material is invalid
   * @throws DNSSEC.DNSSECException if record signing fails
   */
  private void enableDNSSECIfNecessary(Zone zone, Configuration conf,
                                       SOARecord soaRecord,
                                       NSRecord nsRecord)
      throws IOException, NoSuchAlgorithmException, InvalidKeySpecException,
      DNSSEC.DNSSECException {
    if (isDNSSECEnabled()) {
      // read in the DNSKEY and create the DNSKEYRecord
      // TODO: reading these out of config seems wrong...
      String publicKey = conf.get(KEY_DNSSEC_PUBLIC_KEY);
      if (publicKey == null) {
        throw new IOException("DNSSEC Key not configured");
      }
      //TODO - perhaps read in actual DNSKEY record structure?
      Name zoneName = zone.getOrigin();
      // Reuse a previously built DNSKEY for this zone if one exists.
      DNSKEYRecord dnskeyRecord = dnsKeyRecs.get(zoneName);
      if (dnskeyRecord == null) {
        // Configured key is base64 text of the raw key material.
        byte[] key = Base64.decodeBase64(publicKey.getBytes("UTF-8"));
        dnskeyRecord = new DNSKEYRecord(zoneName,
            DClass.IN, ttl,
            DNSKEYRecord.Flags.ZONE_KEY,
            DNSKEYRecord.Protocol.DNSSEC,
            DNSSEC.Algorithm.RSASHA256, key);
        dnsKeyRecs.putIfAbsent(zoneName, dnskeyRecord);
      }
      LOG.info("Registering {}", dnskeyRecord);
      // Write-lock while mutating the zone and signing its records.
      try (CloseableLock lock = writeLock.lock()) {
        zone.addRecord(dnskeyRecord);

        String privateKeyFile = conf.get(KEY_DNSSEC_PRIVATE_KEY_FILE,
            DEFAULT_DNSSEC_PRIVATE_KEY_FILE);

        // The private key file is a BIND-style properties file containing
        // at least "Modulus" and "PrivateExponent" (base64 values).
        Properties props = new Properties();
        try (
            FileInputStream inputStream = new FileInputStream(privateKeyFile)) {
          props.load(inputStream);
        }

        String privateModulus = props.getProperty("Modulus");
        String privateExponent = props.getProperty("PrivateExponent");

        RSAPrivateKeySpec privateSpec = new RSAPrivateKeySpec(
            new BigInteger(1, Base64.decodeBase64(privateModulus)),
            new BigInteger(1, Base64.decodeBase64(privateExponent)));

        KeyFactory factory = KeyFactory.getInstance("RSA");
        privateKey = factory.generatePrivate(privateSpec);

        signSiteRecord(zone, dnskeyRecord);
        signSiteRecord(zone, soaRecord);
        signSiteRecord(zone, nsRecord);
      }
      // create required DS records

      // domain
//      DSRecord dsRecord = new DSRecord(zoneName, DClass.IN, ttl,
//          DSRecord.Digest.SHA1, dnskeyRecord);
//      zone.addRecord(dsRecord);
//      signSiteRecord(zone, dsRecord);
    }
  }

  /**
   * Sign a DNS record.
   *
   * @param zone the zone reference
   * @param record the record to sign.
+ * @throws DNSSEC.DNSSECException + */ + private void signSiteRecord(Zone zone, Record record) + throws DNSSEC.DNSSECException { + RRset rrset = zone.findExactMatch(record.getName(), + record.getType()); + Calendar cal = Calendar.getInstance(); + Date inception = cal.getTime(); + cal.add(Calendar.YEAR, 1); + Date expiration = cal.getTime(); + RRSIGRecord rrsigRecord = + DNSSEC.sign(rrset, dnsKeyRecs.get(zone.getOrigin()), + privateKey, inception, expiration); + LOG.info("Adding {}", record); + rrset.addRR(rrsigRecord); + } + + /** + * Sets the zone/domain name. The name will be read from the configuration + * and the code will ensure the name is absolute. + * + * @param conf the configuration. + * @throws IOException + */ + void setDomainName(Configuration conf) throws IOException { + domainName = conf.get(KEY_DNS_DOMAIN); + if (domainName == null) { + throw new IOException("No DNS domain name specified"); + } + if (!domainName.endsWith(".")) { + domainName += "."; + } + } + + /** + * Stops the registry. + * + * @throws Exception if the service stop generates an issue. + */ + @Override + protected void serviceStop() throws Exception { + stopExecutor(); + super.serviceStop(); + } + + /** + * Shuts down the leveraged executor service. + */ + protected synchronized void stopExecutor() { + if (executor != null) { + executor.shutdownNow(); + } + } + + /** + * Creates a DNS error response. + * + * @param in the byte array detailing the error. + * @return the error message, in bytes + */ + public byte[] formErrorMessage(byte[] in) { + Header header; + try { + header = new Header(in); + } catch (IOException e) { + return null; + } + return buildErrorMessage(header, Rcode.FORMERR, null); + } + + /** + * Process a TCP request. + * + * @param ch the socket channel for the request. + * @throws IOException if the tcp processing generates an issue. 
+ */ + public void nioTCPClient(SocketChannel ch) throws IOException { + try { + // query sizes are small, so the following two lines should work + // in all instances + ByteBuffer buf = ByteBuffer.allocate(1024); + ch.read(buf); + buf.flip(); + int messageLength = getMessgeLength(buf); + + byte[] in = new byte[messageLength]; + + buf.get(in, 0, messageLength); + + Message query; + byte[] response; + try { + query = new Message(in); + LOG.info("received TCP query {}", query.getQuestion()); + response = generateReply(query, ch.socket()); + if (response == null) { + return; + } + } catch (IOException e) { + response = formErrorMessage(in); + } + + ByteBuffer out = ByteBuffer.allocate(response.length + 2); + out.clear(); + byte[] data = new byte[2]; + + data[1] = (byte)(response.length & 0xFF); + data[0] = (byte)((response.length >> 8) & 0xFF); + out.put(data); + out.put(response); + out.flip(); + + while(out.hasRemaining()) { + ch.write(out); + } + + } catch (IOException e) { + throw NetUtils.wrapException(ch.socket().getInetAddress().getHostName(), + ch.socket().getPort(), + ch.socket().getLocalAddress().getHostName(), + ch.socket().getLocalPort(), e); + } finally { + IOUtils.closeStream(ch); + } + + } + + /** + * Calculate the inbound message length, which is related in the message as an + * unsigned short value. + * + * @param buf the byte buffer containing the message. + * @return the message length + * @throws EOFException + */ + private int getMessgeLength(ByteBuffer buf) throws EOFException { + int ch1 = buf.get(); + int ch2 = buf.get(); + if ((ch1 | ch2) < 0) { + throw new EOFException(); + } + return (ch1 << 8) + (ch2 & 0xff); + } + + /** + * Monitor the TCP socket for inbound requests. + * + * @param serverSocketChannel the server socket channel + * @param addr the local inet address + * @param port the listener (local) port + * @throws Exception if the tcp processing fails. 
+ */ + public void serveNIOTCP(ServerSocketChannel serverSocketChannel, + InetAddress addr, int port) throws Exception { + try { + + while (true) { + final SocketChannel socketChannel = serverSocketChannel.accept(); + if (socketChannel != null) { + executor.submit(new Callable() { + @Override + public Boolean call() throws Exception { + nioTCPClient(socketChannel); + return true; + } + }); + + } else { + Thread.sleep(500); + } + } + } catch (IOException e) { + throw NetUtils.wrapException(addr.getHostName(), port, + addr.getHostName(), port, e); + } + } + + /** + * Open the TCP listener. + * + * @param addr the host address. + * @param port the host port. + * @return the created server socket channel. + * @throws IOException + */ + private ServerSocketChannel openTCPChannel(InetAddress addr, int port) + throws IOException { + ServerSocketChannel serverSocketChannel = ServerSocketChannel.open(); + try { + serverSocketChannel.socket().bind(new InetSocketAddress(addr, port)); + serverSocketChannel.configureBlocking(false); + } catch (IOException e) { + throw NetUtils.wrapException(null, 0, + InetAddress.getLocalHost().getHostName(), + port, e); + } + return serverSocketChannel; + } + + /** + * Create the thread (Callable) monitoring the TCP listener. + * + * @param addr host address. + * @param port host port. + * @throws Exception if the tcp listener generates an error. + */ + public void addNIOTCP(final InetAddress addr, final int port) + throws Exception { + final ServerSocketChannel tcpChannel = openTCPChannel(addr, port); + executor.submit(new Callable() { + @Override + public Boolean call() throws Exception { + try { + serveNIOTCP(tcpChannel, addr, port); + } catch (Exception e) { + LOG.error("Error initializing DNS TCP listener", e); + throw e; + } + + return true; + } + + }); + } + + /** + * Create the thread monitoring the socket for inbound UDP requests. + * + * @param addr host address. + * @param port host port. 
+ * @throws Exception if the UDP listener creation generates an error. + */ + public void addNIOUDP(final InetAddress addr, final int port) + throws Exception { + final DatagramChannel udpChannel = openUDPChannel(addr, port); + executor.submit(new Callable() { + @Override + public Boolean call() throws Exception { + try { + serveNIOUDP(udpChannel, addr, port); + } catch (Exception e) { + LOG.error("Error initializing DNS UDP listener", e); + throw e; + } + return true; + } + }); + } + + /** + * Process an inbound UDP request. + * + * @param channel the UDP datagram channel. + * @param addr local host address. + * @param port local port. + * @throws IOException if the UDP processing fails. + */ + private void serveNIOUDP(DatagramChannel channel, + InetAddress addr, int port) throws Exception { + SocketAddress remoteAddress = null; + try { + + ByteBuffer input = ByteBuffer.allocate(4096); + ByteBuffer output = ByteBuffer.allocate(4096); + byte[] in = null; + + while (true) { + input.clear(); + try { + remoteAddress = channel.receive(input); + } catch (IOException e) { + LOG.debug("Error during message receipt", e); + continue; + } + Message query; + byte[] response = null; + try { + int position = input.position(); + in = new byte[position]; + input.flip(); + input.get(in); + query = new Message(in); + LOG.info("{}: received UDP query {}", remoteAddress, + query.getQuestion()); + response = generateReply(query, null); + if (response == null) { + continue; + } + } catch (IOException e) { + response = formErrorMessage(in); + } + output.clear(); + output.put(response); + output.flip(); + + LOG.debug("{}: sending response", remoteAddress); + channel.send(output, remoteAddress); + } + } catch (Exception e) { + if (e instanceof IOException && remoteAddress != null) { + throw NetUtils.wrapException(addr.getHostName(), + port, + ((InetSocketAddress) remoteAddress).getHostName(), + ((InetSocketAddress) remoteAddress).getPort(), + (IOException) e); + } else { + throw e; + } + 
} + } + + /** + * Create and UDP listener socket. + * + * @param addr host address. + * @param port host port. + * @return + * @throws IOException if listener creation fails. + */ + private DatagramChannel openUDPChannel(InetAddress addr, int port) + throws IOException { + DatagramChannel channel = DatagramChannel.open(); + try { + channel.socket().bind(new InetSocketAddress(addr, port)); + } catch (IOException e) { + throw NetUtils.wrapException(null, 0, + InetAddress.getLocalHost().getHostName(), + port, e); + } + return channel; + } + + /** + * Create an error message. + * + * @param header the response header. + * @param rcode the response code. + * @param question the question record. + * @return the error message. + */ + byte[] buildErrorMessage(Header header, int rcode, Record question) { + Message response = new Message(); + response.setHeader(header); + for (int i = 0; i < 4; i++) { + response.removeAllRecords(i); + } + response.addRecord(question, Section.QUESTION); + header.setRcode(rcode); + return response.toWire(); + } + + /** + * Generate an error message based on inbound query. + * + * @param query the query. + * @param rcode the response code for the specific error. + * @return the error message. + */ + public byte[] errorMessage(Message query, int rcode) { + return buildErrorMessage(query.getHeader(), rcode, + query.getQuestion()); + } + + /** + * Generate the response for the inbound DNS query. + * + * @param query the query. + * @param s the socket associated with the query. + * @return the response, in bytes. + * @throws IOException if reply generation fails. 
  /**
   * Generate the response for the inbound DNS query.
   *
   * @param query the query.
   * @param s the socket associated with the query (null for UDP; non-null
   *          enables larger responses and AXFR handling).
   * @return the response in wire format, or null if the "query" was itself
   *         a response (QR flag set).
   * @throws IOException if reply generation fails.
   */
  byte[] generateReply(Message query, Socket s)
      throws IOException {
    Header header;
    // NOTE(review): badversion is never assigned or read here — appears to
    // be leftover from the jnamed-style EDNS version check; confirm.
    boolean badversion;
    int maxLength;
    int flags = 0;

    OPTRecord queryOPT = query.getOPT();
    maxLength = getMaxLength(s, queryOPT);

    header = query.getHeader();
    // A message with QR set is a response, not a query; ignore it.
    if (header.getFlag(Flags.QR)) {
      LOG.debug("returning null");
      return null;
    }
    if (header.getRcode() != Rcode.NOERROR) {
      return errorMessage(query, Rcode.FORMERR);
    }
    // Only standard queries are supported.
    if (header.getOpcode() != Opcode.QUERY) {
      return errorMessage(query, Rcode.NOTIMP);
    }

    Record queryRecord = query.getQuestion();

    // Client requested DNSSEC records via the EDNS DO bit.
    if (queryOPT != null && (queryOPT.getFlags() & ExtendedFlags.DO) != 0) {
      flags = FLAG_DNSSECOK;
    }

    Message response = new Message(query.getHeader().getID());
    response.getHeader().setFlag(Flags.QR);
    if (query.getHeader().getFlag(Flags.RD)) {
      response.getHeader().setFlag(Flags.RD);
      response.getHeader().setFlag(Flags.RA);
    }
    response.addRecord(queryRecord, Section.QUESTION);

    Name name = queryRecord.getName();
    int type = queryRecord.getType();
    int dclass = queryRecord.getDClass();

    TSIGRecord queryTSIG = query.getTSIG();
    // Zone transfers are only meaningful over TCP.
    if (type == Type.AXFR && s != null) {
      return doAXFR(name, query, null, queryTSIG, s);
    }
    if (!Type.isRR(type) && type != Type.ANY) {
      return errorMessage(query, Rcode.NOTIMP);
    }

    LOG.debug("calling addAnswer");
    byte rcode = addAnswer(response, name, type, dclass, 0, flags);
    // Not answerable locally: try the configured upstream resolvers.
    if (rcode != Rcode.NOERROR) {
      rcode = remoteLookup(response, name);
      response.getHeader().setRcode(rcode);
    }
    addAdditional(response, flags);

    if (queryOPT != null) {
      // Echo an OPT record, advertising DO when the client requested DNSSEC.
      int optflags = (flags == FLAG_DNSSECOK) ? ExtendedFlags.DO : 0;
      OPTRecord opt = new OPTRecord((short) 4096, rcode >>> 16, (byte) 0,
          optflags);
      response.addRecord(opt, Section.ADDITIONAL);
    }

    return response.toWire(maxLength);
  }

  /**
   * Lookup record from upstream DNS servers.
+ */ + private byte remoteLookup(Message response, Name name) { + // Forward lookup to primary DNS servers + Record[] answers = getRecords(name, Type.ANY); + try { + for (Record r : answers) { + if (r.getType() == Type.SOA) { + response.addRecord(r, Section.AUTHORITY); + } else { + response.addRecord(r, Section.ANSWER); + } + } + } catch (NullPointerException e) { + return Rcode.NXDOMAIN; + } catch (Throwable e) { + return Rcode.SERVFAIL; + } + return Rcode.NOERROR; + } + + /** + * Requests records for the given resource name. + * + * @param name - query string + * @param type - type of DNS record to lookup + * @return DNS records + */ + protected Record[] getRecords(Name name, int type) { + try { + return new Lookup(name, type).run(); + } catch (NullPointerException | + ExceptionInInitializerError e) { + LOG.error("Fail to lookup: " + name, e); + } + return null; + } + + /** + * Create a query to forward to the primary DNS server (if configured). + * NOTE: Experimental + * + * @param query the inbound query. + * @return the query to forward to the primary server. + * @throws NameTooLongException + * @throws TextParseException if query creation fails. + */ + private Message createPrimaryQuery(Message query) + throws NameTooLongException, TextParseException { + Name name = query.getQuestion().getName(); + if (name.labels() > 0 && name.labels() <= 2) { + // short relative or absolute name. this code may not be necessary - + // OS resolution utilities probably append the search paths defined + // in resolv.conf prior to the lookup + int id = query.getHeader().getID(); + String queryName = name.getLabelString(0); + Name qualifiedName = Name.concatenate(Name.fromString(queryName), + Name.fromString(domainName)); + LOG.info("Received query {}. 
Forwarding query {}", name, qualifiedName); + Record question = Record.newRecord(qualifiedName, + query.getQuestion().getType(), + query.getQuestion().getDClass()); + query = Message.newQuery(question); + query.getHeader().setID(id); + } + return query; + } + + /** + * Calculate the max length for a response. + * + * @param s the request socket. + * @param queryOPT describes Extended DNS (EDNS) properties of a Message. + * @return the length of the response. + */ + private int getMaxLength(Socket s, OPTRecord queryOPT) { + int maxLength; + if (s != null) { + maxLength = 65535; + } else if (queryOPT != null) { + maxLength = Math.max(queryOPT.getPayloadSize(), 512); + } else { + maxLength = 512; + } + return maxLength; + } + + /** + * Add additional information to a DNS response section if a glue name is + * specified. + * + * @param response the response message. + * @param section the section of the response (e.g. ANSWER, AUTHORITY) + * @param flags the flags. + */ + private void addAdditional2(Message response, int section, int flags) { + Record[] records = response.getSectionArray(section); + for (int i = 0; i < records.length; i++) { + Record r = records[i]; + Name glueName = r.getAdditionalName(); + if (glueName != null) { + addGlue(response, glueName, flags); + } + } + } + + /** + * Process any additional records indicated for both the ANSWER and AUTHORITY + * sections of the response. + * + * @param response the response message. + * @param flags the flags. + */ + private void addAdditional(Message response, int flags) { + addAdditional2(response, Section.ANSWER, flags); + addAdditional2(response, Section.AUTHORITY, flags); + } + + /** + * Add the specific record indicated by the "glue", or the mapping to a + * specific host. + * + * @param response the response message. + * @param name the name of the glue record. + * @param flags the flags. 
+ */ + private void addGlue(Message response, Name name, int flags) { + RRset a = findExactMatch(name, Type.A); + if (a == null) { + return; + } + addRRset(name, response, a, Section.ADDITIONAL, flags); + } + + /** + * Find the record set that matches the requested name and type. + * + * @param name the requested name. + * @param type the record type. + * @return the set of records with the given name and type. + */ + public RRset findExactMatch(Name name, int type) { + try (CloseableLock lock = readLock.lock()) { + Zone zone = findBestZone(name); + if (zone != null) { + return zone.findExactMatch(name, type); + } + } + + return null; + } + + /** + * Find the zone that correlates to the provided name. + * + * @param name the name to be matched to a zone. + * @return the zone. + */ + @Override public Zone findBestZone(Name name) { + Zone foundzone = null; + foundzone = zones.get(name); + if (foundzone != null) { + return foundzone; + } + int labels = name.labels(); + for (int i = 1; i < labels; i++) { + Name tname = new Name(name, i); + foundzone = zones.get(tname); + if (foundzone != null) { + return foundzone; + } + } + return null; + } + + /** + * Add the answer section to the response. + * + * @param response the response message. + * @param name the name of the answer record. + * @param type the type of record. + * @param dclass the DNS class. + * @param iterations iteration count. + * @param flags + * @return the response code. 
  /**
   * Add the answer section to the response.
   *
   * @param response the response message.
   * @param name the name of the answer record.
   * @param type the type of record.
   * @param dclass the DNS class.
   * @param iterations iteration count (bounds CNAME chain recursion).
   * @param flags DNSSEC processing flags (FLAG_DNSSECOK / FLAG_SIGONLY).
   * @return the response code.
   */
  byte addAnswer(Message response, Name name, int type, int dclass,
                 int iterations, int flags) {
    SetResponse sr = null;
    byte rcode = Rcode.NOERROR;

    // Cap CNAME chain traversal to avoid loops.
    if (iterations > 6) {
      return Rcode.NOERROR;
    }

    // Signature-only queries are answered from the full set plus RRSIGs.
    if (type == Type.SIG || type == Type.RRSIG) {
      type = Type.ANY;
      flags |= FLAG_SIGONLY;
    }

    Zone zone = findBestZone(name);

    LOG.debug("finding record");
    try (CloseableLock lock = readLock.lock()) {
      if (zone != null) {
        sr = zone.findRecords(name, type);
      } else {
        // Name is outside every zone we serve.
        rcode = Rcode.NOTAUTH;
      }
    }
    LOG.info("found local record? {}", sr != null && sr.isSuccessful());

    if (sr != null) {
      if (sr.isCNAME()) {
        // Answer with the CNAME, then recurse on its target.
        CNAMERecord cname = sr.getCNAME();
        RRset rrset = zone.findExactMatch(cname.getName(), Type.CNAME);
        addRRset(name, response, rrset, Section.ANSWER, flags);
        if (iterations == 0) {
          response.getHeader().setFlag(Flags.AA);
        }
        rcode = addAnswer(response, cname.getTarget(),
            type, dclass, iterations + 1, flags);
      }
      if (sr.isNXDOMAIN()) {
        response.getHeader().setRcode(Rcode.NXDOMAIN);
        if (isDNSSECEnabled()) {
          // Negative responses need an NXT record for DNSSEC validation.
          try {
            addNXT(response, flags);
          } catch (Exception e) {
            LOG.warn("Unable to add NXTRecord to AUTHORITY Section", e);
          }
        }
        addSOA(response, zone, flags);
        if (iterations == 0) {
          response.getHeader().setFlag(Flags.AA);
        }
        rcode = Rcode.NXDOMAIN;
      } else if (sr.isNXRRSET()) {
        // Name exists but has no records of the requested type.
        LOG.info("No data found the given name {} and type {}", name, type);
        addSOA(response, zone, flags);
        if (iterations == 0) {
          response.getHeader().setFlag(Flags.AA);
        }
      } else if (sr.isSuccessful()) {
        RRset[] rrsets = sr.answers();
        LOG.info("found answers {}", rrsets);
        for (int i = 0; i < rrsets.length; i++) {
          addRRset(name, response, rrsets[i],
              Section.ANSWER, flags);
        }
        addNS(response, zone, flags);
        if (iterations == 0) {
          response.getHeader().setFlag(Flags.AA);
        }
      }
    } else {
      // NOTE(review): sr is only null when zone was null above, so this
      // zone != null branch appears unreachable — confirm before relying
      // on the default-zone NS behavior here.
      if (zone != null) {
        Name defaultDomain = null;
        try {
          defaultDomain = Name.fromString(domainName);
          zone = zones.get(defaultDomain);
          addNS(response, zone, flags);
          if (iterations == 0) {
            response.getHeader().setFlag(Flags.AA);
          }
        } catch (TextParseException e) {
          LOG.warn("Unable to obtain default zone for unknown name response",
              e);
        }
      }
    }

    return rcode;
  }

  /**
   * Add the SOA record (describes the properties of the zone) to the
   * authority section of the response.
   *
   * @param response the response message.
   * @param zone the DNS zone.
   * @param flags the DNSSEC processing flags.
   */
  private void addSOA(Message response, Zone zone, int flags) {
    RRset soa = zone.findExactMatch(zone.getOrigin(), Type.SOA);
    addRRset(soa.getName(), response, soa,
        Section.AUTHORITY, flags);
  }

  /**
   * Add the NXT record to the authority
   * section of the response.
   *
   * @param response the response message.
   * @param flags the DNSSEC processing flags.
   * @throws DNSSEC.DNSSECException if signing the temporary record fails.
   * @throws IOException if the record cannot be added.
   */
  private void addNXT(Message response, int flags)
      throws DNSSEC.DNSSECException, IOException {
    Record nxtRecord = getNXTRecord(
        response.getSectionArray(Section.QUESTION)[0]);
    Zone zone = findBestZone(nxtRecord.getName());
    // Temporarily add the NXT so it is signed/served, then remove it again.
    addRecordCommand.exec(zone, nxtRecord);
    RRset nxtRR = zone.findExactMatch(nxtRecord.getName(), Type.NXT);
    addRRset(nxtRecord.getName(), response, nxtRR, Section.AUTHORITY, flags);

    removeRecordCommand.exec(zone, nxtRecord);
  }

  /**
   * Return an NXT record required to validate negative responses. If there is
   * an issue returning the NXT record, a SOA record will be returned.
   *
   * @param query the query record.
   * @return an NXT record.
   */
  private Record getNXTRecord(Record query) {
    Record response = null;
    SecureableZone zone = (SecureableZone) findBestZone(query.getName());
    if (zone != null) {
      response = zone.getNXTRecord(query, zone);
      if (response == null) {
        // Fall back to the SOA when no NXT can be computed.
        response = zone.getSOA();
      }
    }

    return response;
  }

  /**
   * Add the name server info to the authority section.
   *
   * @param response the response message.
   * @param zone the DNS zone.
   * @param flags the flags.
+ */ + private void addNS(Message response, Zone zone, int flags) { + RRset nsRecords = zone.getNS(); + addRRset(nsRecords.getName(), response, nsRecords, + Section.AUTHORITY, flags); + } + + /** + * Add the provided record set to the response section specified. + * + * @param name the name associated with the record set. + * @param response the response message. + * @param rrset the record set. + * @param section the response section to which the record set will be added. + * @param flags the flags. + */ + private void addRRset(Name name, Message response, RRset rrset, int section, + int flags) { + for (int s = 1; s <= section; s++) { + if (response.findRRset(name, rrset.getType(), s)) { + return; + } + } + if ((flags & FLAG_SIGONLY) == 0) { + Iterator it = rrset.rrs(); + while (it.hasNext()) { + Record r = (Record) it.next(); + if (r.getName().isWild() && !name.isWild()) { + r = r.withName(name); + } + response.addRecord(r, section); + } + } + if ((flags & (FLAG_SIGONLY | FLAG_DNSSECOK)) != 0) { + Iterator it = rrset.sigs(); + while (it.hasNext()) { + Record r = (Record) it.next(); + if (r.getName().isWild() && !name.isWild()) { + r = r.withName(name); + } + response.addRecord(r, section); + } + } + } + + /** + * Perform a zone transfer. + * + * @param name the zone name. + * @param query the query. + * @param tsig the query signature. + * @param qtsig the signature record. + * @param s the connection socket. + * @return an error message if there is no matching zone + * or null due to error. 
+   */
+  byte[] doAXFR(Name name, Message query, TSIG tsig, TSIGRecord qtsig,
+      Socket s) {
+    boolean first = true;
+    Zone zone = findBestZone(name);
+    if (zone == null) {
+      return errorMessage(query, Rcode.REFUSED);
+    }
+    Iterator it = zone.AXFR();
+    try {
+      DataOutputStream dataOut;
+      dataOut = new DataOutputStream(s.getOutputStream());
+      int id = query.getHeader().getID();
+      while (it.hasNext()) {
+        RRset rrset = (RRset) it.next();
+        Message response = new Message(id);
+        Header header = response.getHeader();
+        header.setFlag(Flags.QR);
+        header.setFlag(Flags.AA);
+        addRRset(rrset.getName(), response, rrset,
+            Section.ANSWER, FLAG_DNSSECOK);
+        if (tsig != null) {
+          tsig.applyStream(response, qtsig, first);
+          qtsig = response.getTSIG();
+        }
+        first = false;
+        byte[] out = response.toWire();
+        dataOut.writeShort(out.length);
+        dataOut.write(out);
+      }
+    } catch (IOException ex) {
+      // Log through SLF4J (with the cause) rather than writing to stdout so
+      // the failure is visible in the server logs.
+      LOG.warn("AXFR failed", ex);
+    }
+    try {
+      s.close();
+    } catch (IOException ex) {
+      // Best-effort close of the transfer socket; nothing more can be done,
+      // but don't swallow the exception silently.
+      LOG.debug("Unable to close AXFR socket", ex);
+    }
+    return null;
+  }
+
+  /**
+   * Perform the registry operation (register or delete). This method will take
+   * the provided service record and either add or remove the DNS records
+   * indicated.
+   *
+   * @param path the ZK path for the service record.
+   * @param record the service record.
+   * @param command the registry command (REGISTER or DELETE).
+   * @throws IOException if there is an error performing registry operation.
+   */
+  private void op(String path, ServiceRecord record, RegistryCommand command)
+      throws IOException {
+    ServiceRecordProcessor processor;
+    try {
+      String yarnPersistanceValue = record.get(
+          YarnRegistryAttributes.YARN_PERSISTENCE);
+      if (yarnPersistanceValue != null) {
+        if (yarnPersistanceValue.equals(CONTAINER)) {
+          // container registration.
the logic to identify and create the + // container entry needs to be enhanced/more accurate and associate + // to correct host + processor = + new ContainerServiceRecordProcessor(record, path, domainName, + this); + } else { + LOG.debug("Creating ApplicationServiceRecordProcessor for {}", + yarnPersistanceValue); + processor = + new ApplicationServiceRecordProcessor(record, path, domainName, + this); + } + processor.manageDNSRecords(command); + } else { + LOG.warn("Yarn Registry record {} does not contain {} attribute ", + record.toString(), YarnRegistryAttributes.YARN_PERSISTENCE); + } + } catch (Exception e) { + throw new IOException(e); + } + + } + + /** + * Return the username found in the ZK path. + * + * @param path the ZK path. + * @return the user name. + */ + private String getUsername(String path) { + String user = "anonymous"; + Matcher matcher = USER_NAME.matcher(path); + if (matcher.find()) { + user = matcher.group(1); + } + return user; + } + + /** + * Register DNS records based on the provided service record. + * + * @param path the ZK path of the service record. + * @param record record providing DNS registration info. + * @throws IOException if registration causes an error. + */ + @Override + public void register(String path, ServiceRecord record) throws IOException { + op(path, record, addRecordCommand); + } + + /** + * Delete the DNS records generated by the provided service record. + * + * @param path the ZK path for the given record. + * @param record the service record + * @throws IOException if deletion causes and error. + */ + @Override + public void delete(String path, ServiceRecord record) throws IOException { + op(path, record, removeRecordCommand); + } + + /** + * An interface representing a registry associated function/command (see + * command pattern). + */ + interface RegistryCommand { + void exec(Zone zone, Record record) throws IOException; + + String getLogDescription(); + } + + /** + * The "add record" command. 
+ */ + private final RegistryCommand addRecordCommand = new RegistryCommand() { + @Override + public void exec(Zone zone, Record record) throws IOException { + if (zone != null) { + try (CloseableLock lock = writeLock.lock()) { + zone.addRecord(record); + LOG.info("Registered {}", record); + if (isDNSSECEnabled()) { + Calendar cal = Calendar.getInstance(); + Date inception = cal.getTime(); + cal.add(Calendar.YEAR, 1); + Date expiration = cal.getTime(); + RRset rRset = + zone.findExactMatch(record.getName(), record.getType()); + try { + DNSKEYRecord dnskeyRecord = dnsKeyRecs.get(zone.getOrigin()); + RRSIGRecord rrsigRecord = + DNSSEC.sign(rRset, dnskeyRecord, privateKey, + inception, expiration); + LOG.info("Adding {}", rrsigRecord); + rRset.addRR(rrsigRecord); + + //addDSRecord(zone, record.getName(), record.getDClass(), + // record.getTTL(), inception, expiration); + + } catch (DNSSEC.DNSSECException e) { + throw new IOException(e); + } + } + } + } else { + LOG.warn("Unable to find zone matching record {}", record); + } + } + + /** + * Add a DS record associated with the input name. + * @param zone the zone. + * @param name the record name. + * @param dClass the DNS class. + * @param dsTtl the ttl value. + * @param inception the time of inception of the record. + * @param expiration the expiry time of the record. + * @throws DNSSEC.DNSSECException if the addition of DS record fails. 
+ */ + private void addDSRecord(Zone zone, + Name name, int dClass, long dsTtl, + Date inception, + Date expiration) throws DNSSEC.DNSSECException { + RRset rRset; + RRSIGRecord rrsigRecord; + + DNSKEYRecord dnskeyRecord = dnsKeyRecs.get(zone.getOrigin()); + DSRecord dsRecord = new DSRecord(name, dClass, + dsTtl, DSRecord.Digest.SHA1, + dnskeyRecord); + zone.addRecord(dsRecord); + LOG.info("Adding {}", dsRecord); + rRset = zone.findExactMatch(dsRecord.getName(), dsRecord.getType()); + + rrsigRecord = DNSSEC.sign(rRset, dnskeyRecord, privateKey, + inception, expiration); + rRset.addRR(rrsigRecord); + } + + @Override + public String getLogDescription() { + return "Registering "; + } + }; + + /** + * The "remove record" command. + */ + private final RegistryCommand removeRecordCommand = new RegistryCommand() { + @Override + public void exec(Zone zone, Record record) throws IOException { + if (zone == null) { + LOG.error("Unable to remove record because zone is null: {}", record); + return; + } + zone.removeRecord(record); + LOG.info("Removed {}", record); + if (isDNSSECEnabled()) { + RRset rRset = zone.findExactMatch(record.getName(), Type.DS); + if (rRset != null) { + zone.removeRecord(rRset.first()); + } + } + } + + @Override + public String getLogDescription() { + return "Deleting "; + } + }; + + /** + * An implementation allowing for obtaining and releasing a lock. 
+ */ + public static class CloseableLock implements AutoCloseable { + private Lock lock; + + public CloseableLock(Lock lock) { + this.lock = lock; + } + + public CloseableLock lock() { + lock.lock(); + return this; + } + + @Override + public void close() { + lock.unlock(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java new file mode 100644 index 0000000..c7f6831 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java @@ -0,0 +1,267 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.registry.server.dns; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.registry.client.api.DNSOperationsFactory; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.binding.RegistryPathUtils; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.registry.client.impl.zk.PathListener; +import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService; +import org.apache.hadoop.registry.client.types.RegistryPathStatus; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.ShutdownHookManager; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * A server/service that starts and manages the lifecycle of a DNS registry + * instance. + */ +public class RegistryDNSServer extends CompositeService { + + + public static final int SHUTDOWN_HOOK_PRIORITY = 30; + private RegistryDNS registryDNS; + private RegistryOperationsService registryOperations; + private static final Logger LOG = + LoggerFactory.getLogger(RegistryDNS.class); + private ConcurrentMap pathToRecordMap; + + /** + * Creates the DNS server. + * @param name the server name. + * @param registryDNS the registry DNS instance. 
+ */ + public RegistryDNSServer(String name, final RegistryDNS registryDNS) { + super(name); + this.registryDNS = registryDNS; + } + + /** + * Initializes the DNS server. + * @param conf the hadoop configuration instance. + * @throws Exception if service initialization fails. + */ + @Override + protected void serviceInit(Configuration conf) throws Exception { + + pathToRecordMap = new ConcurrentHashMap<>(); + + registryOperations = new RegistryOperationsService("RegistryDNSOperations"); + addService(registryOperations); + + if (registryDNS == null) { + registryDNS = (RegistryDNS) DNSOperationsFactory.createInstance(conf); + } + addService(registryDNS); + + super.serviceInit(conf); + } + + /** + * Starts the server. + * @throws Exception if service start fails. + */ + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + manageRegistryDNS(); + } + + /** + * Performs operations required to setup the DNS registry instance (e.g. sets + * up a path listener to react to service record creation/deletion and invoke + * the appropriate registry method). + */ + private void manageRegistryDNS() { + + try { + registryOperations.monitorRegistryEntries(); + registryOperations.registerPathListener(new PathListener() { + private String registryRoot = getConfig(). 
+ get(RegistryConstants.KEY_REGISTRY_ZK_ROOT, + RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); + + @Override + public void nodeAdded(String path) throws IOException { + // get a listing of service records + String relativePath = getPathRelativeToRegistryRoot(path); + String child = RegistryPathUtils.lastPathEntry(path); + Map map = new HashMap<>(); + map.put(child, registryOperations.stat(relativePath)); + Map records = + RegistryUtils.extractServiceRecords(registryOperations, + getAdjustedParentPath(path), + map); + processServiceRecords(records, register); + pathToRecordMap.putAll(records); + } + + private String getAdjustedParentPath(String path) { + Preconditions.checkNotNull(path); + String adjustedPath = null; + adjustedPath = getPathRelativeToRegistryRoot(path); + try { + return RegistryPathUtils.parentOf(adjustedPath); + } catch (PathNotFoundException e) { + // attempt to use passed in path + return path; + } + } + + private String getPathRelativeToRegistryRoot(String path) { + String adjustedPath; + if (path.equals(registryRoot)) { + adjustedPath = "/"; + } else { + adjustedPath = path.substring(registryRoot.length()); + } + return adjustedPath; + } + + @Override + public void nodeRemoved(String path) throws IOException { + ServiceRecord record = pathToRecordMap.remove(path.substring( + registryRoot.length())); + processServiceRecord(path, record, delete); + } + + }); + + // create listener for record deletions + + } catch (Exception e) { + LOG.warn("Unable to monitor the registry. DNS support disabled.", e); + } + } + + /** + * A registry management command interface. + */ + interface ManagementCommand { + void exec(String path, ServiceRecord record) throws IOException; + } + + /** + * Performs registry service record registration. 
+ */ + private final ManagementCommand register = new ManagementCommand() { + @Override + public void exec(String path, ServiceRecord record) throws IOException { + if (record != null) { + LOG.info("Registering DNS records for {}", path); + registryDNS.register(path, record); + } + } + }; + + /** + * Performs registry service record deletion. + */ + private ManagementCommand delete = new ManagementCommand() { + @Override + public void exec(String path, ServiceRecord record) throws IOException { + if (record != null) { + LOG.info("Deleting DNS records for {}", path); + registryDNS.delete(path, record); + } + } + }; + + /** + * iterates thru the supplied service records, executing the provided registry + * command. + * @param records the service records. + * @param command the registry command. + * @throws IOException + */ + private void processServiceRecords(Map records, + ManagementCommand command) + throws IOException { + for (Map.Entry entry : records.entrySet()) { + processServiceRecord(entry.getKey(), entry.getValue(), command); + } + } + + /** + * Process the service record, parsing the information and creating the + * required DNS records. + * @param path the service record path. + * @param record the record. + * @param command the registry command to execute. + * @throws IOException + */ + private void processServiceRecord(String path, ServiceRecord record, + ManagementCommand command) + throws IOException { + command.exec(path, record); + } + + /** + * Launch the server. 
+   * @param conf configuration
+   * @param rdns registry dns instance
+   * @return the initialized and started DNS server instance.
+   */
+  static RegistryDNSServer launchDNSServer(Configuration conf,
+      RegistryDNS rdns) {
+    RegistryDNSServer dnsServer = null;
+
+    Thread
+        .setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
+    try {
+      dnsServer = new RegistryDNSServer("RegistryDNSServer", rdns);
+      ShutdownHookManager.get().addShutdownHook(
+          new CompositeService.CompositeServiceShutdownHook(dnsServer),
+          SHUTDOWN_HOOK_PRIORITY);
+      dnsServer.init(conf);
+      dnsServer.start();
+    } catch (Throwable t) {
+      LOG.error("Error starting Registry DNS Server", t);
+      ExitUtil.terminate(-1, "Error starting Registry DNS Server");
+    }
+    return dnsServer;
+  }
+
+  /**
+   * Launches the server instance.
+   * @param args the command line args.
+   * @throws IOException if command line options can't be parsed
+   */
+  public static void main(String[] args) throws IOException {
+    StringUtils.startupShutdownMessage(RegistryDNSServer.class, args, LOG);
+    YarnConfiguration conf = new YarnConfiguration();
+    new GenericOptionsParser(conf, args);
+    launchDNSServer(conf, null);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java
new file mode 100644
index 0000000..cb04f9e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ReverseZoneUtils.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import com.google.common.annotations.VisibleForTesting; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.net.util.SubnetUtils; +import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_DNS_SPLIT_REVERSE_ZONE_RANGE; +import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_DNS_ZONE_MASK; +import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_DNS_ZONE_SUBNET; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utilities for configuring reverse zones. + */ +public final class ReverseZoneUtils { + + private static final Logger LOG = + LoggerFactory.getLogger(ReverseZoneUtils.class); + + private static final long POW3 = (long) Math.pow(256, 3); + private static final long POW2 = (long) Math.pow(256, 2); + private static final long POW1 = (long) Math.pow(256, 1); + + private ReverseZoneUtils() { + } + + /** + * Given a baseIp, range and index, return the network address for the + * reverse zone. + * + * @param baseIp base ip address to perform calculations against. + * @param range number of ip addresses per subnet. + * @param index the index of the subnet to calculate. + * @return the calculated ip address. + * @throws UnknownHostException if an invalid ip is provided. 
+   */
+  protected static String getReverseZoneNetworkAddress(String baseIp, int range,
+      int index) throws UnknownHostException {
+    if (index < 0) {
+      throw new IllegalArgumentException(
+          String.format("Invalid index provided, must be positive: %d", index));
+    }
+    if (range < 0) {
+      throw new IllegalArgumentException(
+          String.format("Invalid range provided, cannot be negative: %d",
+              range));
+    }
+    return calculateIp(baseIp, range, index);
+  }
+
+  /**
+   * When splitting the reverse zone, return the number of subnets needed,
+   * given the range and netmask.
+   *
+   * @param conf the Hadoop configuration.
+   * @return The number of subnets given the range and netmask.
+   */
+  protected static int getSubnetCountForReverseZones(Configuration conf) {
+    String subnet = conf.get(KEY_DNS_ZONE_SUBNET);
+    String mask = conf.get(KEY_DNS_ZONE_MASK);
+    String range = conf.get(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE);
+
+    int parsedRange;
+    try {
+      parsedRange = Integer.parseInt(range);
+    } catch (NumberFormatException e) {
+      // Use an SLF4J placeholder so the offending value is actually logged;
+      // without "{}" the second argument is silently dropped.
+      LOG.error("The supplied range is not a valid integer: Supplied range: {}",
+          range);
+      throw e;
+    }
+    if (parsedRange < 0) {
+      String msg = String
+          .format("Range cannot be negative: Supplied range: %d", parsedRange);
+      LOG.error(msg);
+      throw new IllegalArgumentException(msg);
+    }
+
+    int ipCount;
+    try {
+      SubnetUtils subnetUtils = new SubnetUtils(subnet, mask);
+      subnetUtils.setInclusiveHostCount(true);
+      ipCount = subnetUtils.getInfo().getAddressCount();
+
+    } catch (IllegalArgumentException e) {
+      LOG.error("The subnet or mask is invalid: Subnet: {} Mask: {}", subnet,
+          mask);
+      throw e;
+    }
+
+    // A range of zero means "do not split": one zone covers the whole subnet.
+    if (parsedRange == 0) {
+      return ipCount;
+    }
+    return ipCount / parsedRange;
+  }
+
+  private static String calculateIp(String baseIp, int range, int index)
+      throws UnknownHostException {
+    long[] ipParts = splitIp(baseIp);
+
+    long ipNum1 = POW3 * ipParts[0];
+    long ipNum2 = POW2 * ipParts[1];
+    long ipNum3 = POW1 * ipParts[2];
+    long ipNum4 = ipParts[3];
+    long ipNum =
ipNum1 + ipNum2 + ipNum3 + ipNum4; + + ArrayList ipPartsOut = new ArrayList<>(); + // First octet + long temp = ipNum + range * (long) index; + ipPartsOut.add(0, temp / POW3); + + // Second octet + temp = temp - ipPartsOut.get(0) * POW3; + ipPartsOut.add(1, temp / POW2); + + // Third octet + temp = temp - ipPartsOut.get(1) * POW2; + ipPartsOut.add(2, temp / POW1); + + // Fourth octet + temp = temp - ipPartsOut.get(2) * POW1; + ipPartsOut.add(3, temp); + + return StringUtils.join(ipPartsOut, '.'); + } + + @VisibleForTesting + protected static long[] splitIp(String baseIp) throws UnknownHostException { + InetAddress inetAddress; + try { + inetAddress = InetAddress.getByName(baseIp); + } catch (UnknownHostException e) { + LOG.error("Base IP address is invalid"); + throw e; + } + if (inetAddress instanceof Inet6Address) { + throw new IllegalArgumentException( + "IPv6 is not yet supported for " + "reverse zones"); + } + byte[] octets = inetAddress.getAddress(); + if (octets.length != 4) { + throw new IllegalArgumentException("Base IP address is invalid"); + } + long[] results = new long[4]; + for (int i = 0; i < octets.length; i++) { + results[i] = octets[i] & 0xff; + } + return results; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/SecureableZone.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/SecureableZone.java new file mode 100644 index 0000000..4b0a852 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/SecureableZone.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import org.xbill.DNS.DClass; +import org.xbill.DNS.NXTRecord; +import org.xbill.DNS.Name; +import org.xbill.DNS.RRset; +import org.xbill.DNS.Record; +import org.xbill.DNS.SetResponse; +import org.xbill.DNS.Type; +import org.xbill.DNS.Zone; +import org.xbill.DNS.ZoneTransferException; +import org.xbill.DNS.ZoneTransferIn; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; + +/** + * A zone implementation geared to support some DNSSEC functionality. + */ +public class SecureableZone extends Zone { + private List records; + + /** + * Creates a Zone by doing the specified zone transfer. + * @param xfrin The incoming zone transfer to execute. + * @throws IOException if there is an error. + * @throws ZoneTransferException if there is an error. + */ + public SecureableZone(ZoneTransferIn xfrin) + throws IOException, ZoneTransferException { + super(xfrin); + } + + /** + * Creates a Zone by performing a zone transfer to the specified host. + * @param zone zone name. + * @param dclass the dclass + * @param remote the remote host. + * @throws IOException if there is an error. + * @throws ZoneTransferException if there is an error. 
+ */ + public SecureableZone(Name zone, int dclass, String remote) + throws IOException, ZoneTransferException { + super(zone, dclass, remote); + } + + /** + * Creates a Zone from the records in the specified master file. + * @param zone The name of the zone. + * @param file The master file to read from. + * @throws IOException if there is an error. + */ + public SecureableZone(Name zone, String file) throws IOException { + super(zone, file); + } + + /** + * Creates a Zone from an array of records. + * @param zone The name of the zone. + * @param records The records to add to the zone. + * @throws IOException if there is an error. + */ + public SecureableZone(Name zone, Record[] records) + throws IOException { + super(zone, records); + } + + /** + * Adds a Record to the Zone. + * @param r The record to be added + * @see Record + */ + @Override public void addRecord(Record r) { + if (records == null) { + records = new ArrayList(); + } + super.addRecord(r); + records.add(r); + } + + /** + * Removes a record from the Zone. + * @param r The record to be removed + * @see Record + */ + @Override public void removeRecord(Record r) { + if (records == null) { + records = new ArrayList(); + } + super.removeRecord(r); + records.remove(r); + } + + /** + * Return a NXT record appropriate for the query. + * @param queryRecord the query record. + * @param zone the zone to search. + * @return the NXT record describing the insertion point. 
+ */ + @SuppressWarnings({"unchecked"}) + public Record getNXTRecord(Record queryRecord, Zone zone) { + Collections.sort(records); + + int index = Collections.binarySearch(records, queryRecord, + new Comparator() { + @Override public int compare(Record r1, Record r2) { + return r1.compareTo(r2); + } + }); + if (index >= 0) { + return null; + } + index = -index - 1; + if (index >= records.size()) { + index = records.size() - 1; + } + Record base = records.get(index); + SetResponse sr = zone.findRecords(base.getName(), Type.ANY); + BitSet bitMap = new BitSet(); + bitMap.set(Type.NXT); + RRset[] rRsets = sr.answers(); + for (RRset rRset : rRsets) { + int typeCode = rRset.getType(); + if (typeCode > 0 && typeCode < 128) { + bitMap.set(typeCode); + } + } + return new NXTRecord(base.getName(), DClass.IN, zone.getSOA().getMinimum(), + queryRecord.getName(), bitMap); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ServiceRecordProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ServiceRecordProcessor.java new file mode 100644 index 0000000..b67cc7d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ServiceRecordProcessor.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import org.apache.hadoop.registry.client.types.ServiceRecord; + +import java.io.IOException; + +/** + * Manage the processing of service records in order to create DNS records. + */ +public interface ServiceRecordProcessor { + /** + * Initialize the mapping between DNS record type and record information + * for the given service record. + * @param serviceRecord the registry service record. + * @throws Exception if encountering an error. + */ + void initTypeToInfoMapping(ServiceRecord serviceRecord) + throws Exception; + + /** + * Return the DNS record types valid for this processor. + * @return the array of DNS record types. + */ + int[] getRecordTypes(); + + /** + * Manage the creation and registration of DNS records generated by parsing + * a service record. + * @param command the DNS registration command object (e.g. add_record, + * remove record) + * @throws IOException if the creation or registration generates an issue. 
+ */ + void manageDNSRecords(RegistryDNS.RegistryCommand command) + throws IOException; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ZoneSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ZoneSelector.java new file mode 100644 index 0000000..5043b85 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/ZoneSelector.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import org.xbill.DNS.Name; +import org.xbill.DNS.Zone; + +/** + * A selector that returns the zone associated with a provided name. + */ +public interface ZoneSelector { + /** + * Finds the best matching zone given the provided name. + * @param name the record name for which a zone is requested. + * @return the matching zone. 
+ */ + Zone findBestZone(Name name); +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/package-info.java new file mode 100644 index 0000000..00d8c9db --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/package-info.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * DNS Server classes. + *

+ * These classes are leveraged to create a DNS server that can provide the + * facilities necessary for YARN application and/or service discovery. + *

+ */ +package org.apache.hadoop.registry.server.dns; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java new file mode 100644 index 0000000..7c78161 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestRegistryDNS.java @@ -0,0 +1,627 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.registry.server.dns; + +import org.apache.commons.net.util.Base64; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.xbill.DNS.AAAARecord; +import org.xbill.DNS.ARecord; +import org.xbill.DNS.CNAMERecord; +import org.xbill.DNS.DClass; +import org.xbill.DNS.DNSKEYRecord; +import org.xbill.DNS.DNSSEC; +import org.xbill.DNS.Flags; +import org.xbill.DNS.Message; +import org.xbill.DNS.Name; +import org.xbill.DNS.OPTRecord; +import org.xbill.DNS.PTRRecord; +import org.xbill.DNS.RRSIGRecord; +import org.xbill.DNS.RRset; +import org.xbill.DNS.Rcode; +import org.xbill.DNS.Record; +import org.xbill.DNS.SRVRecord; +import org.xbill.DNS.Section; +import org.xbill.DNS.Type; + +import java.io.IOException; +import java.math.BigInteger; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.security.KeyFactory; +import java.security.PrivateKey; +import java.security.spec.RSAPrivateKeySpec; +import java.util.Calendar; +import java.util.Date; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.registry.client.api.RegistryConstants.*; + +/** + * + */ +public class TestRegistryDNS extends Assert { + + private RegistryDNS registryDNS; + private RegistryUtils.ServiceRecordMarshal marshal; + + private static final String APPLICATION_RECORD = "{\n" + + " \"type\" : \"JSONServiceRecord\",\n" + + " \"description\" : \"Slider Application Master\",\n" + + " \"external\" : [ {\n" + + " \"api\" : \"classpath:org.apache.hadoop.yarn.service.appmaster.ipc" + + "\",\n" + + " \"addressType\" : \"host/port\",\n" + + " \"protocolType\" : \"hadoop/IPC\",\n" + + " \"addresses\" : [ {\n" + + " \"host\" : \"192.168.1.5\",\n" + + " 
\"port\" : \"1026\"\n" + + " } ]\n" + + " }, {\n" + + " \"api\" : \"http://\",\n" + + " \"addressType\" : \"uri\",\n" + + " \"protocolType\" : \"webui\",\n" + + " \"addresses\" : [ {\n" + + " \"uri\" : \"http://192.168.1.5:1027\"\n" + + " } ]\n" + + " }, {\n" + + " \"api\" : \"classpath:org.apache.hadoop.yarn.service.management\"" + + ",\n" + + " \"addressType\" : \"uri\",\n" + + " \"protocolType\" : \"REST\",\n" + + " \"addresses\" : [ {\n" + + " \"uri\" : \"http://192.168.1.5:1027/ws/v1/slider/mgmt\"\n" + + " } ]\n" + + " } ],\n" + + " \"internal\" : [ {\n" + + " \"api\" : \"classpath:org.apache.hadoop.yarn.service.agents.secure" + + "\",\n" + + " \"addressType\" : \"uri\",\n" + + " \"protocolType\" : \"REST\",\n" + + " \"addresses\" : [ {\n" + + " \"uri\" : \"https://192.168.1.5:47700/ws/v1/slider/agents\"\n" + + " } ]\n" + + " }, {\n" + + " \"api\" : \"classpath:org.apache.hadoop.yarn.service.agents.oneway" + + "\",\n" + + " \"addressType\" : \"uri\",\n" + + " \"protocolType\" : \"REST\",\n" + + " \"addresses\" : [ {\n" + + " \"uri\" : \"https://192.168.1.5:35531/ws/v1/slider/agents\"\n" + + " } ]\n" + + " } ],\n" + + " \"yarn:id\" : \"application_1451931954322_0016\",\n" + + " \"yarn:persistence\" : \"application\"\n" + + "}\n"; + static final String CONTAINER_RECORD = "{\n" + + " \"type\" : \"JSONServiceRecord\",\n" + + " \"description\" : \"COMP-NAME\",\n" + + " \"external\" : [ ],\n" + + " \"internal\" : [ ],\n" + + " \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n" + + " \"yarn:persistence\" : \"container\",\n" + + " \"yarn:ip\" : \"172.17.0.19\",\n" + + " \"yarn:hostname\" : \"0a134d6329ba\"\n" + + "}\n"; + + private static final String CONTAINER_RECORD_NO_IP = "{\n" + + " \"type\" : \"JSONServiceRecord\",\n" + + " \"description\" : \"COMP-NAME\",\n" + + " \"external\" : [ ],\n" + + " \"internal\" : [ ],\n" + + " \"yarn:id\" : \"container_e50_1451931954322_0016_01_000002\",\n" + + " \"yarn:persistence\" : \"container\"\n" + + "}\n"; + + 
private static final String CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT = "{\n" + + " \"type\" : \"JSONServiceRecord\",\n" + + " \"description\" : \"COMP-NAME\",\n" + + " \"external\" : [ ],\n" + + " \"internal\" : [ ],\n" + + " \"yarn:id\" : \"container_e50_1451931954322_0016_01_000003\",\n" + + " \"yarn:ip\" : \"172.17.0.19\",\n" + + " \"yarn:hostname\" : \"0a134d6329bb\"\n" + + "}\n"; + + @Before + public void initialize() throws Exception { + setRegistryDNS(new RegistryDNS("TestRegistry")); + Configuration conf = createConfiguration(); + + getRegistryDNS().setDomainName(conf); + getRegistryDNS().initializeZones(conf); + + setMarshal(new RegistryUtils.ServiceRecordMarshal()); + } + + protected Configuration createConfiguration() { + Configuration conf = new Configuration(); + conf.set(RegistryConstants.KEY_DNS_DOMAIN, "hwx.test"); + conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0"); + conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS); + return conf; + } + + protected boolean isSecure() { + return false; + } + + @After + public void closeRegistry() throws Exception { + getRegistryDNS().stopExecutor(); + } + + @Test + public void testAppRegistration() throws Exception { + ServiceRecord record = getMarshal().fromBytes("somepath", + APPLICATION_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/", record); + + // start assessing whether correct records are available + Record[] recs = assertDNSQuery("test1.root.hwx.test."); + assertEquals("wrong result", "192.168.1.5", + ((ARecord) recs[0]).getAddress().getHostAddress()); + + recs = assertDNSQuery("management-api.test1.root.hwx.test.", 2); + assertEquals("wrong target name", "test1.root.hwx.test.", + ((CNAMERecord) recs[0]).getTarget().toString()); + assertTrue("not an ARecord", recs[isSecure() ? 
2 : 1] instanceof ARecord); + + recs = assertDNSQuery("appmaster-ipc-api.test1.root.hwx.test.", + Type.SRV, 1); + assertTrue("not an SRV record", recs[0] instanceof SRVRecord); + assertEquals("wrong port", 1026, ((SRVRecord) recs[0]).getPort()); + + recs = assertDNSQuery("appmaster-ipc-api.test1.root.hwx.test.", 2); + assertEquals("wrong target name", "test1.root.hwx.test.", + ((CNAMERecord) recs[0]).getTarget().toString()); + assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord); + + recs = assertDNSQuery("http-api.test1.root.hwx.test.", 2); + assertEquals("wrong target name", "test1.root.hwx.test.", + ((CNAMERecord) recs[0]).getTarget().toString()); + assertTrue("not an ARecord", recs[isSecure() ? 2 : 1] instanceof ARecord); + + recs = assertDNSQuery("http-api.test1.root.hwx.test.", Type.SRV, + 1); + assertTrue("not an SRV record", recs[0] instanceof SRVRecord); + assertEquals("wrong port", 1027, ((SRVRecord) recs[0]).getPort()); + + assertDNSQuery("test1.root.hwx.test.", Type.TXT, 3); + assertDNSQuery("appmaster-ipc-api.test1.root.hwx.test.", Type.TXT, 1); + assertDNSQuery("http-api.test1.root.hwx.test.", Type.TXT, 1); + assertDNSQuery("management-api.test1.root.hwx.test.", Type.TXT, 1); + } + + @Test + public void testContainerRegistration() throws Exception { + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Record[] recs = + assertDNSQuery("ctr-e50-1451931954322-0016-01-000002.hwx.test."); + assertEquals("wrong result", "172.17.0.19", + ((ARecord) recs[0]).getAddress().getHostAddress()); + + recs = assertDNSQuery("comp-name.test1.root.hwx.test.", 1); + assertTrue("not an ARecord", recs[0] instanceof ARecord); + } + + @Test + public void testContainerRegistrationPersistanceAbsent() 
throws Exception { + ServiceRecord record = marshal.fromBytes("somepath", + CONTAINER_RECORD_YARN_PERSISTANCE_ABSENT.getBytes()); + registryDNS.register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000003", + record); + + Name name = + Name.fromString("ctr-e50-1451931954322-0016-01-000002.hwx.test."); + Record question = Record.newRecord(name, Type.A, DClass.IN); + Message query = Message.newQuery(question); + byte[] responseBytes = registryDNS.generateReply(query, null); + Message response = new Message(responseBytes); + assertEquals("Excepting NXDOMAIN as Record must not have regsisterd wrong", + Rcode.NXDOMAIN, response.getRcode()); + } + + @Test + public void testRecordTTL() throws Exception { + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Record[] recs = assertDNSQuery( + "ctr-e50-1451931954322-0016-01-000002.hwx.test."); + assertEquals("wrong result", "172.17.0.19", + ((ARecord) recs[0]).getAddress().getHostAddress()); + assertEquals("wrong ttl", 30L, recs[0].getTTL()); + + recs = assertDNSQuery("comp-name.test1.root.hwx.test.", 1); + assertTrue("not an ARecord", recs[0] instanceof ARecord); + + assertEquals("wrong ttl", 30L, recs[0].getTTL()); + } + + @Test + public void testReverseLookup() throws Exception { + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1); + assertEquals("wrong result", + 
"comp-name.test1.root.hwx.test.", + ((PTRRecord) recs[0]).getTarget().toString()); + } + + @Test + public void testReverseLookupInLargeNetwork() throws Exception { + setRegistryDNS(new RegistryDNS("TestRegistry")); + Configuration conf = createConfiguration(); + conf.set(RegistryConstants.KEY_DNS_DOMAIN, "hwx.test"); + conf.set(KEY_DNS_ZONE_SUBNET, "172.17.0.0"); + conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0"); + conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS); + + getRegistryDNS().setDomainName(conf); + getRegistryDNS().initializeZones(conf); + + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Record[] recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1); + assertEquals("wrong result", + "comp-name.test1.root.hwx.test.", + ((PTRRecord) recs[0]).getTarget().toString()); + } + + @Test + public void testMissingReverseLookup() throws Exception { + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Name name = Name.fromString("19.1.17.172.in-addr.arpa."); + Record question = Record.newRecord(name, Type.PTR, DClass.IN); + Message query = Message.newQuery(question); + OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null); + query.addRecord(optRecord, Section.ADDITIONAL); + byte[] responseBytes = getRegistryDNS().generateReply(query, null); + Message response = new Message(responseBytes); + assertEquals("Missing record should be: ", Rcode.NXDOMAIN, + response.getRcode()); + } + + @Test + public void testNoContainerIP() 
throws Exception { + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD_NO_IP.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Name name = + Name.fromString("ctr-e50-1451931954322-0016-01-000002.hwx.test."); + Record question = Record.newRecord(name, Type.A, DClass.IN); + Message query = Message.newQuery(question); + + byte[] responseBytes = getRegistryDNS().generateReply(query, null); + Message response = new Message(responseBytes); + assertEquals("wrong status", Rcode.NXDOMAIN, response.getRcode()); + } + + private Record[] assertDNSQuery(String lookup) throws IOException { + return assertDNSQuery(lookup, Type.A, 1); + } + + private Record[] assertDNSQuery(String lookup, int numRecs) + throws IOException { + return assertDNSQuery(lookup, Type.A, numRecs); + } + + Record[] assertDNSQuery(String lookup, int type, int numRecs) + throws IOException { + Name name = Name.fromString(lookup); + Record question = Record.newRecord(name, type, DClass.IN); + Message query = Message.newQuery(question); + OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null); + query.addRecord(optRecord, Section.ADDITIONAL); + byte[] responseBytes = getRegistryDNS().generateReply(query, null); + Message response = new Message(responseBytes); + assertEquals("not successful", Rcode.NOERROR, response.getRcode()); + assertNotNull("Null response", response); + assertEquals("Questions do not match", query.getQuestion(), + response.getQuestion()); + Record[] recs = response.getSectionArray(Section.ANSWER); + assertEquals("wrong number of answer records", + isSecure() ? 
numRecs * 2 : numRecs, recs.length); + if (isSecure()) { + boolean signed = false; + for (Record record : recs) { + signed = record.getType() == Type.RRSIG; + if (signed) { + break; + } + } + assertTrue("No signatures found", signed); + } + return recs; + } + + @Test + public void testDNSKEYRecord() throws Exception { + String publicK = + "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD " + + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ " + + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q " + + "l9Ozs5bV"; + // byte[] publicBytes = Base64.decodeBase64(publicK); + // X509EncodedKeySpec keySpec = new X509EncodedKeySpec(publicBytes); + // KeyFactory keyFactory = KeyFactory.getInstance("RSA"); + // PublicKey pubKey = keyFactory.generatePublic(keySpec); + DNSKEYRecord dnskeyRecord = + new DNSKEYRecord(Name.fromString("hwxstg.site."), DClass.IN, 0, + DNSKEYRecord.Flags.ZONE_KEY, + DNSKEYRecord.Protocol.DNSSEC, + DNSSEC.Algorithm.RSASHA256, + Base64.decodeBase64(publicK.getBytes())); + assertNotNull(dnskeyRecord); + RSAPrivateKeySpec privateSpec = new RSAPrivateKeySpec(new BigInteger(1, + Base64.decodeBase64( + "7Ul6/QDPWSGVAK9/Se53X8I0dDDA8S7wE1yFm2F0PEo9Wfb3KsMIegBaPCIaw5LDd" + + "LMg+trBJsfPImyOfSgsGEasfpB50UafJ2jGM2zDeb9IKY6NH9rssYEAwMUq" + + "oWKiLiA7K43rqy8F5j7/m7Dvb7R6L0BDbSCp/qqX07OzltU=")), + new BigInteger(1, Base64.decodeBase64( + "MgbQ6DBYhskeufNGGdct0cGG/4wb0X183ggenwCv2dopDyOTPq+5xMb4Pz9Ndzgk/" + + "yCY7mpaWIu9rttGOzrR+LBRR30VobPpMK1bMnzu2C0x08oYAguVwZB79DLC" + + "705qmZpiaaFB+LnhG7VtpPiOBm3UzZxdrBfeq/qaKrXid60="))); + KeyFactory factory = KeyFactory.getInstance("RSA"); + PrivateKey priv = factory.generatePrivate(privateSpec); + + ARecord aRecord = new ARecord(Name.fromString("some.test."), DClass.IN, 0, + InetAddress.getByName("192.168.0.1")); + Calendar cal = Calendar.getInstance(); + Date inception = cal.getTime(); + cal.add(Calendar.YEAR, 1); + Date expiration = cal.getTime(); + RRset rrset = new RRset(aRecord); + 
RRSIGRecord rrsigRecord = DNSSEC.sign(rrset, + dnskeyRecord, + priv, + inception, + expiration); + DNSSEC.verify(rrset, rrsigRecord, dnskeyRecord); + + } + + @Test + public void testIpv4toIpv6() throws Exception { + InetAddress address = + BaseServiceRecordProcessor + .getIpv6Address(InetAddress.getByName("172.17.0.19")); + assertTrue("not an ipv6 address", address instanceof Inet6Address); + assertEquals("wrong IP", "172.17.0.19", + InetAddress.getByAddress(address.getAddress()).getHostAddress()); + } + + @Test + public void testAAAALookup() throws Exception { + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Record[] recs = assertDNSQuery( + "ctr-e50-1451931954322-0016-01-000002.hwx.test.", Type.AAAA, 1); + assertEquals("wrong result", "172.17.0.19", + ((AAAARecord) recs[0]).getAddress().getHostAddress()); + + recs = assertDNSQuery("comp-name.test1.root.hwx.test.", Type.AAAA, 1); + assertTrue("not an ARecord", recs[0] instanceof AAAARecord); + } + + @Test + public void testNegativeLookup() throws Exception { + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Name name = Name.fromString("missing.hwx.test."); + Record question = Record.newRecord(name, Type.A, DClass.IN); + Message query = Message.newQuery(question); + + byte[] responseBytes = getRegistryDNS().generateReply(query, null); + Message response = new Message(responseBytes); + assertEquals("not successful", Rcode.NXDOMAIN, response.getRcode()); + assertNotNull("Null response", response); + 
assertEquals("Questions do not match", query.getQuestion(), + response.getQuestion()); + Record[] sectionArray = response.getSectionArray(Section.AUTHORITY); + assertEquals("Wrong number of recs in AUTHORITY", isSecure() ? 2 : 1, + sectionArray.length); + boolean soaFound = false; + for (Record rec : sectionArray) { + soaFound = rec.getType() == Type.SOA; + if (soaFound) { + break; + } + } + assertTrue("wrong record type", + soaFound); + + } + + @Test + public void testReadMasterFile() throws Exception { + setRegistryDNS(new RegistryDNS("TestRegistry")); + Configuration conf = new Configuration(); + conf.set(RegistryConstants.KEY_DNS_DOMAIN, "hwx.test"); + conf.set(RegistryConstants.KEY_DNS_ZONE_SUBNET, "172.17.0"); + conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS); + conf.set(RegistryConstants.KEY_DNS_ZONES_DIR, + getClass().getResource("/").getFile()); + if (isSecure()) { + conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true); + conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY, + "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD " + + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ " + + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q " + + "l9Ozs5bV"); + conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE, + getClass().getResource("/test.private").getFile()); + } + + getRegistryDNS().setDomainName(conf); + getRegistryDNS().initializeZones(conf); + + ServiceRecord record = getMarshal().fromBytes("somepath", + CONTAINER_RECORD.getBytes()); + getRegistryDNS().register( + "/registry/users/root/services/org-apache-slider/test1/components/" + + "ctr-e50-1451931954322-0016-01-000002", + record); + + // start assessing whether correct records are available + Record[] recs = + assertDNSQuery("ctr-e50-1451931954322-0016-01-000002.hwx.test."); + assertEquals("wrong result", "172.17.0.19", + ((ARecord) recs[0]).getAddress().getHostAddress()); + + recs = assertDNSQuery("comp-name.test1.root.hwx.test.", 1); + 
assertTrue("not an ARecord", recs[0] instanceof ARecord);
+
+    // look up dynamic reverse records
+    recs = assertDNSQuery("19.0.17.172.in-addr.arpa.", Type.PTR, 1);
+    assertEquals("wrong result",
+        "comp-name.test1.root.hwx.test.",
+        ((PTRRecord) recs[0]).getTarget().toString());
+
+    // now lookup static reverse records
+    Name name = Name.fromString("5.0.17.172.in-addr.arpa.");
+    Record question = Record.newRecord(name, Type.PTR, DClass.IN);
+    Message query = Message.newQuery(question);
+    OPTRecord optRecord = new OPTRecord(4096, 0, 0, Flags.DO, null);
+    query.addRecord(optRecord, Section.ADDITIONAL);
+    byte[] responseBytes = getRegistryDNS().generateReply(query, null);
+    Message response = new Message(responseBytes);
+    recs = response.getSectionArray(Section.ANSWER);
+    assertEquals("wrong result", "cn005.hwx.test.",
+        ((PTRRecord) recs[0]).getTarget().toString());
+  }
+
+  @Test
+  public void testReverseZoneNames() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(KEY_DNS_ZONE_SUBNET, "172.26.32.0");
+    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
+
+    Name name = getRegistryDNS().getReverseZoneName(conf);
+    assertEquals("wrong name", "26.172.in-addr.arpa.", name.toString());
+  }
+
+  @Test
+  public void testSplitReverseZoneNames() throws Exception {
+    Configuration conf = new Configuration();
+    registryDNS = new RegistryDNS("TestRegistry");
+    conf.set(RegistryConstants.KEY_DNS_DOMAIN, "example.com");
+    conf.set(KEY_DNS_SPLIT_REVERSE_ZONE, "true");
+    conf.set(KEY_DNS_SPLIT_REVERSE_ZONE_RANGE, "256");
+    conf.set(KEY_DNS_ZONE_SUBNET, "172.26.32.0");
+    conf.set(KEY_DNS_ZONE_MASK, "255.255.224.0");
+    conf.setTimeDuration(RegistryConstants.KEY_DNS_TTL, 30L, TimeUnit.SECONDS);
+    conf.set(RegistryConstants.KEY_DNS_ZONES_DIR,
+        getClass().getResource("/").getFile());
+    if (isSecure()) {
+      conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true);
+      conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY,
+          
"AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD " + + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ " + + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q " + + "l9Ozs5bV"); + conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE, + getClass().getResource("/test.private").getFile()); + } + registryDNS.setDomainName(conf); + registryDNS.setDNSSECEnabled(conf); + registryDNS.addSplitReverseZones(conf, 4); + assertEquals(4, registryDNS.getZoneCount()); + } + + @Test + public void testExampleDotCom() throws Exception { + Name name = Name.fromString("example.com."); + Record[] records = getRegistryDNS().getRecords(name, Type.SOA); + assertNotNull("example.com exists:", records); + } + public RegistryDNS getRegistryDNS() { + return registryDNS; + } + + public void setRegistryDNS( + RegistryDNS registryDNS) { + this.registryDNS = registryDNS; + } + + public RegistryUtils.ServiceRecordMarshal getMarshal() { + return marshal; + } + + public void setMarshal( + RegistryUtils.ServiceRecordMarshal marshal) { + this.marshal = marshal; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java new file mode 100644 index 0000000..1331f75 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestReverseZoneUtils.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import java.net.UnknownHostException; +import static org.junit.Assert.assertEquals; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +/** + * Tests for the reverse zone utilities. + */ +public class TestReverseZoneUtils { + private static final String NET = "172.17.4.0"; + private static final int RANGE = 256; + private static final int INDEX = 0; + + @Rule public ExpectedException exception = ExpectedException.none(); + + @Test + public void testGetReverseZoneNetworkAddress() throws Exception { + assertEquals("172.17.4.0", + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, RANGE, INDEX)); + } + + @Test + public void testSplitIp() throws Exception { + long[] splitIp = ReverseZoneUtils.splitIp(NET); + assertEquals(172, splitIp[0]); + assertEquals(17, splitIp[1]); + assertEquals(4, splitIp[2]); + assertEquals(0, splitIp[3]); + } + + @Test + public void testThrowIllegalArgumentExceptionIfIndexIsNegative() + throws Exception { + exception.expect(IllegalArgumentException.class); + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, RANGE, -1); + } + + @Test + public void testThrowUnknownHostExceptionIfIpIsInvalid() throws Exception { + exception.expect(UnknownHostException.class); + ReverseZoneUtils + .getReverseZoneNetworkAddress("213124.21231.14123.13", RANGE, INDEX); + } + + @Test + public void testThrowIllegalArgumentExceptionIfRangeIsNegative() + throws Exception { + exception.expect(IllegalArgumentException.class); + 
ReverseZoneUtils.getReverseZoneNetworkAddress(NET, -1, INDEX); + } + + @Test + public void testVariousRangeAndIndexValues() throws Exception { + // Given the base address of 172.17.4.0, step 256 IP addresses, 5 times. + assertEquals("172.17.9.0", + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 256, 5)); + assertEquals("172.17.4.128", + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 128, 1)); + assertEquals("172.18.0.0", + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 256, 252)); + assertEquals("172.17.12.0", + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1024, 2)); + assertEquals("172.17.4.0", + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 0, 1)); + assertEquals("172.17.4.0", + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1, 0)); + assertEquals("172.17.4.1", + ReverseZoneUtils.getReverseZoneNetworkAddress(NET, 1, 1)); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java new file mode 100644 index 0000000..ded63bd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/server/dns/TestSecureRegistryDNS.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.registry.client.api.RegistryConstants; + +/** + * + */ +public class TestSecureRegistryDNS extends TestRegistryDNS { + @Override protected Configuration createConfiguration() { + Configuration conf = super.createConfiguration(); + conf.setBoolean(RegistryConstants.KEY_DNSSEC_ENABLED, true); + conf.set(RegistryConstants.KEY_DNSSEC_PUBLIC_KEY, + "AwEAAe1Jev0Az1khlQCvf0nud1/CNHQwwPEu8BNchZthdDxKPVn29yrD " + + "CHoAWjwiGsOSw3SzIPrawSbHzyJsjn0oLBhGrH6QedFGnydoxjNsw3m/ " + + "SCmOjR/a7LGBAMDFKqFioi4gOyuN66svBeY+/5uw72+0ei9AQ20gqf6q " + + "l9Ozs5bV"); + conf.set(RegistryConstants.KEY_DNSSEC_PRIVATE_KEY_FILE, + getClass().getResource("/test.private").getFile()); + + return conf; + } + + @Override protected boolean isSecure() { + return true; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/0.17.172.in-addr.arpa.zone b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/0.17.172.in-addr.arpa.zone new file mode 100644 index 0000000..0165f0d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/0.17.172.in-addr.arpa.zone @@ -0,0 +1,36 @@ +; +; Licensed to the Apache Software Foundation (ASF) under one +; or more contributor license agreements. See the NOTICE file +; distributed with this work for additional information +; regarding copyright ownership. 
The ASF licenses this file +; to you under the Apache License, Version 2.0 (the +; "License"); you may not use this file except in compliance +; with the License. You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. +; +; +$ORIGIN . +$TTL 1800 ; 30 minutes +0.17.172.in-addr.arpa IN SOA ns.hwhq.hortonworks.com. it.hortonworks.com. ( + 2015081000 ; serial + 10800 ; refresh (3 hours) + 900 ; retry (15 minutes) + 1814400 ; expire (3 weeks) + 10800 ; minimum (3 hours) +) + NS ns.hwhq.hortonworks.com. + NS ns2.hwhq.hortonworks.com. + +$ORIGIN 0.17.172.in-addr.arpa. +5 PTR cn005.hwx.test. +6 PTR cn006.hwx.test. +7 PTR cn007.hwx.test. +8 PTR cn008.hwx.test. +9 PTR cn009.hwx.test. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/test.private b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/test.private new file mode 100644 index 0000000..5f0da9d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/resources/test.private @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +Private-key-format: v1.3 +Algorithm: 8 (RSASHA256) +Modulus: 7Ul6/QDPWSGVAK9/Se53X8I0dDDA8S7wE1yFm2F0PEo9Wfb3KsMIegBaPCIaw5LDdLMg+trBJsfPImyOfSgsGEasfpB50UafJ2jGM2zDeb9IKY6NH9rssYEAwMUqoWKiLiA7K43rqy8F5j7/m7Dvb7R6L0BDbSCp/qqX07OzltU= +PublicExponent: AQAB +PrivateExponent: MgbQ6DBYhskeufNGGdct0cGG/4wb0X183ggenwCv2dopDyOTPq+5xMb4Pz9Ndzgk/yCY7mpaWIu9rttGOzrR+LBRR30VobPpMK1bMnzu2C0x08oYAguVwZB79DLC705qmZpiaaFB+LnhG7VtpPiOBm3UzZxdrBfeq/qaKrXid60= +Prime1: /HFdjI4cRuJBjK9IGWWmmVZWwaFsQYO9GHLCDwjm691GxaDpXuMdPd0uH9EqQvskyF8JPmzQXI43swyUFjizow== +Prime2: 8KFxkWEHlhgB2GLi8tk39TKY5vmFUvh4FO28COl1N/rWjKVpfM1p6HQ6YavoGNZQmDBazv4WOZRqSQukHApzJw== +Exponent1: alX+h/RcqOcpoW88OaZ99N1PkiTDCx3JC4FbiSXAz93Xr+vGIfgdGzAN+80JtklABz8xD6CabEJj6AIGZw3fbQ== +Exponent2: vvPusqZkJcjBVh0K6hpUXKEdU1W5ZmFEsZ8Cs7PH0Hee4Je3QVGk9NGfLrkDgwo3hL4CofZiXqkXOwYg4husyw== +Coefficient: omxpbNU6u/swbnkTC6MicaDqbJP7ETnCCJ1iN2+HZO/AlQCFlqVzLwGZmvGMAGA9ZWF+YpqpPhvzi4bWmi5XrQ== +Created: 20160119155251 +Publish: 20160119155251 +Activate: 20160119155251 + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java index 6f7b6fd..4e84e3d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java @@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.registry.client.binding.RegistryPathUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; @@ -372,6 +373,11 @@ private void setHostname(DockerRunCommand runCommand, String throws ContainerExecutionException { if (name == null || name.isEmpty()) { name = RegistryPathUtils.encodeYarnID(containerIdStr); + + String domain = conf.get(RegistryConstants.KEY_DNS_DOMAIN); + if (domain != null) { + name += ("." 
+ domain); + } validateHostname(name); } @@ -784,6 +790,8 @@ public void reapContainer(ContainerRuntimeContext ctx) .executePrivilegedOperation(null, privOp, null, null, true, false); LOG.info("Docker inspect output for " + containerId + ": " + output); + // strip off quotes if any + output = output.replaceAll("['\"]", ""); int index = output.lastIndexOf(','); if (index == -1) { LOG.error("Incorrect format for ip and host"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java index f10e885..e748955 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java @@ -18,15 +18,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.blacklist; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; + import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; - /** * Maintains a list of failed nodes and returns that as long as number of * blacklisted nodes is below a threshold percentage of total nodes. 
If more diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 4997bc6..d3fbcb5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -107,6 +107,7 @@ import org.apache.hadoop.yarn.state.SingleArcTransition; import org.apache.hadoop.yarn.state.StateMachine; import org.apache.hadoop.yarn.state.StateMachineFactory; +import org.apache.hadoop.yarn.util.Apps; import org.apache.hadoop.yarn.util.BoundedAppender; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; @@ -1539,38 +1540,6 @@ public boolean shouldCountTowardsMaxAttemptRetry() { } } - private static boolean shouldCountTowardsNodeBlacklisting(int exitStatus) { - switch (exitStatus) { - case ContainerExitStatus.PREEMPTED: - case ContainerExitStatus.KILLED_BY_RESOURCEMANAGER: - case ContainerExitStatus.KILLED_BY_APPMASTER: - case ContainerExitStatus.KILLED_AFTER_APP_COMPLETION: - case ContainerExitStatus.ABORTED: - // Neither the app's fault nor the system's fault. This happens by design, - // so no need for skipping nodes - return false; - case ContainerExitStatus.DISKS_FAILED: - // This container is marked with this exit-status means that the node is - // already marked as unhealthy given that most of the disks failed. So, no - // need for any explicit skipping of nodes. 
- return false; - case ContainerExitStatus.KILLED_EXCEEDED_VMEM: - case ContainerExitStatus.KILLED_EXCEEDED_PMEM: - // No point in skipping the node as it's not the system's fault - return false; - case ContainerExitStatus.SUCCESS: - return false; - case ContainerExitStatus.INVALID: - // Ideally, this shouldn't be considered for skipping a node. But in - // reality, it seems like there are cases where we are not setting - // exit-code correctly and so it's better to be conservative. See - // YARN-4284. - return true; - default: - return true; - } - } - private static final class UnmanagedAMAttemptSavedTransition extends AMLaunchedTransition { @Override @@ -1954,7 +1923,7 @@ private static void amContainerFinished(RMAppAttemptImpl appAttempt, containerFinishedEvent.getContainerStatus(); if (containerStatus != null) { int exitStatus = containerStatus.getExitStatus(); - if (shouldCountTowardsNodeBlacklisting(exitStatus)) { + if (Apps.shouldCountTowardsNodeBlacklisting(exitStatus)) { appAttempt.addAMNodeToBlackList(nodeId); } } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java index 4e36665..bee9354 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java @@ -22,6 +22,8 @@ import java.net.InetSocketAddress; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import 
org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.RMHAUtils; @@ -38,8 +40,12 @@ */ public class RMWebApp extends WebApp implements YarnWebParams { + private static final Log LOG = + LogFactory.getLog(RMWebApp.class.getName()); private final ResourceManager rm; private boolean standby = false; + private final static String APISERVER = + "org.apache.hadoop.yarn.service.webapp.ApiServer"; public RMWebApp(ResourceManager rm) { this.rm = rm; @@ -53,6 +59,19 @@ public void setup() { bind(RMWebApp.class).toInstance(this); if (rm != null) { + boolean enableServiceApi = rm.getConfig() + .getBoolean(YarnConfiguration.YARN_API_SERVICES_ENABLE, false); + if (enableServiceApi) { + try { + // Use reflection here to load ApiServer class, + // this is done to avoid creating cyclic dependency + // between maven projects. + Class apiServer = Class.forName(APISERVER); + bind(apiServer); + } catch (ClassNotFoundException e) { + LOG.warn("ApiServer REST API is not activated."); + } + } bind(ResourceManager.class).toInstance(rm); } route("/", RmController.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md index fa33b8b..71c9e80 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md @@ -22,7 +22,7 @@ Overview YARN commands are invoked by the bin/yarn script. Running the yarn script without any arguments prints the description for all commands. -Usage: `yarn [SHELL_OPTIONS] COMMAND [GENERIC_OPTIONS] [COMMAND_OPTIONS]` +Usage: `yarn [SHELL_OPTIONS] COMMAND [GENERIC_OPTIONS] [SUB_COMMAND] [COMMAND_OPTIONS]` YARN has an option parsing framework that employs parsing generic options as well as running classes. @@ -37,25 +37,36 @@ User Commands Commands useful for users of a Hadoop cluster. 
-### `application` +### `application` or `app` Usage: `yarn application [options] ` +Usage: `yarn app [options] ` | COMMAND\_OPTIONS | Description | |:---- |:---- | | -appId \ | Specify Application Id to be operated | | -appStates \ | Works with -list to filter applications based on input comma-separated list of application states. The valid application state can be one of the following:  ALL, NEW, NEW\_SAVING, SUBMITTED, ACCEPTED, RUNNING, FINISHED, FAILED, KILLED | +| -appTags \ | Works with -list to filter applications based on input comma-separated list of application tags. | | -appTypes \ | Works with -list to filter applications based on input comma-separated list of application types. | -| -help | Help | -| -list | Lists applications from the RM. Supports optional use of -appTypes to filter applications based on application type, and -appStates to filter applications based on application state. | -| -kill \ | Kills the application. | -| -movetoqueue \ | Moves the application to a different queue. | +| -changeQueue \ | Moves application to a new queue. ApplicationId can be passed using 'appId' option. 'movetoqueue' command is deprecated, this new command 'changeQueue' performs same functionality. | +| -component \ \ | Works with -flex option to change the number of components/containers running for an application / long-running service. Supports absolute or relative changes, such as +1, 2, or -3. | +| -destroy \ | Destroys a saved application specification and removes all application data permanently. Supports -appTypes option to specify which client implementation to use. | +| -enableFastLaunch | Uploads AM dependencies to HDFS to make future launches faster. Supports -appTypes option to specify which client implementation to use. | +| -flex \ | Changes number of running containers for a component of an application / long-running service. Requires -component option. If name is provided, appType must be provided unless it is the default yarn-service. 
If ID is provided, the appType will be looked up. Supports -appTypes option to specify which client implementation to use. | +| -help | Displays help for all commands. | +| -kill \ | Kills the application. Set of applications can be provided separated with space | +| -launch \ \ | Launches application from specification file (saves specification and starts application). Options -updateLifetime and -changeQueue can be specified to alter the values provided in the file. Supports -appTypes option to specify which client implementation to use. | +| -list | List applications. Supports optional use of -appTypes to filter applications based on application type, -appStates to filter applications based on application state and -appTags to filter applications based on application tag. | +| -movetoqueue \ | Moves the application to a different queue. Deprecated command. Use 'changeQueue' instead. | | -queue \ | Works with the movetoqueue command to specify which queue to move an application to. | +| -save \ \ | Saves specification file for an application. Options -updateLifetime and -changeQueue can be specified to alter the values provided in the file. Supports -appTypes option to specify which client implementation to use. | +| -start \ | Starts a previously saved application. Supports -appTypes option to specify which client implementation to use. | | -status \ | Prints the status of the application. | -| -updateLifetime \ | Update application timeout (from the time of request) in seconds. ApplicationId can be specified using 'appId' option. | +| -stop \ | Stops application gracefully (may be started again later). If name is provided, appType must be provided unless it is the default yarn-service. If ID is provided, the appType will be looked up. Supports -appTypes option to specify which client implementation to use. | +| -updateLifetime \ | Update timeout of an application from NOW. ApplicationId can be passed using 'appId' option. Timeout value is in seconds. 
| | -updatePriority \ | Update priority of an application. ApplicationId can be passed using 'appId' option. | -Prints application(s) report/kill application +Prints application(s) report/kill application/manage long running application ### `applicationattempt` @@ -273,6 +284,12 @@ Usage: `yarn timelineserver` Start the TimeLineServer +### registrydns + +Usage: `yarn registrydns` + +Start the RegistryDNS server + Files ----- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/index.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/index.md index 3a648b6..0a127cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/index.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/index.md @@ -22,6 +22,7 @@ to allow deployed applications to register themselves and the means of communicating with them. Client applications can then locate services and use the binding information to connect with the services's network-accessible endpoints, be they REST, IPC, Web UI, Zookeeper quorum+path or some other protocol. +Currently, all the registry data is stored in a zookeeper cluster. * [Architecture](yarn-registry.html) * [Configuration](registry-configuration.html) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md index adca451..46bc92d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md @@ -90,7 +90,7 @@ and the ports on which the ZK services are listening. 
``` - List of hostname:port pairs defining the + A comma separated list of hostname:port pairs defining the zookeeper quorum binding for the registry hadoop.registry.zk.quorum @@ -339,7 +339,7 @@ concluding that the quorum is unreachable and failing. - List of hostname:port pairs defining the + A comma separated list of hostname:port pairs defining the zookeeper quorum binding for the registry hadoop.registry.zk.quorum diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md new file mode 100644 index 0000000..e567d03 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Concepts.md @@ -0,0 +1,46 @@ + + +# Concepts +This document describes some key concepts and features that makes YARN as a first-class platform in order to natively support long running services on YARN. + +### Service Framework (ApplicationMaster) on YARN +A container orchestration framework is implemented to help deploying services on YARN. In a nutshell, the framework is an ApplicationMaster that +requests containers from ResourceManager based on service definition provided by the user and launch the containers across the cluster adhering to placement policies. +It also does all the heavy lifting work such as resolving the service definition and configurations, managing component life cycles such as automatically restarting +failed containers, monitoring components' healthiness and readiness, ensuring dependency start order across components, flexing up/down components, +upgrading components etc. The end goal of the framework is to make sure the service is up and running as the state that user desired. 
+ +In addition, it leverages a lot of features in YARN core to accomplish scheduling constraints, such as +affinity and anti-affinity scheduling, log aggregation for services, automatically restart a container if it fails, and do in-place upgrade of a container. + +### A Restful API-Server for deploying/managing services on YARN +A restful API server is developed to allow users to deploy/manage their services on YARN via a simple JSON spec. This avoids users +from dealing with the low-level APIs, writing complex code to bring their services onto YARN. The REST layer acts as a unified REST based entry for +creation and lifecycle management of YARN services. Services here can range from simple single-component apps to the most complex, +multi-component applications needing special orchestration needs. Please refer to this [API doc](YarnServiceAPI.md) for detailed API documentations. + +The API-server is stateless, which means users can simply spin up multiple instances, and have a load balancer fronting them to +support HA, distribute the load etc. + +### Service Discovery +A DNS server is implemented to enable discovering services on YARN via the standard mechanism: DNS lookup. + +The framework posts container information such as hostname and ip into the [YARN service registry](../registry/index.md). And the DNS server essentially exposes the +information in YARN service registry by translating them into DNS records such as A record and SRV record. +Clients can then discover the IPs of containers via standard DNS lookup. + +The previous read mechanisms of YARN Service Registry were limited to a registry specific (java) API and a REST interface and are difficult +to wireup existing clients and services. The DNS based service discovery eliminates this gap. Please refer to this [Service Discovery doc](ServiceDiscovery.md) +for more details. 
\ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md new file mode 100644 index 0000000..e4881ce --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Examples.md @@ -0,0 +1,161 @@ + + +# YARN Service Examples + +This document describes some example service definitions (`Yarnfile`). + + + +## Apache web server - httpd (with registry DNS) +Below is the `Yarnfile` for a service called `httpd-service` with two `httpd` instances. +There is also an httpd proxy instance (httpd-proxy-0) that proxies between the other two httpd instances (httpd-0 and httpd-1). + +Note this example requires registry DNS. +``` +{ + "name": "httpd-service", + "lifetime": "3600", + "components": [ + { + "name": "httpd", + "number_of_containers": 2, + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "files": [ + { + "type": "ENV", + "dest_file": "/var/www/html/index.html", + "props": { + "content": "
Title
Hello from ${COMPONENT_INSTANCE_NAME}!" + } + } + ] + } + }, + { + "name": "httpd-proxy", + "number_of_containers": 1, + "artifact": { + "id": "centos/httpd-24-centos7:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/bin/run-httpd", + "resource": { + "cpus": 1, + "memory": "1024" + }, + "configuration": { + "files": [ + { + "type": "TEMPLATE", + "dest_file": "/etc/httpd/conf.d/httpd-proxy.conf", + "src_file": "httpd-proxy.conf" + } + ] + } + } + ], + "quicklinks": { + "Apache HTTP Server": "http://httpd-proxy-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080" + } +} +``` +This `Yarnfile` is already included in the Hadoop distribution, along with the required configuration template `httpd-proxy.conf`. +First upload the configuration template file to HDFS: +``` +hdfs dfs -copyFromLocal ${HADOOP_YARN_HOME}/share/hadoop/yarn/yarn-service-examples/httpd/httpd-proxy.conf . +``` + +The proxy configuration template looks like the following and will configure the httpd-proxy-0 container to balance between the httpd-0 and httpd-1 containers evenly: +``` + + BalancerMember http://httpd-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080 + BalancerMember http://httpd-1.${SERVICE_NAME}.${USER}.${DOMAIN}:8080 + ProxySet lbmethod=bytraffic + + +ProxyPass "/" "balancer://test/" +ProxyPassReverse "/" "balancer://test/" +``` + +Then run the service with the command: +``` +yarn app -launch httpd +``` + +The last argument is either the path to a JSON specification of the service, or in this case, the name of an example service. +The directory where examples can be found can be configured by setting the YARN\_EXAMPLES\_DIR environment variable. + +Once the service is running, navigate to `http://httpd-proxy-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080` to see the root page. +The pages should alternately show "Hello from httpd-0!" or "Hello from httpd-1!" 
+ +The individual httpd URLs can also be visited, `http://httpd-0.${SERVICE_NAME}.${USER}.${DOMAIN}:8080` and `http://httpd-1.${SERVICE_NAME}.${USER}.${DOMAIN}:8080`. + +If unsure of your hostnames, visit the RM REST endpoint `http://:8088/ws/v1/services/httpd-service`. + +## Apache web server - httpd (without registry DNS) + +A similar IP-based example is provided for environments that do not have registry DNS set up. +The service name for this example is `httpd-service-no-dns`. +There are a couple of additions to the `Yarnfile` for the `httpd-service` described above. +A readiness check is added for the `httpd` component: +``` + "readiness_check": { + "type": "HTTP", + "props": { + "url": "http://${THIS_HOST}:8080" + } + }, +``` +and `httpd` is added as a dependency for the `httpd-proxy` component: +``` + "dependencies": [ "httpd" ], +``` + +This means that the httpd-proxy-0 instance will not be started until after an HTTP probe has succeeded for the httpd-0 and httpd-1 containers. +This is necessary so that the IPs of the containers can be used in the configuration of httpd-proxy-0. +The proxy configuration is similar to that of the previous example, with the BalancerMember lines changed as follows: +``` + BalancerMember http://${HTTPD-0_IP}:8080 + BalancerMember http://${HTTPD-1_IP}:8080 +``` + +Note that IP and HOST variables such as `${HTTPD-0_IP}` and `${HTTPD-0_HOST}` should only be used by a component that has a dependency on the named component (`httpd` in this case) AND should only be used when the named component specifies a readiness check. +Here, `httpd-proxy` has a dependency on `httpd` and `httpd` has an HTTP readiness check. +Without the dependency and readiness check, the httpd-proxy-0 container would be started in parallel with the httpd-0 and http-1 containers, and the IPs and hosts would not be assigned yet for httpd-0 and httpd-1. + +Other variables can be used by any component. 
+ +Before creating the service, upload the proxy configuration to HDFS: +``` +hdfs dfs -copyFromLocal ${HADOOP_YARN_HOME}/share/hadoop/yarn/yarn-service-examples/httpd-no-dns/httpd-proxy-no-dns.conf . +``` + +Then run the service with the command: +``` +yarn app -launch httpd-no-dns +``` +where `service-name` is optional. If omitted, it uses the name defined in the `Yarnfile`. + +Look up your IPs at the RM REST endpoint `http://:8088/ws/v1/services/httpd-service`. +Then visit port 8080 for each IP to view the pages. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md new file mode 100644 index 0000000..58daee5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md @@ -0,0 +1,60 @@ + + +# YARN Service +## Overview +Yarn Service framework provides first class support and APIs to host long running services natively in YARN. +In a nutshell, it serves as a container orchestration platform for managing containerized services on YARN. It supports both docker container +and traditional process based containers in YARN. + +The responsibility of this framework includes performing configuration resolutions and mounts, +lifecycle management such as stop/start/delete the service, flexing service components up/down, rolling upgrades services on YARN, monitoring services' healthiness and readiness and more. + +The yarn-service framework primarily includes below components: + +* A core framework (ApplicationMaster) running on YARN to serve as a container orchestrator, being responsible for all service lifecycle managements. +* A restful API-server to for users to interact with YARN to deploy/manage their services via a simple JSON spec. +* A DNS server backed by YARN service registry to enable discovering services on YARN by the standard DNS lookup. 
+ +## Why should I try YARN Service framework? + +YARN Service framework makes it easy to bring existing services onto YARN. +It hides all the complex low-level details of application management and relieves +users from forced into writing new code. Developers of new services do not have +to worry about YARN internals and only need to focus on containerization of their +service(s). + +Further, another huge win of this feature is that now you can enable both +traditional batch processing jobs and long running services in a single platform! +The benefits of combining these workloads are two-fold: + +* Greatly simplify the cluster operations as you have only a single cluster to deal with. +* Making both batch jobs and services share a cluster can greatly improve resource utilization. + +## How do I get started? + +*`This feature is in alpha state`* and so APIs, command lines are subject to change. We will continue to update the documents over time. + +[QuickStart](QuickStart.md) shows a quick tutorial that walks you through simple steps to deploy a service on YARN. + +## How do I get my hands dirty? + +* [Concepts](Concepts.md): Describes the internals of the framework and some features in YARN core to support running services on YARN. +* [Service REST API](YarnServiceAPI.md): The API doc for deploying/managing services on YARN. +* [Service Discovery](ServiceDiscovery.md): Descirbes the service discovery mechanism on YARN. +* [Registry DNS](RegistryDNS.md): Deep dives into the Registry DNS internals. +* [Examples](Examples.md): List some example service definitions (`Yarnfile`). 
+ + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md new file mode 100644 index 0000000..512c011 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/QuickStart.md @@ -0,0 +1,217 @@ + + +# Quick Start + +This document describes how to deploy services on YARN using the YARN Service framework. + + + +## Start HDFS and YARN components + + Start all the hadoop components HDFS, YARN as usual. + + +## Example service +Below is a simple service definition that launches sleep containers on YARN by writing a simple spec file and without writing any code. + +``` +{ + "name": "sleeper-service", + "components" : + [ + { + "name": "sleeper", + "number_of_containers": 1, + "launch_command": "sleep 900000", + "resource": { + "cpus": 1, + "memory": "256" + } + } + ] +} +``` +User can simply run a pre-built example service on YARN using below command: +``` +yarn app -launch +``` +e.g. Below command launches a `sleeper` service named as `my-sleeper` on YARN. +``` +yarn app -launch my-sleeper sleeper +``` +For launching docker based services using YARN Service framework, please refer to [API doc](YarnServiceAPI.md). + +## Manage services on YARN via CLI +Below steps walk you through deploying a services on YARN using CLI. +Refer to [Yarn Commands](../YarnCommands.md) for the full list of commands and options. +### Deploy a service +``` +yarn app -launch ${SERVICE_NAME} ${PATH_TO_SERVICE_DEF_FILE} +``` +Params: +- SERVICE_NAME: The name of the service. Note that this needs to be unique across running services for the current user. +- PATH_TO_SERVICE_DEF: The path to the service definition file in JSON format. 
+ +For example: +``` +yarn app -launch sleeper-service /path/to/local/sleeper.json +``` + +### Flex a component of a service
Increase or decrease the number of containers for a component. +``` +yarn app -flex ${SERVICE_NAME} -component ${COMPONENT_NAME} ${NUMBER_OF_CONTAINERS} +``` +For example, for a service named `sleeper-service`: + +Set the `sleeper` component to `2` containers (absolute number). + +``` +yarn app -flex sleeper-service -component sleeper 2 +``` + +Relative changes are also supported for the ${NUMBER_OF_CONTAINERS} in the flex command, such as +2 or -2. + +### Stop a service
Stopping a service will stop all containers of the service and the ApplicationMaster, but does not delete the state of a service, such as the service root folder on hdfs. +``` +yarn app -stop ${SERVICE_NAME} +``` + +### Restart a stopped service
Restarting a stopped service is easy - just call start! +``` +yarn app -start ${SERVICE_NAME} +``` + +### Destroy a service
In addition to stopping a service, it also deletes the service root folder on hdfs and the records in YARN Service Registry. +``` +yarn app -destroy ${SERVICE_NAME} +``` + +## Manage services on YARN via REST API + +YARN API Server REST API can be activated as part of the ResourceManager. + +### Start Embedded API-Server as part of ResourceManager
For running inside ResourceManager, add this property to `yarn-site.xml` and restart ResourceManager. + +``` +<property> + <description> + Enable services rest api on ResourceManager. + </description> + <name>yarn.webapp.api-service.enable</name> + <value>true</value> +</property> +``` + +Services can be deployed on YARN through the ResourceManager web endpoint. + +Refer to [API doc](YarnServiceAPI.md) for the detailed API specifications. 
+ +### Deploy a service + +POST the aforementioned example service definition to the ResourceManager api-server endpoint: +``` +POST http://localhost:8088/ws/v1/services +``` + +### Get a service status +``` +GET http://localhost:8088/ws/v1/services/${SERVICE_NAME} +``` + +### Flex a component of a service +``` +PUT http://localhost:8088/ws/v1/services/${SERVICE_NAME}/components/${COMPONENT_NAME} +``` +`PUT` Request body: +``` +{ + "name": "${COMPONENT_NAME}", + "number_of_containers": ${COUNT} +} +``` +For example: +``` +{ + "name": "sleeper", + "number_of_containers": 2 +} +``` + +### Stop a service +Stopping a service will stop all containers of the service and the ApplicationMaster, but does not delete the state of a service, such as the service root folder on hdfs. + +``` +PUT http://localhost:8088/ws/v1/services/${SERVICE_NAME} +``` + +`PUT` Request body: +``` +{ + "name": "${SERVICE_NAME}", + "state": "STOPPED" +} +``` + +### Restart a stopped service +Restarting a stopped service is easy. + +``` +PUT http://localhost:8088/ws/v1/services/${SERVICE_NAME} +``` + +`PUT` Request body: +``` +{ + "name": "${SERVICE_NAME}", + "state": "STARTED" +} +``` +### Destroy a service +In addition to stopping a service, it also deletes the service root folder on hdfs and the records in YARN Service Registry. +``` +DELETE http://localhost:8088/ws/v1/services/${SERVICE_NAME} +``` + +## Services UI with YARN UI2 and Timeline Service v2 +A new `service` tab is added in the YARN UI2 specially to show YARN Services in a first class manner. +The services framework posts the data into TimelineService and the `service` UI reads data from TimelineService to render its content. + +### Enable Timeline Service v2 +Please refer to [TimeLineService v2 doc](../TimelineServiceV2.md) for how to enable Timeline Service v2. + +### Enable new YARN UI + +Set below config in `yarn-site.xml` and start ResourceManager. 
+If you are building from source code, make sure you use `-Pyarn-ui` in the `mvn` command - this will generate the war file for the new YARN UI. +``` + + To enable RM web ui2 application. + yarn.webapp.ui2.enable + true + +``` + +# Try with Docker +The above example is only for a non-docker container based service. YARN Service Framework also provides first-class support for managing docker based services. +Most of the steps for managing docker based services are the same except that in docker the `Artifact` type for a component is `DOCKER` and the Artifact `id` is the name of the docker image. +For details in how to setup docker on YARN, please check [Docker on YARN](../DockerContainers.md). + +With docker support, it also opens up a set of new possibilities to implement features such as discovering service containers on YARN with DNS. +Check [ServiceDiscovery](ServiceDiscovery.md) for more details. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md new file mode 100644 index 0000000..c7f19ce --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md @@ -0,0 +1,200 @@ + + +# Registry DNS Server + + +The document describes the internals of Registry DNS server. It is based on the [YARN service registry](../registry/index.md) which is backed by a zookeeper cluster. +## Introduction + +The Registry DNS Server provides a standard DNS interface to the information posted into the YARN Registry by deployed applications. The DNS service serves the following functions: + +1. **Exposing existing service-discovery information via DNS** - Information provided in +the current YARN service registry’s records will be converted into DNS entries, thus +allowing users to discover information about YARN applications using standard DNS +client mechanisms (e.g. 
a DNS SRV Record specifying the hostname and port +number for services). +2. **Enabling Container to IP mappings** - Enables discovery of the IPs of containers via +standard DNS lookups. Given the availability of the records via DNS, container +name-based communication will be facilitated (e.g. `curl +http://solr-0.solr-service.devuser.yarncluster:8983/solr/admin/collections?action=LIST`). + +## Service Properties + +The existing YARN Service Registry is leveraged as the source of information for the DNS Service. + +The following core functions are supported by the DNS-Server: + +### Functional properties + +1. Supports creation of DNS records for end-points of the deployed YARN applications +2. Record names remain unchanged during restart of containers and/or applications +3. Supports reverse lookups (name based on IP). Note, this works only for +Docker containers because other containers share the IP of the host +4. Supports security using the standards defined by The Domain Name System Security +Extensions (DNSSEC) +5. Highly available +6. Scalable - The service provides the responsiveness (e.g. low-latency) required to +respond to DNS queries (timeouts yield attempts to invoke other configured name +servers). + +### Deployment properties + +1. Supports integration with existing DNS assets (e.g. a corporate DNS server) by acting as +a DNS server for a Hadoop cluster zone/domain. The server is not intended to act as a +primary DNS server and does not forward requests to other servers. Rather, a +primary DNS server can be configured to forward a zone to the registry DNS +server. +2. The DNS Server exposes a port that can receive both TCP and UDP requests per +DNS standards. The default port for DNS protocols is not in the restricted +range (5353). However, existing DNS assets may only allow zone forwarding to +non-custom ports. To support this, the registry DNS server can be started in +privileged mode. 
+ +## DNS Record Name Structure + +The DNS names of generated records are composed from the following elements +(labels). Note that these elements must be compatible with DNS conventions +(see “Preferred Name Syntax” in [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt)): + +* **domain** - the name of the cluster DNS domain. This name is provided as a +configuration property. In addition, it is this name that is configured at a parent DNS +server as the zone name for the defined registry DNS zone (the zone for which +the parent DNS server will forward requests to registry DNS). E.g. yarncluster.com +* **username** - the name of the application deployer. This name is the simple short-name (for +e.g. the primary component of the Kerberos principal) associated with the user launching +the application. As the username is one of the elements of DNS names, it is expected +that this also conforms to DNS name conventions (RFC 1035 linked above), so it +is converted to a valid DNS hostname entries using the punycode convention used +for internationalized DNS. +* **application name** - the name of the deployed YARN application. This name is inferred +from the YARN registry path to the application's node. Application name, +rather than application id, was chosen as a way of making it easy for users to refer to human-readable DNS +names. This obviously mandates certain uniqueness properties on application names. +* **container id** - the YARN assigned ID to a container (e.g. +container_e3741_1454001598828_01_000004) +* **component name** - the name assigned to the deployed component (for e.g. a master +component). A component is a distributed element of an application or service that is +launched in a YARN container (e.g. an HBase master). One can imagine multiple +components within an application. A component name is not yet a first class concept in +YARN, but is a very useful one that we are introducing here for the sake of registry DNS +entries. 
Many frameworks like MapReduce, Slider already have component names +(though, as mentioned, they are not yet supported in YARN in a first class fashion). +* **api** - the api designation for the exposed endpoint + +### Notes about DNS Names + +* In most instances, the DNS names can be easily distinguished by the number of +elements/labels that compose the name. The cluster’s domain name is always the last +element. After that element is parsed out, reading from right to left, the first element +maps to the application user and so on. Wherever it is not easily distinguishable, naming conventions are used to disambiguate the name using a prefix such as +“container” or suffix such as “api”. For example, an endpoint published as a +management endpoint will be referenced with the name *management-api.griduser.yarncluster.com*. +* Unique application name (per user) is not currently supported/guaranteed by YARN, but +it is supported by the YARN service framework. The registry DNS service currently +leverages the last element of the ZK path entry for the application as an +application name. These application names have to be unique for a given user. 
+ +## DNS Server Functionality + +The primary functions of the DNS service are illustrated in the following diagram: + +![DNS Functional Overview](../images/dns_overview.png "DNS Functional Overview") + +### DNS record creation +The following figure illustrates at slightly greater detail the DNS record creation and registration sequence (NOTE: service record updates would follow a similar sequence of steps, +distinguished only by the different event type): + +![DNS Functional Overview](../images/dns_record_creation.jpeg "DNS Functional Overview") + +### DNS record removal +Similarly, record removal follows a similar sequence + +![DNS Functional Overview](../images/dns_record_removal.jpeg "DNS Functional Overview") + +(NOTE: The DNS Zone requires a record as an argument for the deletion method, thus +requiring similar parsing logic to identify the specific records that should be removed). + +### DNS Service initialization +* The DNS service initializes both UDP and TCP listeners on a configured port. +If a port in the restricted range is desired (such as the standard DNS port +53), the DNS service can be launched using jsvc as described in the section +on starting the DNS server. +* Subsequently, the DNS service listens for inbound DNS requests. Those requests are +standard DNS requests from users or other DNS servers (for example, DNS servers that have the +RegistryDNS service configured as a forwarder). + +## Start the DNS Server +By default, the DNS server runs on non-privileged port `5353`. Start the server +with: +``` +yarn --daemon start registrydns +``` + +If the DNS server is configured to use the standard privileged port `53`, the +environment variables YARN\_REGISTRYDNS\_SECURE\_USER and +YARN\_REGISTRYDNS\_SECURE\_EXTRA\_OPTS must be uncommented in the yarn-env.sh +file. The DNS server should then be launched as root and jsvc will be used to +reduce the privileges of the daemon after the port has been bound. 
+ +## Configuration +The Registry DNS server reads its configuration properties from the yarn-site.xml file. The following are the DNS associated configuration properties: + +| Name | Description | +| ------------ | ------------- | +|hadoop.registry.zk.quorum| A comma separated list of hostname:port pairs defining the zookeeper quorum for the [YARN registry](../registry/registry-configuration.md). | +| hadoop.registry.dns.enabled | The DNS functionality is enabled for the cluster. Default is false. | +| hadoop.registry.dns.domain-name | The domain name for Hadoop cluster associated records. | +| hadoop.registry.dns.bind-address | Address associated with the network interface to which the DNS listener should bind. | +| hadoop.registry.dns.bind-port | The port number for the DNS listener. The default port is 5353. | +| hadoop.registry.dns.dnssec.enabled | Indicates whether the DNSSEC support is enabled. Default is false. | +| hadoop.registry.dns.public-key | The base64 representation of the server’s public key. Leveraged for creating the DNSKEY Record provided for DNSSEC client requests. | +| hadoop.registry.dns.private-key-file | The path to the standard DNSSEC private key file. Must only be readable by the DNS launching identity. See [dnssec-keygen](https://ftp.isc.org/isc/bind/cur/9.9/doc/arm/man.dnssec-keygen.html) documentation. | +| hadoop.registry.dns-ttl | The default TTL value to associate with DNS records. The default value is set to 1 (a value of 0 has undefined behavior). A typical value should be approximate to the time it takes YARN to restart a failed container. | +| hadoop.registry.dns.zone-subnet | An indicator of the IP range associated with the cluster containers. The setting is utilized for the generation of the reverse zone name. | +| hadoop.registry.dns.zone-mask | The network mask associated with the zone IP range. If specified, it is utilized to ascertain the IP range possible and come up with an appropriate reverse zone name. 
| +| hadoop.registry.dns.zones-dir | A directory containing zone configuration files to read during zone initialization. This directory can contain zone master files named *zone-name.zone*. See [here](http://www.zytrax.com/books/dns/ch6/mydomain.html) for zone master file documentation.| +### Sample configurations +``` + + The domain name for Hadoop cluster associated records. + hadoop.registry.dns.domain-name + ycluster + + + + The port number for the DNS listener. The default port is 5353. + If the standard privileged port 53 is used, make sure start the DNS with jsvc support. + hadoop.registry.dns.bind-port + 5353 + + + + The DNS functionality is enabled for the cluster. Default is false. + hadoop.registry.dns.enabled + true + + + + Address associated with the network interface to which the DNS listener should bind. + hadoop.registry.dns.bind-address + localhost + + + + A comma separated list of hostname:port pairs defining the zookeeper quorum for the YARN registry + hadoop.registry.zk.quorum + localhost:2181 + + ``` \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md new file mode 100644 index 0000000..ed56fa3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md @@ -0,0 +1,136 @@ + + +# Service Discovery + +This document describes the mechanism of service discovery on YARN and the +steps for enabling it. + +## Overview +A [DNS server](RegistryDNS.md) is implemented to enable discovering services on YARN via +the standard mechanism: DNS lookup. + +The framework ApplicationMaster posts the container information such as hostname and IP address into +the YARN service registry. The DNS server exposes the information in YARN service registry by translating them into DNS +records such as A record and SRV record. 
Clients can then discover the IPs of containers via standard DNS lookup. + +For non-docker containers (containers with null `Artifact` or with `Artifact` type set to `TARBALL`), since all containers on the same host share the same ip address, +the DNS supports forward DNS lookup, but does not support reverse DNS lookup. +With docker, it supports both forward and reverse lookup, since each container +can be configured to have its own unique IP. In addition, the DNS also supports configuring static zone files for both forward and reverse lookup. + +## Docker Container IP Management in Cluster
To support the use-case of per container per IP, containers must be launched with `bridge` network. However, with `bridge` network, containers +running on one node are not routable from other nodes by default. This is not an issue if you are only doing single node testing, however, for +a multi-node environment, containers must be made routable from other nodes. + +There are several approaches to solve this depending on the platforms like GCE or AWS. Please refer to specific platform documentations for how to enable this. +For on-prem cluster, one way to solve this issue is, on each node, configure the docker daemon to use a custom bridge say `br0` which is routable from all nodes. +Also, assign an exclusive, contiguous range of IP addresses expressed in CIDR form e.g `172.21.195.240/26 (64 IPs)` to each docker +daemon using the `fixed-cidr` option like below in the docker `daemon.json`: +``` +"bridge": "br0" +"fixed-cidr": "172.21.195.240/26" +``` +Check how to [customize docker bridge network](https://docs.docker.com/engine/userguide/networking/default_network/custom-docker0/) for details. 
+ + +## Naming Convention with Registry DNS +With the DNS support, user can simply access their services in a well-defined naming format as below: + +``` +${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN} +``` +For example, in a cluster whose domain name is `yarncluster` (as defined by the `hadoop.registry.dns.domain-name` in `yarn-site.xml`), a service named `hbase` deployed by user `devuser` +with two components `hbasemaster` and `regionserver` can be accessed as below: + +This URL points to the usual hbase master UI +``` +http://hbasemaster-0.hbase.devuser.yarncluster:16010/master-status +``` + + +Note that YARN service framework assigns `COMPONENT_INSTANCE_NAME` for each container in a sequence of monotonically increasing integers. For example, `hbasemaster-0` gets +assigned `0` since it is the first and only instance for the `hbasemaster` component. In case of `regionserver` component, it can have multiple containers + and so be named as such: `regionserver-0`, `regionserver-1`, `regionserver-2` ... etc + +`Disclaimer`: The DNS implementation is still experimental. It should not be used as a fully-functional DNS. + + +## Configure Registry DNS + +Below is the set of configurations in `yarn-site.xml` required for enabling Registry DNS. A full list of properties can be found in the Configuration +section of [Registry DNS](RegistryDNS.md). + + +``` + + The domain name for Hadoop cluster associated records. + hadoop.registry.dns.domain-name + ycluster + + + + The port number for the DNS listener. The default port is 5353. + If the standard privileged port 53 is used, make sure start the DNS with jsvc support. + hadoop.registry.dns.bind-port + 5353 + + + + The DNS functionality is enabled for the cluster. Default is false. + hadoop.registry.dns.enabled + true + + + + Address associated with the network interface to which the DNS listener should bind. 
+ hadoop.registry.dns.bind-address + localhost + + + + A comma separated list of hostname:port pairs defining the zookeeper quorum for the YARN registry + hadoop.registry.zk.quorum + localhost:2181 + +``` +To configure Registry DNS to serve reverse lookup for `172.17.0.0/24` +``` + + The network mask associated with the zone IP range. If specified, it is utilized to ascertain the + IP range possible and come up with an appropriate reverse zone name. + hadoop.registry.dns.zone-mask + 255.255.255.0 + + + + An indicator of the IP range associated with the cluster containers. The setting is utilized for the + generation of the reverse zone name. + hadoop.registry.dns.zone-subnet + 172.17.0.0 + +``` +## Start the DNS Server +By default, the DNS server runs on non-privileged port `5353`. Start the server +with: +``` +yarn --daemon start registrydns +``` + +If the DNS server is configured to use the standard privileged port `53`, the +environment variables `YARN_REGISTRYDNS_SECURE_USER` and +`YARN_REGISTRYDNS_SECURE_EXTRA_OPTS` must be uncommented in the `yarn-env.sh` +file. The DNS server should then be launched as `root` and jsvc will be used to +reduce the privileges of the daemon after the port has been bound. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md new file mode 100644 index 0000000..7a2490e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/YarnServiceAPI.md @@ -0,0 +1,598 @@ + + +# YARN Service API + +Bringing a new service on YARN today is not a simple experience. The APIs of existing +frameworks are either too low level (native YARN), require writing new code (for frameworks with programmatic APIs) +or writing a complex spec (for declarative frameworks). + +This simplified REST API can be used to create and manage the lifecycle of YARN services. 
+In most cases, the application owner will not be forced to make any changes to their applications. +This is primarily true if the application is packaged with containerization technologies like Docker. + +This document describes the API specifications (aka. YarnFile) for deploying/managing +containerized services on YARN. The same JSON spec can be used for both REST API +and CLI to manage the services. + + +### Version information +Version: 1.0.0 + +### License information +License: Apache 2.0 +License URL: http://www.apache.org/licenses/LICENSE-2.0.html + +### URI scheme +Host: host.mycompany.com + +Port: 8088(default RM port) + +Schemes: HTTP + +### Consumes + +* application/json + + +### Produces + +* application/json + + +## Paths +### Create a service +``` +POST /ws/v1/services +``` + +#### Description + +Create a service. The request JSON is a service object with details required for creation. If the request is successful it returns 202 Accepted. A success of this API only confirms success in submission of the service creation request. There is no guarantee that the service will actually reach a RUNNING state. Resource availability and several other factors determines if the service will be deployed in the cluster. It is expected that clients would subsequently call the GET API to get details of the service and determine its state. + +#### Parameters +|Type|Name|Description|Required|Schema|Default| +|----|----|----|----|----|----| +|BodyParameter|Service|Service request object|true|Service|| + + +#### Responses +|HTTP Code|Description|Schema| +|----|----|----| +|202|The request to create a service is accepted|No Content| +|400|Invalid service definition provided in the request body|No Content| +|500|Failed to create a service|No Content| +|default|Unexpected error|ServiceStatus| + + +### (TBD) List of services running in the cluster. 
+``` +GET /ws/v1/services +``` + +#### Description + +Get a list of all currently running services (response includes a minimal projection of the service info). For more details do a GET on a specific service name. + +#### Responses +|HTTP Code|Description|Schema| +|----|----|----| +|200|An array of services|Service array| +|default|Unexpected error|ServiceStatus| + + +### Get current version of the API server. +``` +GET /ws/v1/services/version +``` + +#### Description + +Get current version of the API server. + +#### Responses +|HTTP Code|Description|Schema| +|----|----|----| +|200|Successful request|No Content| + + +### Update a service or upgrade the binary version of the components of a running service +``` +PUT /ws/v1/services/{service_name} +``` + +#### Description + +Update the runtime properties of a service. Currently the following operations are supported - update lifetime, stop/start a service. The PUT operation is also used to orchestrate an upgrade of the service containers to a newer version of their artifacts (TBD). + +#### Parameters +|Type|Name|Description|Required|Schema|Default| +|----|----|----|----|----|----| +|PathParameter|service_name|Service name|true|string|| +|BodyParameter|Service|The updated service definition. It can contain the updated lifetime of a service or the desired state (STOPPED/STARTED) of a service to initiate a start/stop operation against the specified service|true|Service|| + + +#### Responses +|HTTP Code|Description|Schema| +|----|----|----| +|204|Update or upgrade was successful|No Content| +|404|Service does not exist|No Content| +|default|Unexpected error|ServiceStatus| + + +### Destroy a service +``` +DELETE /ws/v1/services/{service_name} +``` + +#### Description + +Destroy a service and release all resources. This API might have to return JSON data providing location of logs (TBD), etc. 
+ +#### Parameters +|Type|Name|Description|Required|Schema|Default| +|----|----|----|----|----|----| +|PathParameter|service_name|Service name|true|string|| + + +#### Responses +|HTTP Code|Description|Schema| +|----|----|----| +|204|Destroy was successful|No Content| +|404|Service does not exist|No Content| +|default|Unexpected error|ServiceStatus| + + +### Get details of a service. +``` +GET /ws/v1/services/{service_name} +``` + +#### Description + +Return the details (including containers) of a running service + +#### Parameters +|Type|Name|Description|Required|Schema|Default| +|----|----|----|----|----|----| +|PathParameter|service_name|Service name|true|string|| + + +#### Responses +|HTTP Code|Description|Schema| +|----|----|----| +|200|a service object|object| +|404|Service does not exist|No Content| +|default|Unexpected error|ServiceStatus| + + +### Flex a component's number of instances. +``` +PUT /ws/v1/services/{service_name}/components/{component_name} +``` + +#### Description + +Set a component's desired number of instances + +#### Parameters +|Type|Name|Description|Required|Schema|Default| +|----|----|----|----|----|----| +|PathParameter|service_name|Service name|true|string|| +|PathParameter|component_name|Component name|true|string|| +|BodyParameter|Component|The definition of a component which contains the updated number of instances.|true|Component|| + + +#### Responses +|HTTP Code|Description|Schema| +|----|----|----| +|200|Flex was successful|No Content| +|404|Service does not exist|No Content| +|default|Unexpected error|ServiceStatus| + +## Definitions +### Artifact + +Artifact of a service component. If not specified, component will just run the bare launch command and no artifact will be localized. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|id|Artifact id. 
Examples are package location uri for tarball based services, image name for docker, name of service, etc.|true|string|| +|type|Artifact type, like docker, tarball, etc. (optional). For TARBALL type, the specified tarball will be localized to the container local working directory under a folder named lib. For SERVICE type, the service specified will be read and its components will be added into this service. The original component with artifact type SERVICE will be removed (any properties specified in the original component will be ignored).|false|enum (DOCKER, TARBALL, SERVICE)|DOCKER| +|uri|Artifact location to support multiple artifact stores (optional).|false|string|| + + +### Component + +One or more components of the service. If the service is HBase say, then the component can be a simple role like master or regionserver. If the service is a complex business webapp then a component can be other services say Kafka or Storm. Thereby it opens up the support for complex and nested services. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|name|Name of the service component (mandatory). If Registry DNS is enabled, the max length is 63 characters. If unique component support is enabled, the max length is lowered to 44 characters.|true|string|| +|state|The state of the component|false|ComponentState|| +|dependencies|An array of service components which should be in READY state (as defined by readiness check), before this component can be started. The dependencies across all components of a service should be represented as a DAG.|false|string array|| +|readiness_check|Readiness check for this component.|false|ReadinessCheck|| +|artifact|Artifact of the component (optional). If not specified, the service level global artifact takes effect.|false|Artifact|| +|launch_command|The custom launch command of this component (optional for DOCKER component, required otherwise). 
When specified at the component level, it overrides the value specified at the global level (if any).|false|string|| +|resource|Resource of this component (optional). If not specified, the service level global resource takes effect.|false|Resource|| +|number_of_containers|Number of containers for this component (optional). If not specified, the service level global number_of_containers takes effect.|false|integer (int64)|| +|run_privileged_container|Run all containers of this component in privileged mode (YARN-4262).|false|boolean|| +|placement_policy|Advanced scheduling and placement policies for all containers of this component (optional). If not specified, the service level placement_policy takes effect. Refer to the description at the global level for more details.|false|PlacementPolicy|| +|configuration|Config properties for this component.|false|Configuration|| +|quicklinks|A list of quicklink keys defined at the service level, and to be resolved by this component.|false|string array|| + + +### ComponentState + +The state of the component + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|state|enum of the state of the component|false|enum (FLEXING, STABLE)|| + + +### ConfigFile + +A config file that needs to be created and made available as a volume in a service component container. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|type|Config file in the standard format like xml, properties, json, yaml, template.|false|enum (XML, PROPERTIES, JSON, YAML, TEMPLATE, ENV, HADOOP_XML)|| +|dest_file|The path that this configuration file should be created as. If it is an absolute path, it will be mounted into the DOCKER container. Absolute paths are only allowed for DOCKER containers. 
If it is a relative path, only the file name should be provided, and the file will be created in the container local working directory under a folder named conf.|false|string|| +|src_file|This provides the source location of the configuration file, the content of which is dumped to dest_file post property substitutions, in the format as specified in type. Typically the src_file would point to a source controlled network accessible file maintained by tools like puppet, chef, or hdfs etc. Currently, only hdfs is supported.|false|string|| +|props|A blob of key value pairs that will be dumped in the dest_file in the format as specified in type. If src_file is specified, src_file content are dumped in the dest_file and these properties will overwrite, if any, existing properties in src_file or be added as new properties in src_file.|false|object|| + + +### Configuration + +Set of configuration properties that can be injected into the service components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|properties|A blob of key-value pairs of common service properties.|false|object|| +|env|A blob of key-value pairs which will be appended to the default system properties and handed off to the service at start time. All placeholder references to properties will be substituted before injection.|false|object|| +|files|Array of list of files that needs to be created and made available as volumes in the service component containers.|false|ConfigFile array|| + + +### Container + +An instance of a running service container. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|id|Unique container id of a running service, e.g. container_e3751_1458061340047_0008_01_000002.|false|string|| +|launch_time|The time when the container was created, e.g. 2016-03-16T01:01:49.000Z. 
This will most likely be different from cluster launch time.|false|string (date)||
+|ip|IP address of a running container, e.g. 172.31.42.141. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.|false|string||
+|hostname|Fully qualified hostname of a running container, e.g. ctr-e3751-1458061340047-0008-01-000002.examplestg.site. The IP address and hostname attribute values are dependent on the cluster/docker network setup as per YARN-4007.|false|string||
+|bare_host|The bare node or host in which the container is running, e.g. cn008.example.com.|false|string||
+|state|State of the container of a service.|false|ContainerState||
+|component_instance_name|Name of the component instance that this container instance belongs to. Component instance name is named as $COMPONENT_NAME-i, where i is a monotonically increasing integer. E.g. A component called nginx can have multiple component instances named as nginx-0, nginx-1 etc. Each component instance is backed by a container instance.|false|string||
+|resource|Resource used for this container.|false|Resource||
+|artifact|Artifact used for this container.|false|Artifact||
+|privileged_container|Container running in privileged mode or not.|false|boolean||
+
+
+### ContainerState
+
+The current state of the container of a service.
+
+|Name|Description|Required|Schema|Default|
+|----|----|----|----|----|
+|state|enum of the state of the container|false|enum (INIT, STARTED, READY)||
+
+
+### PlacementPolicy
+
+Placement policy of an instance of a service. This feature is in the works in YARN-6592.
+
+|Name|Description|Required|Schema|Default|
+|----|----|----|----|----|
+|label|Assigns a service to a named partition of the cluster where the service desires to run (optional). If not specified all services are submitted to a default label of the service owner. 
One or more labels can be set up for each service owner account with required constraints like no-preemption, sla-99999, preemption-ok, etc.|false|string||
+
+
+### ReadinessCheck
+
+A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. Hence the need for a simple interface, with scope to support advanced use cases.
+
+|Name|Description|Required|Schema|Default|
+|----|----|----|----|----|
+|type|E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).|true|enum (HTTP, PORT)||
+|props|A blob of key value pairs that will be used to configure the check.|false|object||
+|artifact|Artifact of the pluggable readiness check helper container (optional). If specified, this helper container typically hosts the http uri and encapsulates the complex scripts required to perform actual container readiness check. At the end it is expected to respond a 204 No content just like the simplified use case. This pluggable framework benefits service owners who can run services without any packaging modifications. Note, only artifacts of type docker are supported for now. NOT IMPLEMENTED YET|false|Artifact||
+
+
+### Resource
+
+Resource determines the amount of resources (vcores, memory, network, etc.) usable by a container. This field determines the resource to be applied for all the containers of a component or service. The resource specified at the service (or global) level can be overridden at the component level. Only one of profile OR cpu & memory are expected. It raises a validation exception otherwise. 
+ +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|profile|Each resource profile has a unique id which is associated with a cluster-level predefined memory, cpus, etc.|false|string|| +|cpus|Amount of vcores allocated to each container (optional but overrides cpus in profile if specified).|false|integer (int32)|| +|memory|Amount of memory allocated to each container (optional but overrides memory in profile if specified). Currently accepts only an integer value and default unit is in MB.|false|string|| + + +### Service + +a service resource has the following attributes. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|name|A unique service name. If Registry DNS is enabled, the max length is 63 characters.|true|string|| +|id|A unique service id.|false|string|| +|artifact|The default artifact for all components of the service except the components which has Artifact type set to SERVICE (optional).|false|Artifact|| +|resource|The default resource for all components of the service (optional).|false|Resource|| +|launch_time|The time when the service was created, e.g. 2016-03-16T01:01:49.000Z.|false|string (date)|| +|number_of_running_containers|In get response this provides the total number of running containers for this service (across all components) at the time of request. Note, a subsequent request can return a different number as and when more containers get allocated until it reaches the total number of containers or if a flex request has been made between the two requests.|false|integer (int64)|| +|lifetime|Life time (in seconds) of the service from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.|false|integer (int64)|| +|placement_policy|(TBD) Advanced scheduling and placement policies. If not specified, it defaults to the default placement policy of the service owner. The design of placement policies are in the works. 
It is not very clear at this point, how policies in conjunction with labels be exposed to service owners. This is a placeholder for now. The advanced structure of this attribute will be determined by YARN-4902.|false|PlacementPolicy|| +|components|Components of a service.|false|Component array|| +|configuration|Config properties of a service. Configurations provided at the service/global level are available to all the components. Specific properties can be overridden at the component level.|false|Configuration|| +|state|State of the service. Specifying a value for this attribute for the PUT payload means update the service to this desired state.|false|ServiceState|| +|quicklinks|A blob of key-value pairs of quicklinks to be exported for a service.|false|object|| +|queue|The YARN queue that this service should be submitted to.|false|string|| + + +### ServiceState + +The current state of a service. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|state|enum of the state of the service|false|enum (ACCEPTED, STARTED, READY, STOPPED, FAILED)|| + + +### ServiceStatus + +The current status of a submitted service, returned as a response to the GET API. + +|Name|Description|Required|Schema|Default| +|----|----|----|----|----| +|diagnostics|Diagnostic information (if any) for the reason of the current state of the service. 
It typically has a non-null value, if the service is in a non-running state.|false|string|| +|state|Service state.|false|ServiceState|| +|code|An error code specific to a scenario which service owners should be able to use to understand the failure in addition to the diagnostic information.|false|integer (int32)|| + + + +## Examples + +### Create a simple single-component service with most attribute values as defaults +POST URL - http://localhost:8088/ws/v1/services + +##### POST Request JSON +```json +{ + "name": "hello-world", + "components" : + [ + { + "name": "hello", + "number_of_containers": 1, + "artifact": { + "id": "nginx:latest", + "type": "DOCKER" + }, + "launch_command": "./start_nginx.sh", + "resource": { + "cpus": 1, + "memory": "256" + } + } + ] +} +``` + +##### GET Response JSON +GET URL - http://localhost:8088/ws/v1/services/hello-world + +Note, lifetime value of -1 means unlimited lifetime. + +```json +{ + "name": "hello-world", + "id": "application_1503963985568_0002", + "lifetime": -1, + "components": [ + { + "name": "hello", + "dependencies": [], + "resource": { + "cpus": 1, + "memory": "256" + }, + "configuration": { + "properties": {}, + "env": {}, + "files": [] + }, + "quicklinks": [], + "containers": [ + { + "id": "container_e03_1503963985568_0002_01_000001", + "ip": "10.22.8.143", + "hostname": "myhost.local", + "state": "READY", + "launch_time": 1504051512412, + "bare_host": "10.22.8.143", + "component_name": "hello-0" + }, + { + "id": "container_e03_1503963985568_0002_01_000002", + "ip": "10.22.8.143", + "hostname": "myhost.local", + "state": "READY", + "launch_time": 1504051536450, + "bare_host": "10.22.8.143", + "component_name": "hello-1" + } + ], + "launch_command": "./start_nginx.sh", + "number_of_containers": 1, + "run_privileged_container": false + } + ], + "configuration": { + "properties": {}, + "env": {}, + "files": [] + }, + "quicklinks": {} +} + +``` +### Update to modify the lifetime of a service +PUT URL - 
http://localhost:8088/ws/v1/services/hello-world
+
+##### PUT Request JSON
+
+Note, irrespective of what the current lifetime value is, this update request will set the lifetime of the service to be 3600 seconds (1 hour) from the time the request is submitted. Hence, if a service has remaining lifetime of 5 mins (say) and would like to extend it to an hour OR if an application has remaining lifetime of 5 hours (say) and would like to reduce it down to an hour, then for both scenarios you need to submit the same request below.
+
+```json
+{
+ "lifetime": 3600
+}
+```
+### Stop a service
+PUT URL - http://localhost:8088/ws/v1/services/hello-world
+
+##### PUT Request JSON
+```json
+{
+ "state": "STOPPED"
+}
+```
+
+### Start a service
+PUT URL - http://localhost:8088/ws/v1/services/hello-world
+
+##### PUT Request JSON
+```json
+{
+ "state": "STARTED"
+}
+```
+
+### Update to flex up/down the no of containers (instances) of a component of a service
+PUT URL - http://localhost:8088/ws/v1/services/hello-world/components/hello
+
+##### PUT Request JSON
+```json
+{
+ "name": "hello",
+ "number_of_containers": 3
+}
+```
+
+### Destroy a service
+DELETE URL - http://localhost:8088/ws/v1/services/hello-world
+
+***
+
+### Create a complicated service - HBase
+POST URL - http://localhost:8088/ws/v1/services/hbase-app-1
+
+##### POST Request JSON
+
+```json
+{
+ "name": "hbase-app-1",
+ "lifetime": "3600",
+ "components": [
+ {
+ "name": "hbasemaster",
+ "number_of_containers": 1,
+ "artifact": {
+ "id": "hbase:latest",
+ "type": "DOCKER"
+ },
+ "launch_command": "/usr/hdp/current/hbase-master/bin/hbase master start",
+ "resource": {
+ "cpus": 1,
+ "memory": "2048"
+ },
+ "configuration": {
+ "env": {
+ "HBASE_LOG_DIR": ""
+ },
+ "files": [
+ {
+ "type": "XML",
+ "dest_file": "/etc/hadoop/conf/core-site.xml",
+ "props": {
+ "fs.defaultFS": "${CLUSTER_FS_URI}"
+ }
+ },
+ {
+ "type": "XML",
+ "dest_file": "/etc/hbase/conf/hbase-site.xml",
+ "props": {
+ 
"hbase.cluster.distributed": "true", + "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}", + "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase", + "zookeeper.znode.parent": "${SERVICE_ZK_PATH}", + "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}", + "hbase.master.info.port": "16010" + } + } + ] + } + }, + { + "name": "regionserver", + "number_of_containers": 3, + "unique_component_support": "true", + "artifact": { + "id": "hbase:latest", + "type": "DOCKER" + }, + "launch_command": "/usr/hdp/current/hbase-regionserver/bin/hbase regionserver start", + "resource": { + "cpus": 1, + "memory": "2048" + }, + "configuration": { + "env": { + "HBASE_LOG_DIR": "" + }, + "files": [ + { + "type": "XML", + "dest_file": "/etc/hadoop/conf/core-site.xml", + "props": { + "fs.defaultFS": "${CLUSTER_FS_URI}" + } + }, + { + "type": "XML", + "dest_file": "/etc/hbase/conf/hbase-site.xml", + "props": { + "hbase.cluster.distributed": "true", + "hbase.zookeeper.quorum": "${CLUSTER_ZK_QUORUM}", + "hbase.rootdir": "${SERVICE_HDFS_DIR}/hbase", + "zookeeper.znode.parent": "${SERVICE_ZK_PATH}", + "hbase.master.hostname": "hbasemaster.${SERVICE_NAME}.${USER}.${DOMAIN}", + "hbase.master.info.port": "16010", + "hbase.regionserver.hostname": "${COMPONENT_INSTANCE_NAME}.${SERVICE_NAME}.${USER}.${DOMAIN}" + } + } + ] + } + } + ], + "quicklinks": { + "HBase Master Status UI": "http://hbasemaster0.${SERVICE_NAME}.${USER}.${DOMAIN}:16010/master-status", + "Proxied HBase Master Status UI": "http://app-proxy/${DOMAIN}/${USER}/${SERVICE_NAME}/hbasemaster/16010/" + } +} +``` diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_overview.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_overview.png new file mode 100644 index 0000000..b9e80b9 Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_overview.png differ diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_record_creation.jpeg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_record_creation.jpeg new file mode 100644 index 0000000..63b2599 Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_record_creation.jpeg differ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_record_removal.jpeg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_record_removal.jpeg new file mode 100644 index 0000000..40d870c Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/resources/images/dns_record_removal.jpeg differ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/restabstract.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/restabstract.js new file mode 100644 index 0000000..df409d6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/restabstract.js @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; +import Ember from 'ember'; + +export default DS.RESTAdapter.extend({ + address: null, //Must be set by inheriting classes + restNameSpace: null, //Must be set by inheriting classes + serverName: null, //Must be set by inheriting classes + + headers: { + Accept: 'application/json' + }, + + host: Ember.computed("address", function() { + var address = this.get("address"); + return this.get(`hosts.${address}`); + }), + + namespace: Ember.computed("restNameSpace", function() { + var nameSpace = this.get("restNameSpace"); + return this.get(`env.app.namespaces.${nameSpace}`); + }), + + ajax(url, method, options) { + options = options || {}; + options.crossDomain = true; + options.xhrFields = { + withCredentials: true + }; + options.targetServer = this.get('serverName'); + return this._super(url, method, options); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-component-instance.js new file mode 100644 index 0000000..062a006 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-component-instance.js @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import AbstractAdapter from './abstract'; + +export default AbstractAdapter.extend({ + address: "timelineWebAddress", + restNameSpace: "timelineService", + serverName: "ATS", + + urlForQuery(query/*, modelName*/) { + var url = this.buildURL(); + url += '/' + query.appId + '/entities/COMPONENT_INSTANCE?fields=ALL'; + delete query.appId; + return url; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-component.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-component.js new file mode 100644 index 0000000..c356192 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-component.js @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import AbstractAdapter from './abstract'; + +export default AbstractAdapter.extend({ + address: "timelineWebAddress", + restNameSpace: "timelineService", + serverName: "ATS", + + urlForQuery(query/*, modelName*/) { + var url = this.buildURL(); + url += '/' + query.appId + '/entities/COMPONENT?fields=ALL'; + delete query.appId; + return url; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-info.js new file mode 100644 index 0000000..dec3e50 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-service-info.js @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import AbstractAdapter from './abstract'; + +export default AbstractAdapter.extend({ + address: "timelineWebAddress", + restNameSpace: "timelineService", + serverName: "ATS", + + urlForQueryRecord(query/*, modelName*/) { + var url = this.buildURL(); + url += '/' + query.appId + '/entities/SERVICE_ATTEMPT?fields=ALL'; + delete query.appId; + return url; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js new file mode 100644 index 0000000..dc5dbfd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-servicedef.js @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import Ember from 'ember'; +import RESTAbstractAdapter from './restabstract'; + +export default RESTAbstractAdapter.extend({ + address: "dashWebAddress", + restNameSpace: "dashService", + serverName: "DASH", + + deployService(request) { + var url = this.buildURL(); + return this.ajax(url, "POST", {data: request}); + }, + + stopService(serviceName) { + var url = this.buildURL(); + url += "/" + serviceName; + var data = {"state": "STOPPED", "name": serviceName}; + return this.ajax(url, "PUT", {data: data}); + }, + + deleteService(serviceName) { + var url = this.buildURL(); + url += "/" + serviceName; + return this.ajax(url, "DELETE", {data: {}}); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js index 44edb8e..b8d974a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/breadcrumb-bar.js @@ -21,6 +21,7 @@ import Ember from 'ember'; export default Ember.Component.extend({ breadcrumbs: null, + hideRefresh: false, actions:{ refresh: function () { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/confirm-dialog.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/confirm-dialog.js new file mode 100644 index 0000000..a6f518b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/confirm-dialog.js @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Component.extend({ + dialogId: "confirmModalDialog", + title: "Confirm", + message: "Are you sure?", + + actions: { + yesConfirmed() { + this.sendAction(); + } + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js new file mode 100644 index 0000000..90e10e5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/deploy-service.js @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Component.extend({ + viewType: 'standard', + savedStandardTemplates: null, + savedJsonTemplates: null, + savedTemplateName: '', + serviceDef: null, + customServiceDef: '', + serviceResp: null, + isLoading: false, + + actions: { + showSaveTemplateModal() { + this.$('#saveListModal').modal('show'); + }, + + deployService() { + this.set('serviceResp', null); + if (this.get('isStandardViewType')) { + this.sendAction("deployServiceDef", this.get('serviceDef')); + } else { + try { + var parsed = JSON.parse(this.get('customServiceDef')); + this.sendAction("deployServiceJson", parsed); + } catch (err) { + this.set('serviceResp', {type: 'error', message: 'Invalid JSON: ' + err.message}); + throw err; + } + } + }, + + updateViewType(type) { + this.set('viewType', type); + }, + + addToSavedList() { + this.unselectAllSavedList(); + if (this.get('isStandardViewType')) { + this.get('savedStandardTemplates').addObject({ + name: this.get('savedTemplateName'), + defId: this.get('serviceDef.id'), + active: true + }); + this.set('serviceDef.isCached', true); + } else { + this.get('savedJsonTemplates').addObject({ + name: this.get('savedTemplateName'), + custom: this.get('customServiceDef'), + active: true + }); + } + this.$('#saveListModal').modal('hide'); + this.set('savedTemplateName', ''); + }, + + updateServiceDef(def) { + this.selectActiveListItem(def); + if (this.get('isStandardViewType')) { + this.set('serviceDef', this.getStore().peekRecord('yarn-servicedef', def.defId)); + } else { + this.set('customServiceDef', def.custom); + } + }, + + clearConfigs() { + this.unselectAllSavedList(); + this.set('serviceResp', null); + if (this.get('isStandardViewType')) { + var oldDef = this.get('serviceDef'); + var def = oldDef.createNewServiceDef(); + this.set('serviceDef', def); + if (!oldDef.get('isCached')) { + 
oldDef.deleteRecord(); + } + } else { + this.set('customServiceDef', ''); + } + }, + + removeFromSavedList(list) { + if (list.active) { + this.send('clearConfigs'); + } + if (this.get('isStandardViewType')) { + this.get('savedStandardTemplates').removeObject(list); + } else { + this.get('savedJsonTemplates').removeObject(list); + } + }, + + clearServiceResponse() { + this.set('serviceResp', null); + } + }, + + didInsertElement() { + var self = this; + self.$().find('.modal').on('shown.bs.modal', function() { + self.$().find('.modal.in').find('input.form-control:first').focus(); + }); + }, + + selectActiveListItem(item) { + this.unselectAllSavedList(); + Ember.set(item, 'active', true); + }, + + unselectAllSavedList() { + this.get('getSavedList').forEach(function(item) { + Ember.set(item, 'active', false); + }); + }, + + getSavedList: Ember.computed('viewType', function() { + if (this.get('isStandardViewType')) { + return this.get('savedStandardTemplates'); + } else { + return this.get('savedJsonTemplates'); + } + }), + + getStore: function() { + return this.get('serviceDef.store'); + }, + + isStandardViewType: Ember.computed.equal('viewType', 'standard'), + + isCustomViewType: Ember.computed.equal('viewType', 'custom'), + + isValidTemplateName: Ember.computed.notEmpty('savedTemplateName'), + + isValidServiceDef: Ember.computed('serviceDef.name', 'serviceDef.queue', 'serviceDef.serviceComponents.[]', function () { + return this.get('serviceDef').isValidServiceDef(); + }), + + isValidCustomServiceDef: Ember.computed.notEmpty('customServiceDef'), + + enableSaveOrDeployBtn: Ember.computed('isValidServiceDef', 'isValidCustomServiceDef', 'viewType', 'isLoading', function() { + if (this.get('isLoading')) { + return false; + } + if (this.get('isStandardViewType')) { + return this.get('isValidServiceDef'); + } else { + return this.get('isValidCustomServiceDef'); + } + }) +}); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/fileconfig-viewer-dialog.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/fileconfig-viewer-dialog.js new file mode 100644 index 0000000..d4912768 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/fileconfig-viewer-dialog.js @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import Ember from 'ember'; + +export default Ember.Component.extend({ + dialogId: "fileconfig_viewer_dialog", + title: "File Configuration Properties", + props: null, + customProps: Ember.computed('props', function() { + var custom = []; + var props = this.get('props'); + for (var pro in props) { + custom.push({ + name: pro, + value: props[pro] + }); + } + return custom; + }) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/info-tooltip.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/info-tooltip.js new file mode 100644 index 0000000..605b611 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/info-tooltip.js @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+import Ember from 'ember';
+import InfoSeeder from 'yarn-ui/utils/info-seeder';
+
+export default Ember.Component.extend({
+  classNames: ['tooltip', 'info-tooltip'],
+  elementId: 'info_tooltip_wrapper',
+
+  didInsertElement() {
+    var $tooltip = Ember.$('#info_tooltip_wrapper');
+    Ember.$('body').on('mouseenter', '.info-icon', function() {
+      var $elem = Ember.$(this);
+      var info = InfoSeeder[$elem.data('info')];
+      var offset = $elem.offset();
+      $tooltip.show();
+      $tooltip.find("#tooltip_content").text(info);
+      $tooltip.offset({top: offset.top + 20, left: offset.left - 10});
+    }).on('mouseleave', '.info-icon', function() {
+      $tooltip.find("#tooltip_content").text('');
+      $tooltip.hide();
+    });
+  },
+
+  willDestroyElement() {
+    Ember.$('body').off('mouseenter mouseleave', '.info-icon');
+  }
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/metrics-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/metrics-table.js
new file mode 100644
index 0000000..62b2fc5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/metrics-table.js
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +import Ember from 'ember'; + +export default Ember.Component.extend({ + metrics: null, + type: '' +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js new file mode 100644 index 0000000..5a9ae30 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import Ember from 'ember'; + +export default Ember.Component.extend({ + serviceDef: null, + currentComponent: null, + duplicateNameError: false, + + actions: { + showAddComponentModal() { + var newComp = this.get('serviceDef').createNewServiceComponent(); + this.set('currentComponent', newComp); + this.set('duplicateNameError', false); + this.$('#addComponentModal').modal('show'); + }, + + addNewComponent() { + this.set('duplicateNameError', false); + if (this.isCurrentNameDuplicate()) { + this.set('duplicateNameError', true); + return; + } + this.get('serviceDef.serviceComponents').addObject(this.get('currentComponent')); + this.$('#addComponentModal').modal('hide'); + }, + + removeComponent(component) { + this.get('serviceDef.serviceComponents').removeObject(component); + } + }, + + isCurrentNameDuplicate() { + var currName = this.get('currentComponent.name'); + var item = this.get('serviceDef.serviceComponents').findBy('name', currName); + return !Ember.isNone(item); + }, + + isValidCurrentComponent: Ember.computed.and('currentComponent', 'currentComponent.name', 'currentComponent.cpus', 'currentComponent.memory', 'currentComponent.numOfContainers', 'currentComponent.artifactId', 'currentComponent.launchCommand') +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-config-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-config-table.js new file mode 100644 index 0000000..b0a78dd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-config-table.js @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Component.extend({ + serviceDef: null, + currentConfig: null, + serviceConfigJson: '', + + actions: { + showNewConfigurationModal() { + var newConfig = this.get('serviceDef').createNewServiceConfig(); + this.set('currentConfig', newConfig); + this.$('#addConfigurationModal').modal('show'); + if (this.get('isNonEmptyComponents') && this.get('currentConfig.componentName') === '') { + this.set('currentConfig.componentName', this.get('componentNames.firstObject')); + } + }, + + removeConfiguration(config) { + this.get('serviceDef.serviceConfigs').removeObject(config); + }, + + configTypeChanged(type) { + this.set('currentConfig.type', type); + if (type === 'quicklink') { + this.set('currentConfig.scope', 'service'); + this.set('currentConfig.componentName', ''); + } + }, + + addNewConfiguration() { + this.get('serviceDef.serviceConfigs').addObject(this.get('currentConfig')); + this.$('#addConfigurationModal').modal('hide'); + }, + + showServiceConfigUploadModal() { + this.set('serviceConfigJson', ''); + this.$("#service_config_upload_modal").modal('show'); + }, + + uploadServiceConfig(json) { + this.get('serviceDef').convertJsonServiceConfigs(json); + this.$("#service_config_upload_modal").modal('hide'); + }, + + configScopeChanged(scope) { + this.set('currentConfig.scope', scope); + }, + + scopeComponentChanged(name) { + 
this.set('currentConfig.componentName', name); + } + }, + + isNonEmptyComponents: Ember.computed('serviceDef.serviceComponents.length', function() { + return this.get('serviceDef.serviceComponents.length') > 0; + }), + + isNotQuicklink: Ember.computed('currentConfig.type', function() { + return this.get('currentConfig.type') !== "quicklink"; + }), + + componentNames: Ember.computed('serviceDef.serviceComponents.[]', function() { + var names = []; + this.get('serviceDef.serviceComponents').forEach(function(cmp) { + names.push(cmp.get('name')); + }); + return names; + }), + + isValidCurrentConfig: Ember.computed.and('currentConfig', 'currentConfig.name', 'currentConfig.value') +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-fileconfig-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-fileconfig-table.js new file mode 100644 index 0000000..7c06152 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-fileconfig-table.js @@ -0,0 +1,112 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import Ember from 'ember'; + +export default Ember.Component.extend({ + serviceDef: null, + currentFileConfig: null, + fileConfigJson: '', + fileConfigProps: '', + propertyViewer: null, + parseError: '', + + actions: { + showNewConfigFileModal() { + var newFile = this.get('serviceDef').createNewFileConfig(); + this.set('currentFileConfig', newFile); + this.set('fileConfigProps', ''); + this.set('parseError', ''); + this.$('#addFileConfigModal').modal('show'); + if (this.get('isNonEmptyComponents') && this.get('currentFileConfig.componentName') === '') { + this.set('currentFileConfig.componentName', this.get('componentNames.firstObject')); + } + }, + + removeFileConfiguration(file) { + this.get('serviceDef.fileConfigs').removeObject(file); + }, + + addNewFileConfig() { + this.set('parseError', ''); + var props = this.get('fileConfigProps'); + if (props) { + try { + var parsed = JSON.parse(props); + this.set('currentFileConfig.props', parsed); + } catch (err) { + this.set('parseError', `Invalid JSON: ${err.message}`); + throw err; + } + } + this.get('serviceDef.fileConfigs').addObject(this.get('currentFileConfig')); + this.$('#addFileConfigModal').modal('hide'); + }, + + showFileConfigUploadModal() { + this.set('fileConfigJson', ''); + this.$("#service_file_config_upload_modal").modal('show'); + }, + + uploadFileConfig(json) { + this.get('serviceDef').convertJsonFileConfigs(json); + this.$("#service_file_config_upload_modal").modal('hide'); + }, + + configScopeChanged(scope) { + this.set('currentFileConfig.scope', scope); + }, + + scopeComponentChanged(name) { + this.set('currentFileConfig.componentName', name); + }, + + configTypeChanged(type) { + this.set('currentFileConfig.type', type); + if (type === "TEMPLATE") { + this.set('currentFileConfig.props', null); + this.set('fileConfigProps', ''); + } + }, + + showFileConfigPropertyViewer(props) { + this.set('propertyViewer', props); + this.$("#file_config_properties_viewer").modal('show'); + } + }, + + 
isNonEmptyComponents: Ember.computed('serviceDef.serviceComponents.length', function() { + return this.get('serviceDef.serviceComponents.length') > 0; + }), + + componentNames: Ember.computed('serviceDef.serviceComponents.[]', function() { + var names = []; + this.get('serviceDef.serviceComponents').forEach(function(cmp) { + names.push(cmp.get('name')); + }); + return names; + }), + + isValidCurrentFileConfig: Ember.computed('currentFileConfig', 'currentFileConfig.srcFile', 'currentFileConfig.destFile', 'fileConfigProps', function() { + return this.get('currentFileConfig') && this.get('currentFileConfig.destFile') && (this.get('currentFileConfig.srcFile') || this.get('fileConfigProps')); + }), + + isConfigTypeHadoopXml: Ember.computed('currentFileConfig.type', function() { + return this.get('currentFileConfig.type') === 'HADOOP_XML'; + }) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/upload-config.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/upload-config.js new file mode 100644 index 0000000..2f9dc9c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/upload-config.js @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  dialogId: "config_upload_modal",
+  title: "Upload Configuration",
+  configJson: '',
+  parseErrorMsg: '',
+
+  actions: {
+    uploadConfig() {
+      var json = this.get('configJson');
+      try {
+        JSON.parse(json);
+        this.updateParseResults("");
+      } catch (ex) {
+        this.updateParseResults("Invalid JSON: " + ex.message);
+        throw ex;
+      }
+      if (!this.get('parseErrorMsg')) {
+        this.sendAction("uploadConfig", json);
+      }
+    }
+  },
+
+  didInsertElement() {
+    this.$('#' + this.get('dialogId')).on('shown.bs.modal', function() {
+      this.updateParseResults("");
+    }.bind(this));
+  },
+
+  isValidConfigJson: Ember.computed.notEmpty('configJson'),
+
+  updateParseResults(message) {
+    this.set('parseErrorMsg', message);
+  }
+});
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
new file mode 100644
index 0000000..947cb98
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; +import ColumnDef from 'em-table/utils/column-definition'; + +export default Ember.Controller.extend({ + queryParams: ["service"], + service: undefined, + + tableColumns: Ember.computed('model.appId', 'model.serviceName', function() { + var cols = []; + var service = this.get('model.serviceName'); + var appId = this.get('model.appId'); + + cols.push({ + id: 'name', + headerTitle: 'Component Group', + contentPath: 'name', + cellComponentName: 'em-table-linked-cell', + getCellContent: function(row) { + return { + displayText: row.get('name'), + href: `#/yarn-component-instances/${row.get('name')}/info?service=${service}&&appid=${appId}` + }; + } + }, { + id: 'vcores', + headerTitle: 'VCores', + contentPath: 'vcores' + }, { + id: 'memory', + headerTitle: 'Memory (MB)', + contentPath: 'memory' + }, { + id: 'instances', + headerTitle: '# Components', + contentPath: 'instances', + observePath: true + }, { + id: 'createdDate', + headerTitle: 'Created Time', + contentPath: 'createdDate' + }); + + return ColumnDef.make(cols); + }) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/configs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/configs.js new file mode 100644 index 0000000..a6cba9e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/configs.js @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Controller.extend({ + queryParams: ["service"], + service: undefined +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js index f9652f9..3de6687 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/info.js @@ -21,6 +21,66 @@ import Ember from 'ember'; export default Ember.Controller.extend({ queryParams: ["service"], service: undefined, + isLoading: false, + actionResponse: null, + + actions: { + showStopServiceConfirm() { + this.set('actionResponse', null); + Ember.$("#stopServiceConfirmDialog").modal('show'); + }, + + stopService() { + var self = this; + Ember.$("#stopServiceConfirmDialog").modal('hide'); + var adapter = this.store.adapterFor('yarn-servicedef'); + self.set('isLoading', true); + adapter.stopService(this.get('service')).then(function() { + self.set('actionResponse', {msg: 'Service stopped successfully. 
Auto refreshing in 5 seconds.', type: 'success'}); + Ember.run.later(self, function() { + this.set('actionResponse', null); + this.send("refresh"); + }, 5000); + }, function(errr) { + let messg = errr.diagnostics || 'Error: Stop service failed!'; + self.set('actionResponse', {msg: messg, type: 'error'}); + }).finally(function() { + self.set('isLoading', false); + }); + }, + + showDeleteServiceConfirm() { + this.set('actionResponse', null); + Ember.$("#deleteServiceConfirmDialog").modal('show'); + }, + + deleteService() { + var self = this; + Ember.$("#deleteServiceConfirmDialog").modal('hide'); + var adapter = this.store.adapterFor('yarn-servicedef'); + self.set('isLoading', true); + adapter.deleteService(this.get('service')).then(function() { + self.set('actionResponse', {msg: 'Service deleted successfully. Redirecting to services in 5 seconds.', type: 'success'}); + Ember.run.later(self, function() { + this.set('actionResponse', null); + this.transitionToRoute("yarn-services"); + }, 5000); + }, function(errr) { + let messg = errr.diagnostics || 'Error: Delete service failed!'; + self.set('actionResponse', {msg: messg, type: 'error'}); + }).finally(function() { + self.set('isLoading', false); + }); + }, + + resetActionResponse() { + this.set('actionResponse', null); + } + }, + + isRunningService: Ember.computed('model.serviceName', 'model.app.state', function() { + return this.get('service') !== undefined && this.get('model.app.state') === 'RUNNING'; + }), amHostHttpAddressFormatted: Ember.computed('model.app.amHostHttpAddress', function() { var amHostAddress = this.get('model.app.amHostHttpAddress'); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js new file mode 100644 index 0000000..4b8dbf4 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance.js @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Controller.extend({ + componentName: '', + instanceName: '', + serviceName: '', + appId: '', + + breadcrumbs: [{ + text: "Home", + routeName: 'application' + }, { + text: "Services", + routeName: 'yarn-services', + }], + + updateBreadcrumbs(appId, serviceName, componentName, instanceName) { + var crumbs = [{ + text: "Home", + routeName: 'application' + }, { + text: "Services", + routeName: 'yarn-services', + }]; + if (appId && serviceName && componentName && instanceName) { + crumbs.push({ + text: `${serviceName} [${appId}]`, + href: `#/yarn-app/${appId}/info?service=${serviceName}` + }, { + text: 'Components', + href: `#/yarn-app/${appId}/components?service=${serviceName}` + }, { + text: `${componentName}`, + href: `#/yarn-component-instances/${componentName}/info?service=${serviceName}&&appid=${appId}` + }, { + text: `${instanceName}` + }); + } + this.set('breadcrumbs', crumbs); + } +}); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js new file mode 100644 index 0000000..e3abcb7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Controller.extend({ + queryParams: ["appid", "service"], + appid: undefined, + service: undefined +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances.js new file mode 100644 index 0000000..965631c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances.js @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Controller.extend({ + componentName: '', + serviceName: '', + appId: '', + + breadcrumbs: [{ + text: "Home", + routeName: 'application' + }, { + text: "Services", + routeName: 'yarn-services', + }], + + updateBreadcrumbs(appId, serviceName, componentName, tailCrumbs) { + var crumbs = [{ + text: "Home", + routeName: 'application' + }, { + text: "Services", + routeName: 'yarn-services', + }]; + if (appId && serviceName && componentName) { + crumbs.push({ + text: `${serviceName} [${appId}]`, + href: `#/yarn-app/${appId}/info?service=${serviceName}` + }, { + text: 'Components', + href: `#/yarn-app/${appId}/components?service=${serviceName}` + }, { + text: `${componentName}`, + href: `#/yarn-component-instances/${componentName}/info?service=${serviceName}&&appid=${appId}` + }); + } + if (tailCrumbs) { + crumbs.pushObjects(tailCrumbs); + } + this.set('breadcrumbs', crumbs); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/configs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/configs.js new file mode 100644 index 0000000..dac6498 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/configs.js @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Controller.extend({ + queryParams: ["service", "appid"], + appid: undefined, + service: undefined +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js new file mode 100644 index 0000000..a676b34 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; +import ColumnDef from 'em-table/utils/column-definition'; + +export default Ember.Controller.extend({ + queryParams: ["service", "appid"], + appid: undefined, + service: undefined, + + tableColumns: Ember.computed('model.appId', 'model.serviceName', function() { + var cols = []; + var appId = this.get('model.appId'); + var serviceName = this.get('model.serviceName'); + + cols.push({ + id: 'instanceName', + headerTitle: 'Component Name', + contentPath: 'instanceName', + cellComponentName: 'em-table-linked-cell', + getCellContent: function(row) { + var component = row.get('component'); + var instance = row.get('instanceName'); + return { + text: instance, + href: `#/yarn-component-instance/${component}/instances/${instance}/info?appid=${appId}&&service=${serviceName}` + }; + } + }, { + id: 'containerId', + headerTitle: 'Current Container Id', + contentPath: 'containerId', + minWidth: '350px' + }, { + id: 'state', + headerTitle: 'State', + contentPath: 'state' + }, { + id: 'startedDate', + headerTitle: 'Started Time', + contentPath: 'startedDate' + }); + + return ColumnDef.make(cols); + }) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js new file mode 100644 index 0000000..25d575f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-deploy-service.js @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Controller.extend({ + breadcrumbs: [{ + text: "Home", + routeName: 'application' + }, { + text: "Services", + routeName: 'yarn-services', + }, { + text: "New Service", + routeName: 'yarn-deploy-service', + }], + + savedStandardTemplates: [], + savedJsonTemplates: [], + serviceResponse: null, + isLoading: false, + + actions: { + deployServiceDef(serviceDef) { + var defjson = serviceDef.getServiceJSON(); + this.deployServiceApp(defjson); + }, + + deployServiceJson(json) { + this.deployServiceApp(json); + } + }, + + gotoServices() { + Ember.run.later(this, function() { + this.set('serviceResponse', null); + this.transitionToRoute('yarn-services'); + }, 1000); + }, + + deployServiceApp(requestJson) { + var self = this; + var adapter = this.store.adapterFor('yarn-servicedef'); + this.set('isLoading', true); + adapter.deployService(requestJson).then(function() { + self.set('serviceResponse', {message: 'Service has been accepted successfully. 
Redirecting to services in a second.', type: 'success'}); + self.gotoServices(); + }, function(errmsg) { + self.set('serviceResponse', {message: errmsg, type: 'error'}); + }).finally(function() { + self.set('isLoading', false); + }); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/check-availability.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/check-availability.js new file mode 100644 index 0000000..4470d65 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/helpers/check-availability.js @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import Ember from 'ember'; + +export function checkAvailability(params/*, hash*/) { + if (params[0] !== undefined && params[0] !== null && params[0] !== '') { + return params[0]; + } + return 'N/A'; +} + +export default Ember.Helper.helper(checkAvailability); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-component-instance.js new file mode 100644 index 0000000..532fc55 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-component-instance.js @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; +import Ember from 'ember'; +import Converter from 'yarn-ui/utils/converter'; + +export default DS.Model.extend({ + containerId: DS.attr('string'), + component: DS.attr('string'), + instanceName: DS.attr('string'), + state: DS.attr('number'), + createdTimestamp: DS.attr('number'), + startedTimestamp: DS.attr('number'), + host: DS.attr('string'), + node: DS.attr('string'), + hostUrl: DS.attr('string'), + ipAddr: DS.attr('string'), + exitStatusCode: DS.attr('string'), + + createdDate: Ember.computed('createdTimestamp', function() { + var timestamp = this.get('createdTimestamp'); + if (timestamp > 0) { + return Converter.timeStampToDate(timestamp); + } + return 'N/A'; + }), + + startedDate: Ember.computed('startedTimestamp', function() { + var timestamp = this.get('startedTimestamp'); + if (timestamp > 0) { + return Converter.timeStampToDate(timestamp); + } + return 'N/A'; + }) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-component.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-component.js new file mode 100644 index 0000000..9e06419 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-component.js @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; +import Ember from 'ember'; +import Converter from 'yarn-ui/utils/converter'; + +export default DS.Model.extend({ + name: DS.attr('string'), + vcores: DS.attr('string'), + memory: DS.attr('string'), + priority: DS.attr('string'), + instances: DS.attr('string'), + createdTimestamp: DS.attr('number'), + + configs: DS.attr({defaultValue: function() { + return Ember.A(); + }}), + + metrics: DS.attr({defaultValue: function() { + return Ember.Object.create(); + }}), + + createdDate: Ember.computed('createdTimestamp', function() { + var timestamp = this.get('createdTimestamp'); + if (timestamp > 0) { + return Converter.timeStampToDate(timestamp); + } + return 'N/A'; + }) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-info.js new file mode 100644 index 0000000..7b961e8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-service-info.js @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; +import Ember from 'ember'; +import Converter from 'yarn-ui/utils/converter'; + +export default DS.Model.extend({ + name: DS.attr('string'), + appId: DS.attr('string'), + state: DS.attr('string'), + createdTimestamp: DS.attr('number'), + launchTimestamp: DS.attr('number'), + + quicklinks: DS.attr({defaultValue: function() { + return Ember.A(); + }}), + + configs: DS.attr({defaultValue: function() { + return Ember.A(); + }}), + + metrics: DS.attr({defaultValue: function() { + return Ember.Object.create(); + }}), + + createdDate: Ember.computed('createdTimestamp', function() { + var timestamp = this.get('createdTimestamp'); + if (timestamp > 0) { + return Converter.timeStampToDate(timestamp); + } + return 'N/A'; + }), + + launchDate: Ember.computed('launchTimestamp', function() { + var timestamp = this.get('launchTimestamp'); + if (timestamp > 0) { + return Converter.timeStampToDate(timestamp); + } + return 'N/A'; + }) +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js new file mode 100644 index 0000000..0439fb4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js @@ -0,0 +1,278 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; +import Ember from 'ember'; + +export default DS.Model.extend({ + name: DS.attr('string', {defaultValue: ''}), + queue: DS.attr('string', {defaultValue: ''}), + lifetime: DS.attr('string', {defaultValue: ''}), + isCached: DS.attr('boolean', {defaultValue: false}), + + serviceComponents: DS.attr({defaultValue: function() { + return Ember.A(); + }}), + + serviceConfigs: DS.attr({defaultValue: function() { + return Ember.A(); + }}), + + fileConfigs: DS.attr({defaultValue: function() { + return Ember.A(); + }}), + + quicklinks: DS.attr({defaultValue: function() { + return {}; + }}), + + clear() { + this.set('name', ''); + this.set('queue', ''); + this.set('lifetime', ''); + this.get('serviceComponents').clear(); + this.get('serviceConfigs').clear(); + this.get('fileConfigs').clear(); + this.set('quicklinks', {}); + }, + + isValidServiceDef() { + return this.get('name') !== '' && this.get('queue') !== '' && this.get('serviceComponents.length') > 0; + }, + + createNewServiceComponent() { + return Ember.Object.create({ + name: '', + numOfContainers: '', + cpus: '', + memory: '', + artifactId: '', + artifactType: 'DOCKER', + launchCommand: '', + dependencies: [], + uniqueComponentSupport: false, + configuration: null + }); + }, + + createNewServiceConfig(name, value) { + var Config = Ember.Object.extend({ + name: name || '', + value: value || '', + type: 
'property', // property OR env OR quicklink + scope: 'service', // service OR component + componentName: '', + capitalizedType: Ember.computed('type', function() { + return Ember.String.capitalize(this.get('type')); + }), + formattedScope: Ember.computed('scope', 'componentName', function() { + if (this.get('scope') !== 'service') { + return this.get('componentName') + ' [Component]'; + } + return Ember.String.capitalize(this.get('scope')); + }) + }); + return Config.create(); + }, + + createNewFileConfig(src, dest) { + var FileConfig = Ember.Object.extend({ + type: 'TEMPLATE', // HADOOP_XML OR TEMPLATE + srcFile: src || '', + destFile: dest || '', + scope: 'service', // service OR component + componentName: '', + props: null, + formattedScope: Ember.computed('scope', 'componentName', function() { + if (this.get('scope') !== 'service') { + return this.get('componentName') + ' [Component]'; + } + return Ember.String.capitalize(this.get('scope')); + }) + }); + return FileConfig.create(); + }, + + getServiceJSON() { + return this.serializeServiceDef(); + }, + + serializeServiceDef() { + var json = { + name: "", + queue: "", + lifetime: "-1", + components: [], + configuration: { + properties: {}, + env: {}, + files: [] + }, + quicklinks: {} + }; + + var components = this.get('serviceComponents'); + var configs = this.get('serviceConfigs'); + var fileConfigs = this.get('fileConfigs'); + + json['name'] = this.get('name'); + json['queue'] = this.get('queue'); + + if (this.get('lifetime')) { + json['lifetime'] = this.get('lifetime'); + } + + components.forEach(function(component) { + json.components.push(this.serializeComponent(component)); + }.bind(this)); + + configs.forEach(function(config) { + let conf = this.serializeConfiguration(config); + if (conf.scope === "service") { + if (conf.type === "property") { + json.configuration.properties[conf.name] = conf.value; + } else if (conf.type === "env") { + json.configuration.env[conf.name] = conf.value; + } else if 
(conf.type === "quicklink") { + json.quicklinks[conf.name] = conf.value; + } + } else if (conf.scope === "component") { + let requiredCmp = json.components.findBy('name', conf.componentName); + if (requiredCmp) { + requiredCmp.configuration = requiredCmp.configuration || {}; + requiredCmp.configuration.properties = requiredCmp.configuration.properties || {}; + requiredCmp.configuration.env = requiredCmp.configuration.env || {}; + if (conf.type === "property") { + requiredCmp.configuration.properties[conf.name] = conf.value; + } else if (conf.type === "env") { + requiredCmp.configuration.env[conf.name] = conf.value; + } + } + } + }.bind(this)); + + fileConfigs.forEach(function(file) { + let scope = file.get('scope'); + if (scope === "service") { + json.configuration.files.push(this.serializeFileConfig(file)); + } else if (scope === "component") { + let requiredCmp = json.components.findBy('name', file.get('componentName')); + if (requiredCmp) { + requiredCmp.configuration = requiredCmp.configuration || {}; + requiredCmp.configuration.files = requiredCmp.configuration.files || []; + requiredCmp.configuration.files.push(this.serializeFileConfig(file)); + } + } + }.bind(this)); + + return json; + }, + + serializeComponent(record) { + var json = {}; + json['name'] = record.get('name'); + json['number_of_containers'] = record.get('numOfContainers'); + json['launch_command'] = record.get('launchCommand'); + json['dependencies'] = []; + json['artifact'] = { + id: record.get('artifactId'), + type: record.get('artifactType') + }; + json['resource'] = { + cpus: record.get('cpus'), + memory: record.get('memory') + }; + if (record.get('uniqueComponentSupport')) { + json['unique_component_support'] = "true"; + } + if (record.get('configuration')) { + json['configuration'] = record.get('configuration'); + } + return json; + }, + + serializeConfiguration(config) { + var json = {}; + json["type"] = config.get('type'); + json["scope"] = config.get('scope'); + json["componentName"] = 
config.get('componentName'); + json["name"] = config.get('name'); + json["value"] = config.get('value'); + return json; + }, + + serializeFileConfig(file) { + var json = {}; + json["type"] = file.get('type'); + json["dest_file"] = file.get('destFile'); + json["src_file"] = file.get('srcFile'); + if (file.get('type') === "HADOOP_XML" && file.get('props')) { + json["props"] = file.get('props'); + } + return json; + }, + + createNewServiceDef() { + return this.get('store').createRecord('yarn-servicedef', { + id: 'yarn_servicedef_' + Date.now() + }); + }, + + convertJsonServiceConfigs(json) { + var parsedJson = JSON.parse(json); + if (parsedJson.properties) { + for (let prop in parsedJson.properties) { + if (parsedJson.properties.hasOwnProperty(prop)) { + let newPropObj = this.createNewServiceConfig(prop, parsedJson.properties[prop]); + this.get('serviceConfigs').addObject(newPropObj); + } + } + } + if (parsedJson.env) { + for (let envprop in parsedJson.env) { + if (parsedJson.env.hasOwnProperty(envprop)) { + let newEnvObj = this.createNewServiceConfig(envprop, parsedJson.env[envprop]); + newEnvObj.set('type', 'env'); + this.get('serviceConfigs').addObject(newEnvObj); + } + } + } + }, + + convertJsonFileConfigs(json) { + var parsedJson = JSON.parse(json); + if (parsedJson.files) { + parsedJson.files.forEach(function(file) { + let newFileObj = this.createNewFileConfig(file.src_file, file.dest_file); + this.get('fileConfigs').addObject(newFileObj); + }.bind(this)); + } + }, + + cloneServiceDef() { + var clone = this.createNewServiceDef(); + clone.set('name', this.get('name')); + clone.set('queue', this.get('queue')); + clone.set('lifetime', this.get('lifetime')); + clone.set('serviceComponents', this.get('serviceComponents')); + clone.set('serviceConfigs', this.get('serviceConfigs')); + clone.set('fileConfigs', this.get('fileConfigs')); + clone.set('quicklinks', this.get('quicklinks')); + return clone; + } +}); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js index 9013142..c7b3d6a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/router.js @@ -48,11 +48,21 @@ Router.map(function() { this.route('yarn-container-log', { path: '/yarn-container-log/:node_id/:node_addr/:container_id/:filename' }); + this.route('yarn-deploy-service'); this.route('cluster-overview'); this.route('yarn-app', function() { this.route('info', {path: '/:app_id/info'}); this.route('attempts', {path: '/:app_id/attempts'}); + this.route('components', {path: '/:app_id/components'}); this.route('charts', {path: '/:app_id/charts'}); + this.route('configs', {path: '/:app_id/configs'}); + }); + this.route('yarn-component-instances', function() { + this.route('info', {path: '/:component_name/info'}); + this.route('configs', {path: '/:component_name/configs'}); + }); + this.route('yarn-component-instance', function() { + this.route('info', {path: '/:component_name/instances/:instance_name/info'}); }); this.route('yarn-app-attempt', { path: '/yarn-app-attempt/:app_attempt_id'}); this.route('error'); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/components.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/components.js new file mode 100644 index 0000000..8f6f40f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/components.js @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; +import AbstractRoute from '../abstract'; + +export default AbstractRoute.extend({ + model(param, transition) { + transition.send('updateBreadcrumbs', param.app_id, param.service, [{text: 'Components'}]); + return Ember.RSVP.hash({ + appId: param.app_id, + serviceName: param.service, + components: this.store.query('yarn-service-component', {appId: param.app_id, type: 'COMPONENT'}).catch(function() { + return []; + }), + instances: this.store.query('yarn-component-instance', {appId: param.app_id}).catch(function() { + return []; + }) + }); + }, + + afterModel(model) { + let instances = model.instances; + model.components.forEach(function(component) { + var num = instances.filterBy('component', component.get('name')).length; + component.set('instances', num); + }); + }, + + unloadAll() { + this.store.unloadAll('yarn-service-component'); + this.store.unloadAll('yarn-component-instance'); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/configs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/configs.js new file mode 100644 index 0000000..7502481 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/configs.js @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; +import AbstractRoute from '../abstract'; + +export default AbstractRoute.extend({ + model(param, transition) { + transition.send('updateBreadcrumbs', param.app_id, param.service, [{text: "Configurations & Metrics"}]); + return Ember.RSVP.hash({ + appId: param.app_id, + serviceName: param.service, + + configs: this.store.queryRecord('yarn-service-info', {appId: param.app_id}).then(function(info) { + if (info && info.get('configs')) { + return info.get('configs'); + } + return []; + }, function() { + return []; + }), + + metrics: this.store.queryRecord('yarn-service-info', {appId: param.app_id}).then(function(info) { + if (info && info.get('metrics')) { + return info.get('metrics'); + } + return null; + }, function() { + return null; + }) + }); + }, + + unloadAll() { + this.store.unloadAll('yarn-service-info'); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js index 4a4b19e..7585476 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-app/info.js @@ -26,12 +26,22 @@ export default AbstractRoute.extend(AppAttemptMixin, { return Ember.RSVP.hash({ appId: param.app_id, serviceName: param.service, - app: this.fetchAppInfoFromRMorATS(param.app_id, this.store) + app: this.fetchAppInfoFromRMorATS(param.app_id, this.store), + + quicklinks: this.store.queryRecord('yarn-service-info', {appId: param.app_id}).then(function(info) { + if (info && info.get('quicklinks')) { + return info.get('quicklinks'); + } + return []; + }, function() { + return []; + }) }); }, unloadAll() { this.store.unloadAll('yarn-app'); this.store.unloadAll('yarn-app-timeline'); + this.store.unloadAll('yarn-service-info'); } }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance.js new file mode 100644 index 0000000..681eed5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import AbstractRoute from './abstract'; + +export default AbstractRoute.extend({ + actions: { + updateBreadcrumbs(appId, serviceName, componentName, instanceName) { + var controller = this.controllerFor('yarn-component-instance'); + controller.setProperties({appId: appId, serviceName: serviceName, componentName: componentName, instanceName: instanceName}); + controller.updateBreadcrumbs(appId, serviceName, componentName, instanceName); + } + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js new file mode 100644 index 0000000..3753c75 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instance/info.js @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import Ember from 'ember'; +import AbstractRoute from '../abstract'; + +export default AbstractRoute.extend({ + model(params, transition) { + var instanceName = params.instance_name; + transition.send('updateBreadcrumbs', params.appid, params.service, params.component_name, instanceName); + return Ember.RSVP.hash({ + appId: params.appid, + serviceName: params.service, + componentName: params.component_name, + instanceName: instanceName, + container: this.store.query('yarn-component-instance', {appId: params.appid}).then(function(instances) { + if (instances && instances.findBy('instanceName', instanceName)) { + return instances.findBy('instanceName', instanceName); + } + return null; + }, function() { + return null; + }), + }); + }, + + unloadAll() { + this.store.unloadAll('yarn-component-instance'); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances.js new file mode 100644 index 0000000..0190911 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import AbstractRoute from './abstract'; + +export default AbstractRoute.extend({ + actions: { + updateBreadcrumbs(appId, serviceName, componentName, tailCrumbs) { + var controller = this.controllerFor('yarn-component-instances'); + controller.setProperties({appId: appId, componentName: componentName, serviceName: serviceName}); + controller.updateBreadcrumbs(appId, serviceName, componentName, tailCrumbs); + } + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/configs.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/configs.js new file mode 100644 index 0000000..a2540fe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/configs.js @@ -0,0 +1,44 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import Ember from 'ember'; +import AbstractRoute from '../abstract'; + +export default AbstractRoute.extend({ + model(params, transition) { + var componentName = params.component_name; + transition.send('updateBreadcrumbs', params.appid, params.service, componentName, [{text: 'Configurations'}]); + return Ember.RSVP.hash({ + appId: params.appid, + serviceName: params.service, + componentName: componentName, + configs: this.store.query('yarn-service-component', {appId: params.appid}).then(function(components) { + if (components && components.findBy('name', componentName)) { + return components.findBy('name', componentName).get('configs'); + } + return []; + }, function() { + return []; + }) + }); + }, + + unloadAll() { + this.store.unloadAll('yarn-service-component'); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/info.js new file mode 100644 index 0000000..83fd420 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-component-instances/info.js @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; +import AbstractRoute from '../abstract'; + +export default AbstractRoute.extend({ + model(params, transition) { + var componentName = params.component_name; + transition.send('updateBreadcrumbs', params.appid, params.service, componentName); + return Ember.RSVP.hash({ + appId: params.appid, + serviceName: params.service, + componentName: componentName, + instances: this.store.query('yarn-component-instance', {appId: params.appid}).then(function(instances) { + if (instances && instances.filterBy('component', componentName)) { + return instances.filterBy('component', componentName); + } + return []; + }, function() { + return []; + }), + metrics: this.store.query('yarn-service-component', {appId: params.appid}).then(function(components) { + if (components && components.findBy('name', componentName)) { + return components.findBy('name', componentName).get('metrics'); + } + return null; + }, function() { + return null; + }) + }); + }, + + unloadAll() { + this.store.unloadAll('yarn-service-component'); + this.store.unloadAll('yarn-component-instance'); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-deploy-service.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-deploy-service.js new file mode 100644 index 0000000..05ef600 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-deploy-service.js @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Route.extend({ + model() { + return this.store.createRecord('yarn-servicedef', { + id: 'yarn_servicedef_' + Date.now() + }); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-services.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-services.js index fb01138..a6535ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-services.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/routes/yarn-services.js @@ -23,7 +23,7 @@ export default AbstractRoute.extend({ model() { return Ember.RSVP.hash({ apps: this.store.query('yarn-app', { - applicationTypes: "org-apache-slider" + applicationTypes: "yarn-service" }), }); }, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js new file mode 100644 index 0000000..82eb273 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-component-instance.js @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; + +export default DS.JSONAPISerializer.extend({ + internalNormalizeSingleResponse(store, primaryModelClass, payload) { + var info = payload.info; + + var fixedPayload = { + id: 'yarn_component_instance_' + payload.id, + type: primaryModelClass.modelName, + attributes: { + containerId: payload.id, + component: info.COMPONENT_NAME, + instanceName: info.COMPONENT_NAME + '_' + payload.instanceId, + state: info.STATE, + createdTimestamp: payload.createdtime, + startedTimestamp: info.LAUNCH_TIME, + host: info.HOSTNAME, + node: info.BARE_HOST, + hostUrl: 'N/A', + ipAddr: info.IP, + exitStatusCode: info.EXIT_STATUS_CODE + } + }; + + return fixedPayload; + }, + + normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) { + var normalizedResponse = {data: []}; + var instanceUid = {}; + + if (payload && Array.isArray(payload)) { + this.sortPayloadByCreatedTimeAscending(payload); + + payload.forEach(function(container) { + let componentName = container.info.COMPONENT_NAME; + if (!instanceUid[componentName]) { + instanceUid[componentName] = 0; + } + container.instanceId = ++instanceUid[componentName]; + var pl = this.internalNormalizeSingleResponse(store, primaryModelClass, container); + normalizedResponse.data.push(pl); + }.bind(this)); + } + + return normalizedResponse; + }, + + sortPayloadByCreatedTimeAscending(payload) { + 
payload.sort(function(inst1, inst2) { + return inst1.createdtime - inst2.createdtime; + }); + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js new file mode 100644 index 0000000..b0261fc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-component.js @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import DS from 'ember-data'; +import Ember from 'ember'; + +export default DS.JSONAPISerializer.extend({ + internalNormalizeSingleResponse(store, primaryModelClass, payload) { + var info = payload.info; + var configs = payload.configs; + var metrics = payload.metrics; + var newConfigs = Ember.A(); + var newMetrics = Ember.Object.create(); + + if (configs) { + for (let conf in configs) { + let confObj = Ember.Object.create({ + name: conf, + value: configs[conf] || 'N/A' + }); + newConfigs.push(confObj); + } + } + + if (metrics) { + metrics.forEach(function(metric) { + let val = metric.values[Object.keys(metric.values)[0]]; + newMetrics.set(metric.id, ((val !== undefined)? val : 'N/A')); + }); + } + + var fixedPayload = { + id: 'yarn_service_component_' + payload.id, + type: primaryModelClass.modelName, + attributes: { + name: payload.id, + vcores: info.RESOURCE_CPU, + memory: info.RESOURCE_MEMORY, + priority: 'N/A', + instances: 'N/A', + createdTimestamp: payload.createdtime, + configs: newConfigs, + metrics: newMetrics + } + }; + + return fixedPayload; + }, + + normalizeArrayResponse(store, primaryModelClass, payload/*, id, requestType*/) { + var normalizedResponse = {data: []}; + + if (payload && Array.isArray(payload)) { + payload.forEach(function(component) { + var pl = this.internalNormalizeSingleResponse(store, primaryModelClass, component); + normalizedResponse.data.push(pl); + }.bind(this)); + } + + return normalizedResponse; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-info.js new file mode 100644 index 0000000..d3ee93e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-service-info.js @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import DS from 'ember-data'; +import Ember from 'ember'; + +export default DS.JSONAPISerializer.extend({ + internalNormalizeSingleResponse(store, primaryModelClass, payload) { + var info = payload.info; + var configs = payload.configs; + var quicklinks = info.QUICK_LINKS; + var metrics = payload.metrics; + var newConfigs = Ember.A(); + var newQuicklinks = Ember.A(); + var newMetrics = Ember.Object.create(); + + if (configs) { + for (let conf in configs) { + let confObj = Ember.Object.create({ + name: conf, + value: configs[conf] || 'N/A' + }); + newConfigs.push(confObj); + } + } + + if (quicklinks) { + for (let link in quicklinks) { + let linkObj = Ember.Object.create({ + name: link, + value: quicklinks[link] || 'N/A' + }); + newQuicklinks.push(linkObj); + } + } + + if (metrics) { + metrics.forEach(function(metric) { + let val = metric.values[Object.keys(metric.values)[0]]; + newMetrics.set(metric.id, ((val !== undefined)? 
val : 'N/A')); + }); + } + + var fixedPayload = { + id: 'yarn_service_info_' + payload.id, + type: primaryModelClass.modelName, + attributes: { + name: info.NAME, + appId: payload.id, + state: info.STATE, + createdTimestamp: payload.createdtime, + launchTimestamp: info.LAUNCH_TIME, + quicklinks: newQuicklinks, + configs: newConfigs, + metrics: newMetrics + } + }; + + return fixedPayload; + }, + + normalizeSingleResponse(store, primaryModelClass, payload/*, id, requestType*/) { + var normalizedResponse = {data: []}; + + if (payload && payload[0]) { + var pl = this.internalNormalizeSingleResponse(store, primaryModelClass, payload[0]); + normalizedResponse.data = pl; + } + + return normalizedResponse; + } +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js index 807844e..9359530 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/services/hosts.js @@ -75,4 +75,8 @@ export default Ember.Service.extend({ rmWebAddress: Ember.computed(function () { return this.normalizeURL(this.get("env.app.hosts.rmWebAddress")); }), + + dashWebAddress: Ember.computed(function () { + return this.normalizeURL(this.get("env.app.hosts.dashWebAddress")); + }) }); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css index f48c186..4d4925b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/styles/app.css @@ -418,3 +418,179 @@ div.attempt-info-panel table > tbody > tr > td:last-of-type { width: 14px; display: inline-block; } + +.deploy-service textarea { + border-radius: 5px !important; + resize: none; + 
word-wrap: break-word; +} + +.deploy-service .loading-state { + opacity: 0.5; +} + +.deploy-service .loading-state img { + width: 80px; + height: 80px; + margin: 40px auto; + left: 50% !important; + position: absolute; + z-index: 9999; +} + +.align-center { + text-align: center !important; +} + +.bold-text { + font-weight: bold !important; +} + +.deploy-service .saved-list { + min-height: 600px; +} + +.deploy-service .glyphicon { + cursor: pointer; +} + +.deploy-service .remove-icon:hover { + color: #d9534f; +} + +.deploy-service .savedlist-column { + padding-top: 10px; +} + +.deploy-service .definition-column { + padding-top: 10px; + border-left: 1px solid #ddd; +} + +.deploy-service .content-area { + padding: 15px 0px; + border-top: 1px solid #ddd; +} + +.deploy-service .custom-json-area { + padding: 10px 0; + margin-top: -26px; +} + +.deploy-service-modal .modal-dialog { + width: 400px; +} + +.deploy-service-modal .form-group { + margin-bottom: 5px; +} + +.deploy-service .action-btns { + text-align: right; + padding-bottom: 15px; + padding-right: 0; +} + +table.table-custom-action > thead > tr > th:last-of-type, table.table-custom-action > tbody > tr > td:last-of-type { + width: 50px !important; +} + +.deploy-service .toggle-btn.active { + color: #fff; + background-color: #337ab7; + border-color: #337ab7; + text-shadow: none; +} + +.deploy-service .service-resp { + word-wrap: break-word; +} + +table.table-custom-bordered { + border: 1px solid #ddd !important; + border-radius: 4px !important; +} + +table.table-custom-bordered > thead > tr > th, table.table-custom-bordered > tbody > tr > td { + border-bottom: 1px solid #ddd !important; + border-right: 1px solid #ddd !important; +} + +table.table-custom-striped > thead > tr, .table-custom-striped > tbody > tr:nth-of-type(even) { + background-color: #f9f9f9 !important; +} + +table.table-custom-header > thead > tr > th { + background-color: #f5f5f5 !important; +} + +table.table-radius-none { + border-radius: 0 
!important; +} + +table.table-border-none { + border: none !important; +} + +.deploy-service label.required:after, .deploy-service-modal label.required:after { + content: '*'; + color: #d9534f; +} + +.deploy-service .form-group.shrink-height { + margin-bottom: -8px; +} + +table.fix-table-overflow { + table-layout: fixed; +} + +table.fix-table-overflow > tbody > tr > td:last-of-type { + overflow: hidden; + text-overflow: ellipsis; +} + +div.tooltip.info-tooltip { + font: 14px sans-serif !important; + background: lightsteelblue; + word-wrap: break-word; + position: absolute; + text-align: justify; + border-radius: 3px; + z-index: 9999; + padding: 10px; + display: none; + min-width: 200px; + max-width: 500px; + opacity: 1; + top: 10px; + left: 0; +} + +div.tooltip.info-tooltip > span.top-arrow { + color: lightsteelblue; + position: absolute; + top: -10px; + left: 10px; +} + +span.info-icon { + color: #337ab7 !important; +} + +div.service-action-mask { + position: absolute; + opacity: 0.5; + z-index: 9999; + width: 100%; + height: 100%; +} + +div.service-action-mask img { + position: absolute; + width: 80px; + height: 80px; + margin: 40px auto; + left: 45% !important; + z-index: 9999; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs index 1ac53bf..c0c4b7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/application.hbs @@ -92,3 +92,5 @@ + +{{info-tooltip}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs index 24acbd9..54229cc 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/breadcrumb-bar.hbs @@ -18,5 +18,7 @@ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/confirm-dialog.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/confirm-dialog.hbs new file mode 100644 index 0000000..b3bc49a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/confirm-dialog.hbs @@ -0,0 +1,37 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs new file mode 100644 index 0000000..a098ec3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/deploy-service.hbs @@ -0,0 +1,157 @@ +{{! 
+ * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+ {{#if serviceResp}} +
+
+
+
+ + {{serviceResp.message}} +
+
+
+
+ {{/if}} +
+ {{#if isLoading}} + Loading... + {{/if}} +
+
+
+ +
+ +
+
+ +
+ +
+ + +
+ +
+ {{#if isStandardViewType}} + +
+
+
+ + + {{input type="text" class="form-control" placeholder="Service Name" value=serviceDef.name}} +
+
+
+
+ +
+
+
+ + + {{input type="text" class="form-control" placeholder="Queue Name" value=serviceDef.queue}} +
+
+
+
+ +
+
+
+ + + {{input type="number" min="0" class="form-control" placeholder="Service Lifetime (Seconds)" value=serviceDef.lifetime}} +
+
+
+
+ +
+ {{service-component-table serviceDef=serviceDef applicationCtrl=applicationCtrl}} +
+ +
+ {{service-config-table serviceDef=serviceDef}} +
+ +
+ {{service-fileconfig-table serviceDef=serviceDef}} +
+ {{/if}} + + {{#if isCustomViewType}} +
+ {{textarea class="form-control" rows="29" cols="120" value=customServiceDef placeholder="Service JSON configuration here..."}} +
+ {{/if}} +
+ +
+ + + +
+
+
+
+
+
+ + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/fileconfig-viewer-dialog.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/fileconfig-viewer-dialog.hbs new file mode 100644 index 0000000..1420340 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/fileconfig-viewer-dialog.hbs @@ -0,0 +1,53 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/info-tooltip.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/info-tooltip.hbs new file mode 100644 index 0000000..faba135 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/info-tooltip.hbs @@ -0,0 +1,20 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + + +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/metrics-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/metrics-table.hbs new file mode 100644 index 0000000..6e4e990 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/metrics-table.hbs @@ -0,0 +1,82 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +{{#if metrics}} +
+
+
+
{{type}} Metrics: Success Information
+
+
+ + + + + + + + + + + + + + + + + + + +
Desired ContainersRunning ContainersCompleted ContainersPending ContainersSurplus Containers
{{metrics.ContainersDesired}}{{metrics.ContainersRunning}}{{metrics.ContainersCompleted}}{{metrics.ContainersPending}}{{metrics.SurplusContainers}}
+
+
+
+
+
+
+
{{type}} Metrics: Failure Information
+
+
+ + + + + + + + + + + + + + + + + +
Failed ContainersContainers Failed Since Last ThresholdPreempted ContainersPending Anti-Affinity Containers
{{metrics.ContainersFailed}}{{metrics.FailedSinceLastThreshold}}{{metrics.ContainersPreempted}}{{metrics.PendingAAContainers}}
+
+
+
+{{else}} +
+
+

No {{type}} metrics available!

+
+
+{{/if}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs new file mode 100644 index 0000000..8f3904d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs @@ -0,0 +1,113 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+
+ + + +
+ + + + + + + + + + + + + + {{#each serviceDef.serviceComponents as |component|}} + + + + + + + + + + {{else}} + + + + {{/each}} + +
Component NameCPUMemory# ContainersArtifact IdLaunch Command
{{component.name}}{{component.cpus}}{{component.memory}}{{component.numOfContainers}}{{component.artifactId}}{{component.launchCommand}} + +
No data available
+
+
+
+ + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-config-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-config-table.hbs new file mode 100644 index 0000000..46a66ee --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-config-table.hbs @@ -0,0 +1,130 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+
+ + + + +
+ + + + + + + + + + + + {{#each serviceDef.serviceConfigs as |config|}} + + + + + + + + {{else}} + + + + {{/each}} + +
NameValueTypeScope
{{config.name}}{{config.value}}{{config.capitalizedType}}{{config.formattedScope}} + +
No data available
+
+
+
+ + + +{{upload-config + dialogId="service_config_upload_modal" + title="Upload Service Configurations" + configJson=serviceConfigJson + uploadConfig="uploadServiceConfig" +}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-fileconfig-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-fileconfig-table.hbs new file mode 100644 index 0000000..97442c6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-fileconfig-table.hbs @@ -0,0 +1,152 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+
+ + + + +
+ + + + + + + + + + + + + {{#each serviceDef.fileConfigs as |file|}} + + + + + + + + + {{else}} + + + + {{/each}} + +
Source FilePropertiesDestination FileTypeScope
+ {{#if file.srcFile}} + {{file.srcFile}} + {{else}} + N/A + {{/if}} + + {{#if file.props}} + View Properties + {{else}} + N/A + {{/if}} + {{file.destFile}}{{file.type}}{{file.formattedScope}} + +
No data available
+
+
+
+ + + +{{upload-config + dialogId="service_file_config_upload_modal" + title="Upload File Configurations" + configJson=fileConfigJson + uploadConfig="uploadFileConfig" +}} + +{{fileconfig-viewer-dialog dialogId="file_config_properties_viewer" props=propertyViewer}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/upload-config.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/upload-config.hbs new file mode 100644 index 0000000..045fb0f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/upload-config.hbs @@ -0,0 +1,44 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+}} + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs index 2fb5ab3..570011c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app.hbs @@ -42,6 +42,14 @@ {{#link-to 'yarn-app.charts' tagName="li" class=(if (eq target.currentPath 'yarn-app.charts') "active")}} {{#link-to 'yarn-app.charts' appId (query-params service=serviceName)}}Resource Usage{{/link-to}} {{/link-to}} + {{#if serviceName}} + {{#link-to 'yarn-app.components' tagName="li" class=(if (eq target.currentPath 'yarn-app.components') "active")}} + {{#link-to 'yarn-app.components' appId (query-params service=serviceName)}}Components{{/link-to}} + {{/link-to}} + {{#link-to 'yarn-app.configs' tagName="li" class=(if (eq target.currentPath 'yarn-app.configs') "active")}} + {{#link-to 'yarn-app.configs' appId (query-params service=serviceName)}}Configurations & Metrics{{/link-to}} + {{/link-to}} + {{/if}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/components.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/components.hbs new file mode 100644 index 0000000..39e6257 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/components.hbs @@ -0,0 +1,23 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+
+ {{em-table columns=tableColumns rows=model.components}} +
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs new file mode 100644 index 0000000..ae1e603 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/configs.hbs @@ -0,0 +1,57 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+ {{metrics-table metrics=model.metrics type="Service"}} +
+ +
+ {{#if model.configs}} +
+
+
+
Service Configurations
+
+
+ + + + + + + + + {{#each model.configs as |config|}} + + + + + {{/each}} + +
NameValue
{{config.name}}{{config.value}}
+
+
+
+ {{else}} +
+
+

No service configurations available!

+
+
+ {{/if}} +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs index 3cfec33..534869e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs @@ -22,11 +22,34 @@ +{{#if actionResponse}} +
+
+ +
+
+{{/if}} + +{{#if isLoading}} +
+ Loading... +
+{{/if}} +
Basic Info + {{#if isRunningService}} +
+ + +
+ {{/if}}
- - - - - - - - + + + + + + + + + - - - - - - - - + + + + + + + +
Allocated ResourceRunning ContainersPreempted ResourceNum Non-AM container preemptedNum AM container preemptedAggregated Resource Usage
Allocated ResourceRunning ContainersPreempted ResourceNum Non-AM container preemptedNum AM container preemptedAggregated Resource Usage
{{model.app.allocatedResource}}{{model.app.runningContainersNumber}}{{model.app.preemptedResource}}{{model.app.numAMContainerPreempted}}{{model.app.numAMContainerPreempted}}{{model.app.aggregatedResourceUsage}}
{{model.app.allocatedResource}}{{model.app.runningContainersNumber}}{{model.app.preemptedResource}}{{model.app.numAMContainerPreempted}}{{model.app.numAMContainerPreempted}}{{model.app.aggregatedResourceUsage}}
@@ -148,20 +172,55 @@ - - - - - + + + + + + - - - - - + + + + +
Master Container LogMaster NodeMaster Node Label Expression
Master Container LogMaster NodeMaster Node Label Expression
LinkLink{{model.app.amNodeLabelExpression}}
LinkLink{{model.app.amNodeLabelExpression}}
+ + {{#if model.serviceName}} +
+
+
Quick Links
+ + + {{#each model.quicklinks as |link|}} + + + + + {{else}} + + + + {{/each}} + +
{{link.name}}{{link.value}}
No quicklinks available!
+
+
+ {{/if}}
+ +{{confirm-dialog + dialogId="stopServiceConfirmDialog" + message=(concat 'Are you sure you want to stop service "' model.serviceName '" ?') + action="stopService" +}} + +{{confirm-dialog + dialogId="deleteServiceConfirmDialog" + message=(concat 'Are you sure you want to delete service "' model.serviceName '" ?') + action="deleteService" +}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance.hbs new file mode 100644 index 0000000..36336ad --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance.hbs @@ -0,0 +1,43 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +{{breadcrumb-bar breadcrumbs=breadcrumbs}} + +
+
+ +
+
+
+ Component +
+
+ +
+
+
+ +
+ {{outlet}} +
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs new file mode 100644 index 0000000..3d5720e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs @@ -0,0 +1,81 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+ {{#if model.container}} +
+
+
Component Information
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Component Name{{check-availability model.container.instanceName}}
Component Group{{check-availability model.container.component}}
Current Container Id{{check-availability model.container.containerId}}
State{{check-availability model.container.state}}
Created Time{{check-availability model.container.createdDate}}
Started Time{{check-availability model.container.startedDate}}
Host{{check-availability model.container.host}}
Host URL{{check-availability model.container.hostUrl}}
Node{{check-availability model.container.node}}
IP Address{{check-availability model.container.ip}}
Exit Status Code{{check-availability model.container.exitStatusCode}}
+
+
+ {{else}} +
+

No component information available!

+
+ {{/if}} +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances.hbs new file mode 100644 index 0000000..e7ac57a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances.hbs @@ -0,0 +1,46 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +{{breadcrumb-bar breadcrumbs=breadcrumbs}} + +
+
+ +
+
+
+ Component +
+
+ +
+
+
+ +
+ {{outlet}} +
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/configs.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/configs.hbs new file mode 100644 index 0000000..85b6b42 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/configs.hbs @@ -0,0 +1,53 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+ {{#if model.configs}} +
+
+
+
Component Configurations
+
+
+ + + + + + + + + {{#each model.configs as |config|}} + + + + + {{/each}} + +
NameValue
{{config.name}}{{config.value}}
+
+
+
+ {{else}} +
+
+

No component configurations available!

+
+
+ {{/if}} +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/info.hbs new file mode 100644 index 0000000..0b642b0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/info.hbs @@ -0,0 +1,28 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+
+

Active Components: {{model.componentName}}

+ {{em-table columns=tableColumns rows=model.instances}} +
+
+ +
+ {{metrics-table metrics=model.metrics type="Component"}} +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/loading.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/loading.hbs new file mode 100644 index 0000000..a95af2b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instances/loading.hbs @@ -0,0 +1,23 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +
+
+ Loading... +
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-deploy-service.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-deploy-service.hbs new file mode 100644 index 0000000..98bc917 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-deploy-service.hbs @@ -0,0 +1,33 @@ +{{! + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +}} + +{{breadcrumb-bar breadcrumbs=breadcrumbs hideRefresh=true}} + +
+
+ {{deploy-service + savedStandardTemplates=savedStandardTemplates + savedJsonTemplates=savedJsonTemplates + serviceDef=model + serviceResp=serviceResponse + isLoading=isLoading + deployServiceDef="deployServiceDef" + deployServiceJson="deployServiceJson" + }} +
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs index 04788be..274217a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-services.hbs @@ -74,6 +74,10 @@ + + {{#if model.apps}} {{em-table columns=serviceColumns rows=model.apps definition=tableDefinition}} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js new file mode 100644 index 0000000..d63b3c5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/info-seeder.js @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +export default { + serviceName: "A unique application name", + queueName: "The YARN queue that this application should be submitted to", + lifetime: "Life time (in seconds) of the application from the time it reaches the STARTED state (after which it is automatically destroyed by YARN). For unlimited lifetime do not set a lifetime value.", + components: "One or more components of the application. If the application is HBase say, then the component can be a simple role like master or regionserver. If the application is a complex business webapp then a component can be other applications say Kafka or Storm. Thereby it opens up the support for complex and nested applications.", + configurations: "Set of configuration properties that can be injected into the application components via envs, files and custom pluggable helper docker containers. Files of several standard formats like xml, properties, json, yaml and templates will be supported.", + fileConfigs: "Set of file configurations that needs to be created and made available as a volume in an application component container." +}; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env index 04577c9..a795fc5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/configs.env @@ -40,6 +40,13 @@ ENV = { */ //rmWebAddress: "localhost:8088", + /* + * Dash server web interface can be configured below. + * By default dash web address is set as localhost:9191, uncomment and change + * the following value for pointing to a different address. + */ + //dashWebAddress: "localhost:9191", + /* * Protocol scheme. It can be "http:" or "https:". By default, http is used. 
*/ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js index acde40c..d24e960 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/config/default-config.js @@ -21,13 +21,16 @@ module.exports = { // YARN UI App configurations localBaseAddress: "", timelineWebAddress: "localhost:8188", rmWebAddress: "localhost:8088", + dashWebAddress: "localhost:9191", protocolScheme: "http:" }, namespaces: { timeline: 'ws/v1/applicationhistory', + timelineService: 'ws/v2/timeline/apps', cluster: 'ws/v1/cluster', metrics: 'ws/v1/cluster/metrics', - node: '{nodeAddress}/ws/v1/node', - timelineV2: 'ws/v2/timeline' + timelineV2: 'ws/v2/timeline', + dashService: 'services/v1/applications', + node: '{nodeAddress}/ws/v1/node' }, }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/confirm-dialog-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/confirm-dialog-test.js new file mode 100644 index 0000000..fedf00b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/confirm-dialog-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('confirm-dialog', 'Integration | Component | confirm dialog', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + + this.render(hbs`{{confirm-dialog}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage: + this.render(hbs` + {{#confirm-dialog}} + template block text + {{/confirm-dialog}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/deploy-service-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/deploy-service-test.js new file mode 100644 index 0000000..ba855a7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/deploy-service-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('deploy-service', 'Integration | Component | deploy service', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + + this.render(hbs`{{deploy-service}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage: + this.render(hbs` + {{#deploy-service}} + template block text + {{/deploy-service}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/fileconfig-viewer-dialog-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/fileconfig-viewer-dialog-test.js new file mode 100644 index 0000000..f99e08f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/fileconfig-viewer-dialog-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('fileconfig-viewer-dialog', 'Integration | Component | fileconfig viewer dialog', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + + this.render(hbs`{{fileconfig-viewer-dialog}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage: + this.render(hbs` + {{#fileconfig-viewer-dialog}} + template block text + {{/fileconfig-viewer-dialog}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/info-tooltip-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/info-tooltip-test.js new file mode 100644 index 0000000..7b0c1a1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/info-tooltip-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('info-tooltip', 'Integration | Component | info tooltip', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + + this.render(hbs`{{info-tooltip}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage: + this.render(hbs` + {{#info-tooltip}} + template block text + {{/info-tooltip}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/metrics-table-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/metrics-table-test.js new file mode 100644 index 0000000..f6f9ef0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/metrics-table-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('metrics-table', 'Integration | Component | metrics table', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + + this.render(hbs`{{metrics-table}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage: + this.render(hbs` + {{#metrics-table}} + template block text + {{/metrics-table}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-component-table-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-component-table-test.js new file mode 100644 index 0000000..3ea27a5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-component-table-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('service-component-table', 'Integration | Component | service component table', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + + this.render(hbs`{{service-component-table}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage: + this.render(hbs` + {{#service-component-table}} + template block text + {{/service-component-table}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-config-table-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-config-table-test.js new file mode 100644 index 0000000..39f269a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-config-table-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('service-config-table', 'Integration | Component | service config table', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + + this.render(hbs`{{service-config-table}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage: + this.render(hbs` + {{#service-config-table}} + template block text + {{/service-config-table}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-fileconfig-table-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-fileconfig-table-test.js new file mode 100644 index 0000000..a486fa0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/service-fileconfig-table-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('service-fileconfig-table', 'Integration | Component | service fileconfig table', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... 
});" + EOL + EOL + + + this.render(hbs`{{service-fileconfig-table}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage:" + EOL + + this.render(hbs` + {{#service-fileconfig-table}} + template block text + {{/service-fileconfig-table}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/upload-config-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/upload-config-test.js new file mode 100644 index 0000000..64fdf4a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/integration/components/upload-config-test.js @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForComponent, test } from 'ember-qunit'; +import hbs from 'htmlbars-inline-precompile'; + +moduleForComponent('upload-config', 'Integration | Component | upload config', { + integration: true +}); + +test('it renders', function(assert) { + + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... 
});" + EOL + EOL + + + this.render(hbs`{{upload-config}}`); + + assert.equal(this.$().text().trim(), ''); + + // Template block usage:" + EOL + + this.render(hbs` + {{#upload-config}} + template block text + {{/upload-config}} + `); + + assert.equal(this.$().text().trim(), 'template block text'); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-component-instance-test.js new file mode 100644 index 0000000..f1eaba6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-component-instance-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('adapter:yarn-component-instance', 'Unit | Adapter | yarn component instance', { + // Specify the other units that are required for this test. + // needs: ['serializer:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let adapter = this.subject(); + assert.ok(adapter); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-component-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-component-test.js new file mode 100644 index 0000000..15b862b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-component-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('adapter:yarn-service-component', 'Unit | Adapter | yarn service component', { + // Specify the other units that are required for this test. + // needs: ['serializer:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let adapter = this.subject(); + assert.ok(adapter); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-info-test.js new file mode 100644 index 0000000..4ab8680 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-service-info-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('adapter:yarn-service-info', 'Unit | Adapter | yarn service info', { + // Specify the other units that are required for this test. + // needs: ['serializer:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let adapter = this.subject(); + assert.ok(adapter); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-servicedef-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-servicedef-test.js new file mode 100644 index 0000000..ea12bc5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/adapters/yarn-servicedef-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('adapter:yarn-servicedef', 'Unit | Adapter | yarn servicedef', { + // Specify the other units that are required for this test. + // needs: ['serializer:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let adapter = this.subject(); + assert.ok(adapter); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/components-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/components-test.js new file mode 100644 index 0000000..4ef38ff --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/components-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:yarn-app/components', 'Unit | Controller | yarn app/components', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let controller = this.subject(); + assert.ok(controller); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/configs-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/configs-test.js new file mode 100644 index 0000000..0b26cde --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-app/configs-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:yarn-app/configs', 'Unit | Controller | yarn app/configs', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let controller = this.subject(); + assert.ok(controller); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance-test.js new file mode 100644 index 0000000..c8f29b9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:yarn-component-instance', 'Unit | Controller | yarn component instance', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let controller = this.subject(); + assert.ok(controller); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance/info-test.js new file mode 100644 index 0000000..2abbe9f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instance/info-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:yarn-component-instance/info', 'Unit | Controller | yarn component instance/info', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let controller = this.subject(); + assert.ok(controller); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances-test.js new file mode 100644 index 0000000..8622c71 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:yarn-component-instances', 'Unit | Controller | yarn component instances', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let controller = this.subject(); + assert.ok(controller); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/configs-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/configs-test.js new file mode 100644 index 0000000..63a6836 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/configs-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:yarn-component-instances/configs', 'Unit | Controller | yarn component instances/configs', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let controller = this.subject(); + assert.ok(controller); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/info-test.js new file mode 100644 index 0000000..328679a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-component-instances/info-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:yarn-component-instances/info', 'Unit | Controller | yarn component instances/info', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let controller = this.subject(); + assert.ok(controller); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-deploy-service-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-deploy-service-test.js new file mode 100644 index 0000000..c3918f4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/controllers/yarn-deploy-service-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('controller:yarn-deploy-service', 'Unit | Controller | yarn deploy service', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +// Replace this with your real tests. 
+test('it exists', function(assert) { + let controller = this.subject(); + assert.ok(controller); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/helpers/check-availability-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/helpers/check-availability-test.js new file mode 100644 index 0000000..2a9db72 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/helpers/check-availability-test.js @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { checkAvailability } from '../../../helpers/check-availability'; +import { module, test } from 'qunit'; + +module('Unit | Helper | check availability'); + +// Replace this with your real tests. 
+test('it works', function(assert) { + let result = checkAvailability(42); + assert.ok(result); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-component-instance-test.js new file mode 100644 index 0000000..0c79c39 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-component-instance-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('yarn-component-instance', 'Unit | Model | yarn component instance', { + // Specify the other units that are required for this test. 
+ needs: [] +}); + +test('it exists', function(assert) { + let model = this.subject(); + // let store = this.store(); + assert.ok(!!model); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-component-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-component-test.js new file mode 100644 index 0000000..d7476a2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-component-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('yarn-service-component', 'Unit | Model | yarn service component', { + // Specify the other units that are required for this test. 
+ needs: [] +}); + +test('it exists', function(assert) { + let model = this.subject(); + // let store = this.store(); + assert.ok(!!model); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-info-test.js new file mode 100644 index 0000000..114be00 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-service-info-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('yarn-service-info', 'Unit | Model | yarn service info', { + // Specify the other units that are required for this test. 
+ needs: [] +}); + +test('it exists', function(assert) { + let model = this.subject(); + // let store = this.store(); + assert.ok(!!model); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-servicedef-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-servicedef-test.js new file mode 100644 index 0000000..141a94b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/models/yarn-servicedef-test.js @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('yarn-servicedef', 'Unit | Model | yarn servicedef', { + // Specify the other units that are required for this test. 
+ needs: [] +}); + +test('it exists', function(assert) { + let model = this.subject(); + // let store = this.store(); + assert.ok(!!model); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/components-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/components-test.js new file mode 100644 index 0000000..1dd8909 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/components-test.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:yarn-app/components', 'Unit | Route | yarn app/components', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/configs-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/configs-test.js new file mode 100644 index 0000000..7b90712 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-app/configs-test.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:yarn-app/configs', 'Unit | Route | yarn app/configs', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance-test.js new file mode 100644 index 0000000..b9ab2f0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance-test.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:yarn-component-instance', 'Unit | Route | yarn component instance', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance/info-test.js new file mode 100644 index 0000000..a2784ce --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instance/info-test.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:yarn-component-instance/info', 'Unit | Route | yarn component instance/info', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances-test.js new file mode 100644 index 0000000..6aee99a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances-test.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:yarn-component-instances', 'Unit | Route | yarn component instances', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/configs-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/configs-test.js new file mode 100644 index 0000000..281aabb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/configs-test.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:yarn-component-instances/configs', 'Unit | Route | yarn component instances/configs', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/info-test.js new file mode 100644 index 0000000..23d9bb8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-component-instances/info-test.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:yarn-component-instances/info', 'Unit | Route | yarn component instances/info', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-deploy-service-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-deploy-service-test.js new file mode 100644 index 0000000..4e2dcf1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/routes/yarn-deploy-service-test.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:yarn-deploy-service', 'Unit | Route | yarn deploy service', { + // Specify the other units that are required for this test. 
+ // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-component-instance-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-component-instance-test.js new file mode 100644 index 0000000..6add066 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-component-instance-test.js @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('yarn-component-instance', 'Unit | Serializer | yarn component instance', { + // Specify the other units that are required for this test. + needs: ['serializer:yarn-component-instance'] +}); + +// Replace this with your real tests. 
+test('it serializes records', function(assert) { + let record = this.subject(); + + let serializedRecord = record.serialize(); + + assert.ok(serializedRecord); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-component-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-component-test.js new file mode 100644 index 0000000..c9df24b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-component-test.js @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('yarn-service-component', 'Unit | Serializer | yarn service component', { + // Specify the other units that are required for this test. + needs: ['serializer:yarn-service-component'] +}); + +// Replace this with your real tests. 
+test('it serializes records', function(assert) { + let record = this.subject(); + + let serializedRecord = record.serialize(); + + assert.ok(serializedRecord); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-info-test.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-info-test.js new file mode 100644 index 0000000..9b1d310 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tests/unit/serializers/yarn-service-info-test.js @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { moduleForModel, test } from 'ember-qunit'; + +moduleForModel('yarn-service-info', 'Unit | Serializer | yarn service info', { + // Specify the other units that are required for this test. + needs: ['serializer:yarn-service-info'] +}); + +// Replace this with your real tests. 
+test('it serializes records', function(assert) { + let record = this.subject(); + + let serializedRecord = record.serialize(); + + assert.ok(serializedRecord); +}); diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index 288f4bc..e4e611b 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -75,7 +75,7 @@ org.apache.maven.plugins maven-javadoc-plugin - org.apache.hadoop.yarn.proto:org.apache.hadoop.yarn.federation.proto + org.apache.hadoop.yarn.proto:org.apache.hadoop.yarn.federation.proto:org.apache.hadoop.yarn.service diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index 3cbbaa7..1b3c5f0 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -82,6 +82,10 @@ org.apache.hadoop hadoop-yarn-server-router + + org.apache.hadoop + hadoop-yarn-services-core +