diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml index ee77ecb..f02f7bb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml @@ -322,16 +322,38 @@ easymock 3.1 test + + + org.objenesis + objenesis + + org.powermock powermock-api-easymock - 1.5 + 1.6.5 test + org.powermock + powermock-module-junit4 + 1.6.5 + + + org.javassist + javassist + + + org.objenesis + objenesis + + + + + javax.servlet.jsp jsp-api runtime @@ -354,6 +376,13 @@ swagger-annotations 1.5.4 + + + org.apache.hadoop + hadoop-minicluster + test + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java index ef96c9b..0c9b52c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java @@ -337,5 +337,11 @@ public static String mkSliderUserPath(String username) { return SVC_SLIDER_USERS + "/" + username; } - + /** + * Blocking enum of users + * @return an unordered list of clusters under a user + */ + public List getClusters() throws KeeperException, InterruptedException { + return zookeeper.getChildren(userPath, null); + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java index 9a89c39..c5394fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java @@ -17,6 +17,7 @@ package org.apache.slider.server.appmaster.security; import com.google.common.base.Preconditions; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import static org.apache.slider.core.main.LauncherExitCodes.EXIT_UNAUTHORIZED; @@ -126,13 +127,14 @@ public String getPrincipal () throws IOException { } public boolean isKeytabProvided() { - boolean keytabProvided = instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM) - .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null || - instanceDefinition.getAppConfOperations() - .getComponent(SliderKeys.COMPONENT_AM). 
- get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null; - return keytabProvided; + String keytabLocalPath = instanceDefinition.getAppConfOperations() + .getComponent(SliderKeys.COMPONENT_AM) + .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH); + String keytabName = instanceDefinition.getAppConfOperations() + .getComponent(SliderKeys.COMPONENT_AM) + .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME); + return StringUtils.isNotBlank(keytabLocalPath) + || StringUtils.isNotBlank(keytabName); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java index 6f54959..b00a610 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java @@ -448,6 +448,11 @@ public RoleHistory getRoleHistory() { return roleHistory; } + @VisibleForTesting + public void setRoleHistory(RoleHistory roleHistory) { + this.roleHistory = roleHistory; + } + /** * Get the path used for history files * @return the directory used for history files @@ -1979,7 +1984,7 @@ private void checkFailureThreshold(RoleStatus role) role.getName(), failures, threshold); } - if (failures > threshold) { + if (threshold > 0 && failures > threshold) { throw new TriggerClusterTeardownException( SliderExitCodes.EXIT_DEPLOYMENT_FAILED, FinalApplicationStatus.FAILED, ErrorStrings.E_UNSTABLE_CLUSTER + diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java new file mode 100644 index 0000000..635ba68 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.api; + +import org.apache.hadoop.conf.Configuration; +import org.apache.slider.server.appmaster.rpc.RpcBinder; +import org.apache.slider.server.appmaster.rpc.SliderClusterProtocolPB; +import org.junit.Test; + +import java.net.InetSocketAddress; + +import static org.junit.Assert.assertTrue; + +/** + * Tests RPC work + */ +public class TestRPCBinding { + + @Test + public void testRegistration() throws Throwable { + Configuration conf = new Configuration(); + RpcBinder.registerSliderAPI(conf); + assertTrue(RpcBinder.verifyBondedToProtobuf(conf, SliderClusterProtocolPB.class)); + } + + @Test + public void testGetProxy() throws Throwable { + Configuration conf = new Configuration(); + InetSocketAddress saddr= new InetSocketAddress("127.0.0.1",9000); + SliderClusterProtocol proxy = + RpcBinder.connectToServer(saddr, null, conf, 1000); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java new file mode 100644 index 0000000..df95ac2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java @@ -0,0 +1,263 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.common.params.Arguments; +import org.apache.slider.common.params.SliderActions; +import org.apache.slider.core.exceptions.BadCommandArgumentsException; +import org.apache.slider.core.exceptions.ErrorStrings; +import org.apache.slider.core.exceptions.UsageException; +import org.apache.slider.core.main.ServiceLauncherBaseTest; +import org.junit.Test; + +import java.util.Arrays; + +/** + * Test the argument parsing/validation logic + */ +public class TestClientBadArgs extends ServiceLauncherBaseTest { + + @Test + public void testNoAction() throws Throwable { + launchExpectingException(SliderClient.class, + createTestConfig(), + "Usage: slider COMMAND", + EMPTY_LIST); + + } + + @Test + public void testUnknownAction() throws Throwable { + launchExpectingException(SliderClient.class, + createTestConfig(), + "not-a-known-action", + Arrays.asList("not-a-known-action")); + } + + @Test + public void testActionWithoutOptions() throws Throwable { + launchExpectingException(SliderClient.class, + createTestConfig(), + "Usage: slider build ", + Arrays.asList(SliderActions.ACTION_BUILD)); + } + + @Test + public void testActionWithoutEnoughArgs() throws Throwable { + launchExpectingException(SliderClient.class, + createTestConfig(), + ErrorStrings.ERROR_NOT_ENOUGH_ARGUMENTS, + Arrays.asList(SliderActions.ACTION_THAW)); + } + + @Test + public void testActionWithTooManyArgs() throws Throwable { + 
launchExpectingException(SliderClient.class, + createTestConfig(), + ErrorStrings.ERROR_TOO_MANY_ARGUMENTS, + Arrays.asList(SliderActions.ACTION_HELP, + "hello, world")); + } + + @Test + public void testBadImageArg() throws Throwable { + launchExpectingException(SliderClient.class, + createTestConfig(), + "Unknown option: --image", + Arrays.asList(SliderActions.ACTION_HELP, + Arguments.ARG_IMAGE)); + } + + @Test + public void testRegistryUsage() throws Throwable { + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "org.apache.slider.core.exceptions.UsageException: Argument --name missing", + Arrays.asList(SliderActions.ACTION_REGISTRY)); + assertTrue(exception instanceof UsageException); + log.info(exception.toString()); + } + + @Test + public void testRegistryExportBadUsage1() throws Throwable { + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "Expected a value after parameter --getexp", + Arrays.asList(SliderActions.ACTION_REGISTRY, + Arguments.ARG_NAME, + "cl1", + Arguments.ARG_GETEXP)); + assertTrue(exception instanceof BadCommandArgumentsException); + log.info(exception.toString()); + } + + @Test + public void testRegistryExportBadUsage2() throws Throwable { + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "Expected a value after parameter --getexp", + Arrays.asList(SliderActions.ACTION_REGISTRY, + Arguments.ARG_NAME, + "cl1", + Arguments.ARG_LISTEXP, + Arguments.ARG_GETEXP)); + assertTrue(exception instanceof BadCommandArgumentsException); + log.info(exception.toString()); + } + + @Test + public void testRegistryExportBadUsage3() throws Throwable { + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "Usage: registry", + Arrays.asList(SliderActions.ACTION_REGISTRY, + Arguments.ARG_NAME, + "cl1", + Arguments.ARG_LISTEXP, + Arguments.ARG_GETEXP, + "export1")); + assertTrue(exception instanceof 
UsageException); + log.info(exception.toString()); + } + + @Test + public void testUpgradeUsage() throws Throwable { + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "org.apache.slider.core.exceptions.BadCommandArgumentsException: Not enough arguments for action: upgrade Expected minimum 1 but got 0", + Arrays.asList(SliderActions.ACTION_UPGRADE)); + assertTrue(exception instanceof BadCommandArgumentsException); + log.info(exception.toString()); + } + + @Test + public void testUpgradeWithTemplateOptionOnly() throws Throwable { + String appName = "test_hbase"; + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "BadCommandArgumentsException: Option --resources must be specified with option --template", + Arrays.asList(SliderActions.ACTION_UPGRADE, + appName, + Arguments.ARG_TEMPLATE, + "/tmp/appConfig.json" + )); + assertTrue(exception instanceof BadCommandArgumentsException); + log.info(exception.toString()); + } + + public Configuration createTestConfig() { + Configuration configuration = new Configuration(); + configuration.set(YarnConfiguration.RM_ADDRESS, "127.0.0.1:8032"); + return configuration; + } + + @Test + public void testUpgradeWithResourcesOptionOnly() throws Throwable { + String appName = "test_hbase"; + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "BadCommandArgumentsException: Option --template must be specified with option --resources", + Arrays.asList(SliderActions.ACTION_UPGRADE, + appName, + Arguments.ARG_RESOURCES, + "/tmp/resources.json" + )); + assertTrue(exception instanceof BadCommandArgumentsException); + log.info(exception.toString()); + } + + @Test + public void testUpgradeWithTemplateResourcesAndContainersOption() throws Throwable { + String appName = "test_hbase"; + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "BadCommandArgumentsException: Option --containers 
cannot be " + + "specified with --template or --resources", + Arrays.asList(SliderActions.ACTION_UPGRADE, + appName, + Arguments.ARG_TEMPLATE, + "/tmp/appConfig.json", + Arguments.ARG_RESOURCES, + "/tmp/resources.json", + Arguments.ARG_CONTAINERS, + "container_1" + )); + assertTrue(exception instanceof BadCommandArgumentsException); + log.info(exception.toString()); + } + + @Test + public void testUpgradeWithTemplateResourcesAndComponentsOption() throws Throwable { + String appName = "test_hbase"; + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "BadCommandArgumentsException: Option --components cannot be " + + "specified with --template or --resources", + Arrays.asList(SliderActions.ACTION_UPGRADE, + appName, + Arguments.ARG_TEMPLATE, + "/tmp/appConfig.json", + Arguments.ARG_RESOURCES, + "/tmp/resources.json", + Arguments.ARG_COMPONENTS, + "HBASE_MASTER" + )); + assertTrue(exception instanceof BadCommandArgumentsException); + log.info(exception.toString()); + } + + @Test + public void testCreateAppWithAddonPkgBadArg1() throws Throwable { + //add on package without specifying add on package name + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "Expected 2 values after --addon", + Arrays.asList(SliderActions.ACTION_CREATE, + "cl1", + Arguments.ARG_ADDON, + "addon1")); + assertTrue(exception instanceof BadCommandArgumentsException); + log.info(exception.toString()); + } + + @Test + public void testNodesMissingFile() throws Throwable { + Throwable exception = launchExpectingException(SliderClient.class, + createTestConfig(), + "after parameter --out", + Arrays.asList(SliderActions.ACTION_NODES, Arguments.ARG_OUTPUT)); + assertTrue(exception instanceof BadCommandArgumentsException); + } + + @Test + public void testFlexWithNoComponents() throws Throwable { + Throwable exception = launchExpectingException(SliderClient.class, + new Configuration(), + "Usage: slider flex ", + 
Arrays.asList( + SliderActions.ACTION_FLEX, + "flex1", + Arguments.ARG_DEFINE, YarnConfiguration.RM_ADDRESS + "=127.0.0.1:8032" + )); + assertTrue(exception instanceof UsageException); + log.info(exception.toString()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java new file mode 100644 index 0000000..400e8d9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.client; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.common.params.Arguments; +import org.apache.slider.common.params.ClientArgs; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.main.ServiceLauncher; +import org.apache.slider.core.main.ServiceLauncherBaseTest; +import org.junit.Test; + +import java.net.UnknownHostException; +import java.util.Arrays; + +/** + * Test bad argument handling + */ +public class TestClientBasicArgs extends ServiceLauncherBaseTest { + + /** + * help should print out help string and then succeed + * @throws Throwable + */ + @Test + public void testHelp() throws Throwable { + ServiceLauncher launcher = launch(SliderClient.class, + SliderUtils.createConfiguration(), + Arrays.asList(ClientArgs.ACTION_HELP)); + assertEquals(0, launcher.getServiceExitCode()); + } + + @Test + public void testNoArgs() throws Throwable { + launchExpectingException(SliderClient.class, + SliderUtils.createConfiguration(), + "Usage: slider COMMAND", + EMPTY_LIST); + } + + @Test + public void testListUnknownRM() throws Throwable { + try { + YarnConfiguration conf = SliderUtils.createConfiguration(); + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, + 1000); + conf.setLong(YarnConfiguration + .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000); + ServiceLauncher launcher = launch(SliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_LIST, + "cluster", + Arguments.ARG_MANAGER, + "badhost:8888")); + fail("expected an exception, got a launcher with exit code " + + launcher.getServiceExitCode()); + } catch (UnknownHostException expected) { + //expected + } + + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java new file mode 100644 index 0000000..841b010 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java @@ -0,0 +1,526 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.api.RoleKeys; +import org.apache.slider.common.SliderXmlConfKeys; +import org.apache.slider.common.params.AbstractClusterBuildingActionArgs; +import org.apache.slider.common.params.ActionBuildArgs; +import org.apache.slider.common.params.ActionCreateArgs; +import org.apache.slider.common.params.ActionDestroyArgs; +import org.apache.slider.common.params.ActionExistsArgs; +import org.apache.slider.common.params.ActionFlexArgs; +import org.apache.slider.common.params.ActionFreezeArgs; +import org.apache.slider.common.params.ActionListArgs; +import org.apache.slider.common.params.ActionStatusArgs; +import org.apache.slider.common.params.ActionThawArgs; +import org.apache.slider.common.params.ActionUpdateArgs; +import org.apache.slider.common.params.ArgOps; +import org.apache.slider.common.params.Arguments; +import org.apache.slider.common.params.ClientArgs; +import org.apache.slider.common.params.SliderActions; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.exceptions.BadCommandArgumentsException; +import org.apache.slider.core.exceptions.ErrorStrings; +import org.apache.slider.core.exceptions.SliderException; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** + * Test handling of common arguments, specifically how things get split up + */ +public class TestCommonArgParsing implements SliderActions, Arguments { + + + public static final String CLUSTERNAME = "clustername"; + + @Test + public void 
testCreateActionArgs() throws Throwable { + ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_CREATE, + "cluster1")); + assertEquals("cluster1", clientArgs.getClusterName()); + } + + @Test + public void testCreateFailsNoClustername() throws Throwable { + assertParseFails(Arrays.asList(ACTION_CREATE)); + } + + @Test + public void testCreateFailsTwoClusternames() throws Throwable { + assertParseFails(Arrays.asList( + ACTION_CREATE, + "c1", + "c2" + )); + } + + @Test + public void testHelp() throws Throwable { + ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_HELP)); + assertNull(clientArgs.getClusterName()); + } + + @Test + public void testSliderBasePath() throws Throwable { + ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST, + ARG_BASE_PATH, "/projects/slider/clusters")); + assertEquals(new Path("/projects/slider/clusters"), + clientArgs.getBasePath()); + } + + @Test + public void testNoSliderBasePath() throws Throwable { + ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST)); + assertNull(clientArgs.getBasePath()); + } + + @Test + public void testListNoClusternames() throws Throwable { + ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST)); + assertNull(clientArgs.getClusterName()); + } + + @Test + public void testListNoClusternamesDefinition() throws Throwable { + ClientArgs clientArgs = createClientArgs(Arrays.asList( + ACTION_LIST, + ARG_DEFINE, + "fs.default.FS=file://localhost" + )); + assertNull(clientArgs.getClusterName()); + } + + @Test + public void testList1Clustername() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList(ACTION_LIST, "cluster1")); + assertEquals("cluster1", ca.getClusterName()); + assertTrue(ca.getCoreAction() instanceof ActionListArgs); + } + + @Test + public void testListFailsTwoClusternames() throws Throwable { + assertParseFails(Arrays.asList( + ACTION_LIST, + "c1", + "c2" + )); + } + + @Test + public void testDefinitions() throws Throwable { 
+ ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_CREATE, + CLUSTERNAME, + "-D","yarn.resourcemanager.principal=yarn/server@LOCAL", + "-D","dfs.datanode.kerberos.principal=hdfs/server@LOCAL" + )); + Configuration conf = new Configuration(false); + ca.applyDefinitions(conf); + assertEquals(CLUSTERNAME, ca.getClusterName()); + assertNull(conf.get(SliderXmlConfKeys.KEY_SLIDER_BASE_PATH)); + SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL); + SliderUtils.verifyPrincipalSet( + conf, + SliderXmlConfKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY); + + } + + @Test + public void testDefinitionsSettingBaseSliderDir() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_CREATE, + CLUSTERNAME, + "--basepath", "/projects/slider/clusters", + "-D","yarn.resourcemanager.principal=yarn/server@LOCAL", + "-D","dfs.datanode.kerberos.principal=hdfs/server@LOCAL" + )); + Configuration conf = new Configuration(false); + ca.applyDefinitions(conf); + assertEquals(CLUSTERNAME, ca.getClusterName()); + assertEquals("/projects/slider/clusters", conf.get(SliderXmlConfKeys + .KEY_SLIDER_BASE_PATH)); + SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL); + SliderUtils.verifyPrincipalSet(conf, SliderXmlConfKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY); + + } + + /** + * Test a start command + * @throws Throwable + */ + @Test + public void testComplexThaw() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_THAW, + "--manager", "rhel:8032", "--filesystem", "hdfs://rhel:9090", + "-S","java.security.krb5.realm=LOCAL","-S", "java.security.krb5.kdc=rhel", + "-D","yarn.resourcemanager.principal=yarn/rhel@LOCAL", + "-D","namenode.resourcemanager.principal=hdfs/rhel@LOCAL", + "cl1" + )); + assertEquals("cl1", ca.getClusterName()); + assertTrue(ca.getCoreAction() instanceof ActionThawArgs); + } + + /** + * Test a force kill command where the app comes at the end of the line + * @throws Throwable + * + */ + @Test + 
public void testStatusSplit() throws Throwable { + + String appId = "application_1381252124398_0013"; + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_STATUS, + "--manager", "rhel:8032", + "--filesystem", "hdfs://rhel:9090", + "-S","java.security.krb5.realm=LOCAL", + "-S", "java.security.krb5.kdc=rhel", + "-D","yarn.resourcemanager.principal=yarn/rhel@LOCAL", + "-D","namenode.resourcemanager.principal=hdfs/rhel@LOCAL", + appId + )); + assertEquals(appId, ca.getClusterName()); + } + + @Test + public void testFreezeFailsNoArg() throws Throwable { + assertParseFails(Arrays.asList( + ACTION_FREEZE + )); + } + + @Test + public void testFreezeWorks1Arg() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_FREEZE, + CLUSTERNAME + )); + assertEquals(CLUSTERNAME, ca.getClusterName()); + assertTrue(ca.getCoreAction() instanceof ActionFreezeArgs); + } + + @Test + public void testFreezeFails2Arg() throws Throwable { + assertParseFails(Arrays.asList( + ACTION_FREEZE, "cluster", "cluster2" + )); + } + + @Test + public void testFreezeForceWaitAndMessage() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_FREEZE, CLUSTERNAME, + ARG_FORCE, + ARG_WAIT, "0", + ARG_MESSAGE, "explanation" + )); + assertEquals(CLUSTERNAME, ca.getClusterName()); + assertTrue(ca.getCoreAction() instanceof ActionFreezeArgs); + ActionFreezeArgs freezeArgs = (ActionFreezeArgs) ca.getCoreAction(); + assertEquals("explanation", freezeArgs.message); + assertTrue(freezeArgs.force); + } + + @Test + public void testGetStatusWorks1Arg() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_STATUS, + CLUSTERNAME + )); + assertEquals(CLUSTERNAME, ca.getClusterName()); + assertTrue(ca.getCoreAction() instanceof ActionStatusArgs); + } + + @Test + public void testExistsWorks1Arg() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_EXISTS, + CLUSTERNAME, + ARG_LIVE + )); + assertEquals(CLUSTERNAME, 
ca.getClusterName()); + assertTrue(ca.getCoreAction() instanceof ActionExistsArgs); + assertTrue(ca.getActionExistsArgs().live); + } + + @Test + public void testDestroy1Arg() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_DESTROY, + CLUSTERNAME + )); + assertEquals(CLUSTERNAME, ca.getClusterName()); + assertTrue(ca.getCoreAction() instanceof ActionDestroyArgs); + } + + /** + * Assert that a parse fails with a BadCommandArgumentsException + * @param argsList + */ + + private void assertParseFails(List argsList) throws SliderException { + try { + ClientArgs clientArgs = createClientArgs(argsList); + Assert.fail("expected an exception, got " + clientArgs); + } catch (BadCommandArgumentsException ignored) { + //expected + } + } + + /** + * build and parse client args, after adding the base args list + * @param argsList + */ + public ClientArgs createClientArgs(List argsList) + throws SliderException { + ClientArgs serviceArgs = new ClientArgs(argsList); + serviceArgs.parse(); + return serviceArgs; + } + + public ActionCreateArgs createAction(List argsList) + throws SliderException { + ClientArgs ca = createClientArgs(argsList); + assertEquals(ACTION_CREATE, ca.getAction()); + ActionCreateArgs args = ca.getActionCreateArgs(); + assertNotNull(args); + return args; + } + + @Test + public void testCreateWaitTime() throws Throwable { + ActionCreateArgs createArgs = createAction(Arrays.asList( + ACTION_CREATE, "cluster1", + ARG_WAIT, "600" + )); + assertEquals(600, createArgs.getWaittime()); + } + + + @Test + public void testSingleRoleArg() throws Throwable { + ActionCreateArgs createArgs = createAction(Arrays.asList( + ACTION_CREATE, "cluster1", + ARG_COMPONENT,"master","5" + )); + List tuples = createArgs.getComponentTuples(); + assertEquals(2, tuples.size()); + Map roleMap = ArgOps.convertTupleListToMap("roles", tuples); + assertEquals("5", roleMap.get("master")); + } + + @Test + public void testNoRoleArg() throws Throwable { + 
ActionCreateArgs createArgs = createAction(Arrays.asList( + ACTION_CREATE, "cluster1" + )); + List tuples = createArgs.getComponentTuples(); + Map roleMap = ArgOps.convertTupleListToMap("roles", tuples); + assertNull(roleMap.get("master")); + } + + + @Test + public void testMultiRoleArgBuild() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_BUILD, "cluster1", + ARG_COMPONENT, "master", "1", + ARG_COMPONENT, "worker", "2" + )); + assertEquals(ACTION_BUILD, ca.getAction()); + assertTrue(ca.getCoreAction() instanceof ActionBuildArgs); + assertTrue(ca.getBuildingActionArgs() instanceof ActionBuildArgs); + AbstractClusterBuildingActionArgs args = ca.getActionBuildArgs(); + List tuples = args.getComponentTuples(); + assertEquals(4, tuples.size()); + Map roleMap = ArgOps.convertTupleListToMap("roles", tuples); + assertEquals("1", roleMap.get("master")); + assertEquals("2", roleMap.get("worker")); + } + + @Test + public void testArgUpdate() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_UPDATE, "cluster1", + ARG_TEMPLATE, "appConfig.json" + )); + assertEquals(ACTION_UPDATE, ca.getAction()); + assertTrue(ca.getCoreAction() instanceof ActionUpdateArgs); + assertTrue(ca.getActionUpdateArgs() instanceof ActionUpdateArgs); + AbstractClusterBuildingActionArgs args = ca.getActionUpdateArgs(); + assertNotNull(args.template); + } + + @Test + public void testFlexArgs() throws Throwable { + ClientArgs ca = createClientArgs(Arrays.asList( + ACTION_FLEX, "cluster1", + ARG_COMPONENT, "master", "1", + ARG_COMPONENT, "worker", "2" + )); + assertTrue(ca.getCoreAction() instanceof ActionFlexArgs); + List tuples = ca.getActionFlexArgs().getComponentTuples(); + assertEquals(4, tuples.size()); + Map roleMap = ArgOps.convertTupleListToMap("roles", tuples); + assertEquals("1", roleMap.get("master")); + assertEquals("2", roleMap.get("worker")); + } + + @Test + public void testDuplicateRole() throws Throwable { + ActionCreateArgs 
createArgs = createAction(Arrays.asList( + ACTION_CREATE, "cluster1", + ARG_COMPONENT, "master", "1", + ARG_COMPONENT, "master", "2" + )); + List tuples = createArgs.getComponentTuples(); + assertEquals(4, tuples.size()); + try { + Map roleMap = ArgOps.convertTupleListToMap( + "roles", + tuples); + Assert.fail("got a role map " + roleMap + " not a failure"); + } catch (BadCommandArgumentsException expected) { + assertTrue(expected.getMessage().contains(ErrorStrings.ERROR_DUPLICATE_ENTRY)); + } + } + + @Test + public void testOddRoleCount() throws Throwable { + ActionCreateArgs createArgs = createAction(Arrays.asList( + ACTION_CREATE, "cluster1", + ARG_COMPONENT,"master","1", + ARG_COMPONENT,"master","2" + )); + List tuples = createArgs.getComponentTuples(); + tuples.add("loggers"); + assertEquals(5, tuples.size()); + try { + Map roleMap = ArgOps.convertTupleListToMap("roles", tuples); + Assert.fail("got a role map " + roleMap + " not a failure"); + } catch (BadCommandArgumentsException expected) { + assertTrue(expected.getMessage().contains(ErrorStrings.ERROR_PARSE_FAILURE)); + } + } + + /** + * Create some role-opt client args, so that multiple tests can use it + * @return the args + */ + public ActionCreateArgs createRoleOptClientArgs() throws SliderException { + ActionCreateArgs createArgs = createAction(Arrays.asList( + ACTION_CREATE, "cluster1", + ARG_COMPONENT, "master", "1", + ARG_COMP_OPT, "master", "cheese", "swiss", + ARG_COMP_OPT, "master", "env.CHEESE", "cheddar", + ARG_COMP_OPT, "master", ResourceKeys.YARN_CORES, "3", + + ARG_COMPONENT, "worker", "2", + ARG_COMP_OPT, "worker", ResourceKeys.YARN_CORES, "2", + ARG_COMP_OPT, "worker", RoleKeys.JVM_HEAP, "65536", + ARG_COMP_OPT, "worker", "env.CHEESE", "stilton" + )); + return createArgs; + } + + @Test + public void testRoleOptionParse() throws Throwable { + ActionCreateArgs createArgs = createRoleOptClientArgs(); + Map> tripleMaps = createArgs.getCompOptionMap(); + Map workerOpts = tripleMaps.get("worker"); + 
assertEquals(3, workerOpts.size()); + assertEquals("2", workerOpts.get(ResourceKeys.YARN_CORES)); + assertEquals("65536", workerOpts.get(RoleKeys.JVM_HEAP)); + + Map masterOpts = tripleMaps.get("master"); + assertEquals(3, masterOpts.size()); + assertEquals("3", masterOpts.get(ResourceKeys.YARN_CORES)); + + } + + @Test + public void testRoleOptionsMerge() throws Throwable { + ActionCreateArgs createArgs = createRoleOptClientArgs(); + + Map> roleOpts = createArgs.getCompOptionMap(); + + Map> clusterRoleMap = createEnvMap(); + SliderUtils.applyCommandLineRoleOptsToRoleMap(clusterRoleMap, roleOpts); + + Map masterOpts = clusterRoleMap.get("master"); + assertEquals("swiss", masterOpts.get("cheese")); + + Map workerOpts = clusterRoleMap.get("worker"); + assertEquals("stilton", workerOpts.get("env.CHEESE")); + } + + @Test + public void testEnvVariableApply() throws Throwable { + ActionCreateArgs createArgs = createRoleOptClientArgs(); + + + Map> roleOpts = createArgs.getCompOptionMap(); + + Map> clusterRoleMap = createEnvMap(); + SliderUtils.applyCommandLineRoleOptsToRoleMap(clusterRoleMap, roleOpts); + + Map workerOpts = clusterRoleMap.get("worker"); + assertEquals("stilton", workerOpts.get("env.CHEESE")); + + Map envmap = SliderUtils.buildEnvMap(workerOpts); + assertEquals("stilton", envmap.get("CHEESE")); + + } + + /** + * static compiler complaining about matching LinkedHashMap with Map, + * so some explicit creation here + * @return a map of maps + */ + public Map> createEnvMap() { + + Map cheese = new HashMap<>(); + cheese.put("cheese", "french"); + Map envCheese = new HashMap<>(); + envCheese.put("env.CHEESE", "french"); + Map> envMap = new HashMap<>(); + envMap.put("master", cheese); + envMap.put("worker", envCheese); + return envMap; + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java new file mode 100644 index 0000000..0267c79 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java @@ -0,0 +1,391 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.client; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RawLocalFileSystem; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Logger; +import org.apache.log4j.spi.LoggingEvent; +import org.apache.slider.common.params.Arguments; +import org.apache.slider.common.params.ClientArgs; +import org.apache.slider.common.tools.SliderFileSystem; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.exceptions.BadCommandArgumentsException; +import org.apache.slider.core.exceptions.SliderException; +import org.apache.slider.core.main.ServiceLauncher; +import org.apache.slider.core.main.ServiceLauncherBaseTest; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + +/** + * Test a keytab installation + */ +public class TestKeytabCommandOptions extends ServiceLauncherBaseTest { + + private static SliderFileSystem testFileSystem; + + @Before + public void setupFilesystem() throws IOException { + org.apache.hadoop.fs.FileSystem fileSystem = new RawLocalFileSystem(); + YarnConfiguration configuration = SliderUtils.createConfiguration(); + fileSystem.setConf(configuration); + testFileSystem = new SliderFileSystem(fileSystem, configuration); + File testFolderDir = new File(testFileSystem + .buildKeytabInstallationDirPath("").toUri().getPath()); + FileUtils.deleteDirectory(testFolderDir); + } + + @Test + public void testInstallKeytab() throws Throwable { + // create a mock keytab file + File localKeytab = + FileUtil.createLocalTempFile(getTempLocation(), "test", true); + String contents = UUID.randomUUID().toString(); + FileUtils.write(localKeytab, contents); + YarnConfiguration conf = 
SliderUtils.createConfiguration(); + ServiceLauncher launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABINSTALL, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath(), + Arguments.ARG_FOLDER, + "testFolder")); + Path installedPath = new Path(testFileSystem + .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName()); + File installedKeytab = new File(installedPath.toUri().getPath()); + assertTrue(installedKeytab.exists()); + assertEquals(FileUtils.readFileToString(installedKeytab), + FileUtils.readFileToString(localKeytab)); + } + + @Test + public void testInstallThenDeleteKeytab() throws Throwable { + // create a mock keytab file + File localKeytab = + FileUtil.createLocalTempFile(getTempLocation(), "test", true); + String contents = UUID.randomUUID().toString(); + FileUtils.write(localKeytab, contents); + YarnConfiguration conf = SliderUtils.createConfiguration(); + ServiceLauncher launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_INSTALL_KEYTAB, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath(), + Arguments.ARG_FOLDER, + "testFolder")); + Path installedPath = new Path(testFileSystem + .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName()); + File installedKeytab = new File(installedPath.toUri().getPath()); + assertTrue(installedKeytab.exists()); + assertEquals(FileUtils.readFileToString(installedKeytab), + FileUtils.readFileToString(localKeytab)); + + launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABDELETE, + ClientArgs.ARG_KEYTAB, + localKeytab.getName(), + Arguments.ARG_FOLDER, + "testFolder")); + + assertFalse(installedKeytab.exists()); + + } + + @Test + public void testInstallThenListKeytab() throws Throwable { + // create a mock keytab file + File localKeytab = + FileUtil.createLocalTempFile(getTempLocation(), "test", true); + String contents = 
UUID.randomUUID().toString(); + FileUtils.write(localKeytab, contents); + YarnConfiguration conf = SliderUtils.createConfiguration(); + ServiceLauncher launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_INSTALL_KEYTAB, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath(), + Arguments.ARG_FOLDER, + "testFolder")); + Path installedPath = new Path(testFileSystem + .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName()); + File installedKeytab = new File(installedPath.toUri().getPath()); + assertTrue(installedKeytab.exists()); + assertEquals(FileUtils.readFileToString(installedKeytab), + FileUtils.readFileToString(localKeytab)); + + // install an additional copy into another folder to test listing + launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_INSTALL_KEYTAB, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath(), + Arguments.ARG_FOLDER, + "testFolder2")); + + TestAppender testAppender = new TestAppender(); + + Logger.getLogger(SliderClient.class).addAppender(testAppender); + + try { + launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABLIST) + ); + assertEquals(3, testAppender.events.size()); + String msg = (String) testAppender.events.get(1).getMessage(); + assertTrue(msg.contains("/.slider/keytabs/testFolder")); + assertTrue(msg.endsWith(installedKeytab.getName())); + msg = (String) testAppender.events.get(2).getMessage(); + assertTrue(msg.contains("/.slider/keytabs/testFolder")); + assertTrue(msg.endsWith(installedKeytab.getName())); + } finally { + Logger.getLogger(SliderClient.class).removeAppender(testAppender); + } + + // now listing while specifying the folder name + testAppender = new TestAppender(); + + Logger.getLogger(SliderClient.class).addAppender(testAppender); + + try { + launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + 
ClientArgs.ARG_KEYTABLIST, + Arguments.ARG_FOLDER, + "testFolder")); + assertEquals(2, testAppender.events.size()); + String msg = (String) testAppender.events.get(1).getMessage(); + assertTrue(msg.contains( "/.slider/keytabs/testFolder/" + + installedKeytab.getName())); + } finally { + Logger.getLogger(SliderClient.class).removeAppender(testAppender); + } + } + + @Test + public void testDeleteNonExistentKeytab() throws Throwable { + // create a mock keytab file + YarnConfiguration conf = SliderUtils.createConfiguration(); + try { + ServiceLauncher launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABDELETE, + ClientArgs.ARG_KEYTAB, + "HeyIDontExist.keytab", + Arguments.ARG_FOLDER, + "testFolder")); + fail("expected BadCommandArgumentsException from launch"); + } catch (BadCommandArgumentsException e) { + // expected + } + } + + @Test + public void testInstallKeytabWithNoFolder() throws Throwable { + // create a mock keytab file + File localKeytab = + FileUtil.createLocalTempFile(getTempLocation(), "test", true); + String contents = UUID.randomUUID().toString(); + FileUtils.write(localKeytab, contents); + YarnConfiguration conf = SliderUtils.createConfiguration(); + try { + ServiceLauncher launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABINSTALL, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath())); + fail("expected BadCommandArgumentsException from launch"); + } catch (BadCommandArgumentsException e) { + // expected + } + } + + @Test + public void testInstallKeytabWithNoKeytab() throws Throwable { + // create a mock keytab file + File localKeytab = + FileUtil.createLocalTempFile(getTempLocation(), "test", true); + String contents = UUID.randomUUID().toString(); + FileUtils.write(localKeytab, contents); + YarnConfiguration conf = SliderUtils.createConfiguration(); + try { + ServiceLauncher launcher = 
launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABINSTALL, + ClientArgs.ARG_FOLDER, + "testFolder")); + fail("expected BadCommandArgumentsException from launch"); + } catch (BadCommandArgumentsException e) { + // expected + } + } + + @Test + public void testInstallKeytabAllowingOverwrite() throws Throwable { + // create a mock keytab file + File localKeytab = + FileUtil.createLocalTempFile(getTempLocation(), "test", true); + String contents = UUID.randomUUID().toString(); + FileUtils.write(localKeytab, contents); + YarnConfiguration conf = SliderUtils.createConfiguration(); + ServiceLauncher launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABINSTALL, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath(), + Arguments.ARG_FOLDER, + "testFolder")); + Path installedPath = new Path(testFileSystem + .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName()); + File installedKeytab = new File(installedPath.toUri().getPath()); + assertTrue(installedKeytab.exists()); + assertEquals(FileUtils.readFileToString(installedKeytab), FileUtils + .readFileToString(localKeytab)); + launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABINSTALL, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath(), + Arguments.ARG_FOLDER, + "testFolder", + Arguments.ARG_OVERWRITE) + ); + assertTrue(installedKeytab.exists()); + assertEquals(FileUtils.readFileToString(installedKeytab), + FileUtils.readFileToString(localKeytab)); + } + + @Test + public void testInstallKeytabNotAllowingOverwrite() throws Throwable { + // create a mock keytab file + File localKeytab = + FileUtil.createLocalTempFile(getTempLocation(), "test", true); + String contents = UUID.randomUUID().toString(); + FileUtils.write(localKeytab, contents); + YarnConfiguration conf = SliderUtils.createConfiguration(); + ServiceLauncher 
launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABINSTALL, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath(), + Arguments.ARG_FOLDER, + "testFolder")); + Path installedPath = new Path(testFileSystem + .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName()); + File installedKeytab = new File(installedPath.toUri().getPath()); + assertTrue(installedKeytab.exists()); + assertEquals(FileUtils.readFileToString(installedKeytab), + FileUtils.readFileToString(localKeytab)); + try { + launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABINSTALL, + ClientArgs.ARG_KEYTAB, + localKeytab.getAbsolutePath(), + Arguments.ARG_FOLDER, + "testFolder")); + fail("expected BadCommandArgumentsException from launch"); + } catch (BadCommandArgumentsException e) { + // expected + } + } + + @Test + public void testInstallKeytabWithMissingKeytab() throws Throwable { + // create a mock keytab file + YarnConfiguration conf = SliderUtils.createConfiguration(); + try { + ServiceLauncher launcher = launch(TestSliderClient.class, + conf, + Arrays.asList( + ClientArgs.ACTION_KEYTAB, + ClientArgs.ARG_KEYTABINSTALL, + ClientArgs.ARG_KEYTAB, + "HeyIDontExist.keytab", + Arguments.ARG_FOLDER, + "testFolder")); + fail("expected BadCommandArgumentsException from launch"); + } catch (BadCommandArgumentsException e) { + // expected + } + } + + private File getTempLocation () { + return new File(System.getProperty("user.dir") + "/target"); + } + + public static class TestSliderClient extends SliderClient { + public TestSliderClient() { + super(); + } + + @Override + protected void initHadoopBinding() throws IOException, SliderException { + sliderFileSystem = testFileSystem; + } + + } + + public static class TestAppender extends AppenderSkeleton{ + public List events = new ArrayList<>(); + public void close() {} + public boolean requiresLayout() {return 
false;} + @Override + protected void append(LoggingEvent event) { + events.add(event); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestReplaceTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestReplaceTokens.java new file mode 100644 index 0000000..9f94e9f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestReplaceTokens.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.client; + +import org.apache.slider.core.conf.ConfTree; +import org.apache.slider.core.persist.JsonSerDeser; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test token replacement in tokenized configuration files + */ +public class TestReplaceTokens extends Assert { + + static final String PACKAGE = "/org/apache/slider/core/conf/examples/"; + static final String app_configuration = "app_configuration_tokenized.json"; + + /** + * replaceTokens should substitute user and cluster tokens, leaving + * cluster-name placeholders for later resolution + * @throws Throwable + */ + @Test + public void testHelp() throws Throwable { + JsonSerDeser confTreeJsonSerDeser = + new JsonSerDeser<>(ConfTree.class); + ConfTree confTree = confTreeJsonSerDeser.fromResource(PACKAGE + + app_configuration); + SliderClient.replaceTokens(confTree, "testUser", "testCluster"); + // cluster name is resolved later now + assertEquals("hdfs://${CLUSTER_NAME}:8020", + confTree.global.get("site.fs.defaultFS")); + assertEquals("hdfs://${CLUSTER_NAME}:8020", + confTree.global.get("site.fs.default.name")); + assertEquals("testUser", confTree.global.get("site.hbase.user_name")); + assertEquals("testUser", confTree.global.get("site.hbase.another.user")); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java new file mode 100644 index 0000000..62bf12e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.client; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.slider.common.SliderXmlConfKeys; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.buildutils.InstanceBuilder; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.exceptions.SliderException; +import org.apache.slider.core.launch.LaunchedApplication; +import org.apache.slider.core.main.ServiceLauncherBaseTest; +import org.apache.slider.core.persist.LockAcquireFailedException; +import org.apache.slider.server.appmaster.model.mock.MockApplicationId; +import org.easymock.EasyMock; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.easymock.PowerMock; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +@RunWith(PowerMockRunner.class) +@PrepareForTest(SliderUtils.class) +public class TestSliderClientMethods extends ServiceLauncherBaseTest { 
+ protected static final Logger log = + LoggerFactory.getLogger(TestSliderClientMethods.class); + + String AM_ENV = "LD_LIBRARY_PATH"; + String PLACEHOLDER_KEY = "${distro.version}"; + String PLACEHOLDER_SYSTEM_KEY = "DISTRO_VERSION"; + String PLACEHOLDER_VALUE = "1.0.0"; + String AM_ENV_2 = "PATH"; + String PLACEHOLDER_KEY_2 = "${native.version}"; + String PLACEHOLDER_SYSTEM_KEY_2 = "NATIVE_VERSION"; + String PLACEHOLDER_VALUE_2 = "2.0.0"; + + @Test + public void testGeneratePlaceholderKeyValueMap() throws Throwable { + TestSliderClient testSliderClient = new TestSliderClient(); + + PowerMock.mockStatic(System.class); + EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY)) + .andReturn(PLACEHOLDER_VALUE).anyTimes(); + PowerMock.replayAll(); + + Map placeholders = testSliderClient.generatePlaceholderKeyValueMap( + AM_ENV + "=/usr/lib/" + PLACEHOLDER_KEY); + Assert.assertTrue(placeholders.containsKey(PLACEHOLDER_KEY)); + Assert.assertEquals("Should be equal", PLACEHOLDER_VALUE, + placeholders.get(PLACEHOLDER_KEY)); + + PowerMock.verifyAll(); + log.info("Placeholders = {}", placeholders); + } + + @Test + public void testSetAmLaunchEnv() throws Throwable { + TestSliderClient testSliderClient = new TestSliderClient(); + YarnConfiguration conf = SliderUtils.createConfiguration(); + conf.set(SliderXmlConfKeys.KEY_AM_LAUNCH_ENV, AM_ENV + "=/usr/lib/" + + PLACEHOLDER_KEY); + + PowerMock.mockStatic(System.class); + EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY)) + .andReturn(PLACEHOLDER_VALUE); + PowerMock.replayAll(); + + Map amLaunchEnv = testSliderClient.getAmLaunchEnv(conf); + Assert.assertNotNull(amLaunchEnv); + Assert.assertNotNull(amLaunchEnv.get(AM_ENV)); + Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV), + (Shell.WINDOWS ? 
"%" + AM_ENV + "%;" : "$" + AM_ENV + ":") + + "/usr/lib/" + PLACEHOLDER_VALUE); + + PowerMock.verifyAll(); + log.info("amLaunchEnv = {}", amLaunchEnv); + } + + @Test + public void testSetAmLaunchEnvMulti() throws Throwable { + TestSliderClient testSliderClient = new TestSliderClient(); + YarnConfiguration conf = SliderUtils.createConfiguration(); + conf.set(SliderXmlConfKeys.KEY_AM_LAUNCH_ENV, AM_ENV + "=/usr/lib/" + + PLACEHOLDER_KEY + "," + AM_ENV_2 + "=/usr/bin/" + PLACEHOLDER_KEY_2); + + PowerMock.mockStatic(System.class); + EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY)) + .andReturn(PLACEHOLDER_VALUE); + EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY_2)) + .andReturn(PLACEHOLDER_VALUE_2); + PowerMock.replayAll(); + + Map amLaunchEnv = testSliderClient.getAmLaunchEnv(conf); + Assert.assertNotNull(amLaunchEnv); + Assert.assertEquals("Should have 2 envs", amLaunchEnv.size(), 2); + Assert.assertNotNull(amLaunchEnv.get(AM_ENV)); + Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV), + (Shell.WINDOWS ? "%" + AM_ENV + "%;" : "$" + AM_ENV + ":") + + "/usr/lib/" + PLACEHOLDER_VALUE); + Assert.assertNotNull(amLaunchEnv.get(AM_ENV_2)); + Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV_2), + (Shell.WINDOWS ? 
"%" + AM_ENV_2 + "%;" : "$" + AM_ENV_2 + ":") + + "/usr/bin/" + PLACEHOLDER_VALUE_2); + + PowerMock.verifyAll(); + log.info("amLaunchEnv = " + amLaunchEnv); + } + + static class TestSliderClient extends SliderClient { + @Override + protected void persistInstanceDefinition(boolean overwrite, + Path appconfdir, + InstanceBuilder builder) + throws IOException, SliderException, LockAcquireFailedException { + super.persistInstanceDefinition(overwrite, appconfdir, builder); + } + + @Override + public LaunchedApplication launchApplication(String clustername, + Path clusterDirectory, + AggregateConf instanceDefinition, + boolean debugAM, + long lifetime) + throws YarnException, IOException { + return new LaunchedApplication(new MockApplicationId(1), new SliderYarnClientImpl()); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java new file mode 100644 index 0000000..27f46ce --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.client; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.common.params.ActionTokensArgs; +import org.apache.slider.common.params.Arguments; +import org.apache.slider.common.params.SliderActions; +import org.apache.slider.core.exceptions.BadClusterStateException; +import org.apache.slider.core.exceptions.NotFoundException; +import org.apache.slider.core.main.ServiceLauncherBaseTest; +import org.junit.Test; + +import java.util.Arrays; + +/** + * Test the argument parsing/validation logic + */ +public class TestSliderTokensCommand extends ServiceLauncherBaseTest { + + public static YarnConfiguration config = createTestConfig(); + + public static YarnConfiguration createTestConfig() { + YarnConfiguration configuration = new YarnConfiguration(); + configuration.set(YarnConfiguration.RM_ADDRESS, "127.0.0.1:8032"); + return configuration; + } + + @Test + public void testBadSourceArgs() throws Throwable { + launchExpectingException(SliderClient.class, + config, + ActionTokensArgs.DUPLICATE_ARGS, + Arrays.asList(SliderActions.ACTION_TOKENS, + Arguments.ARG_SOURCE, "target/tokens.bin", + Arguments.ARG_OUTPUT, "target/tokens.bin" + )); + } + + @Test + public void testKTNoPrincipal() throws Throwable { + launchExpectingException(SliderClient.class, + config, + ActionTokensArgs.MISSING_KT_PROVIDER, + Arrays.asList(SliderActions.ACTION_TOKENS, + Arguments.ARG_KEYTAB, "target/keytab" + )); + } + + @Test + public void testPrincipalNoKT() throws Throwable { + 
launchExpectingException(SliderClient.class, + config, + ActionTokensArgs.MISSING_KT_PROVIDER, + Arrays.asList(SliderActions.ACTION_TOKENS, + Arguments.ARG_PRINCIPAL, "bob@REALM" + )); + } + + /** + * A missing keytab is an error + * @throws Throwable + */ + @Test + public void testMissingKT() throws Throwable { + Throwable ex = launchExpectingException(SliderClient.class, + config, + TokensOperation.E_NO_KEYTAB, + Arrays.asList(SliderActions.ACTION_TOKENS, + Arguments.ARG_PRINCIPAL, "bob@REALM", + Arguments.ARG_KEYTAB, "target/keytab" + )); + if (!(ex instanceof NotFoundException)) { + throw ex; + } + } + + @Test + public void testMissingSourceFile() throws Throwable { + Throwable ex = launchExpectingException(SliderClient.class, + config, + TokensOperation.E_MISSING_SOURCE_FILE, + Arrays.asList(SliderActions.ACTION_TOKENS, + Arguments.ARG_SOURCE, "target/tokens.bin" + )); + if (!(ex instanceof NotFoundException)) { + throw ex; + } + } + + @Test + public void testListHarmlessWhenInsecure() throws Throwable { + execSliderCommand(0, config, Arrays.asList(SliderActions.ACTION_TOKENS)); + } + + @Test + public void testCreateFailsWhenInsecure() throws Throwable { + Throwable ex = launchExpectingException(SliderClient.class, + config, + TokensOperation.E_INSECURE, + Arrays.asList(SliderActions.ACTION_TOKENS, + Arguments.ARG_OUTPUT, "target/tokens.bin" + )); + if (!(ex instanceof BadClusterStateException)) { + throw ex; + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java new file mode 100644 index 0000000..9e79169 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.common.tools; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +public class TestClusterNames { + + void assertValidName(String name) { + boolean valid = SliderUtils.isClusternameValid(name); + Assert.assertTrue("Clustername '" + name + "' mistakenly declared invalid", + valid); + } + + void assertInvalidName(String name) { + boolean valid = SliderUtils.isClusternameValid(name); + Assert.assertFalse("Clustername '" + name + "' mistakenly declared valid", + valid); + } + + void assertInvalid(List<String> names) { + for (String name : names) { + assertInvalidName(name); + } + } + + void assertValid(List<String> names) { + for (String name : names) { + assertValidName(name); + } + } + + @Test + public void testEmptyName() throws Throwable { + assertInvalidName(""); + } + + @Test + public void testSpaceName() throws Throwable { + assertInvalidName(" "); + } + + + @Test + public void testLeadingHyphen() throws 
Throwable { + assertInvalidName("-hyphen"); + } + + @Test + public void testTitleLetters() throws Throwable { + assertInvalidName("Title"); + } + + @Test + public void testCapitalLetters() throws Throwable { + assertInvalidName("UPPER-CASE-CLUSTER"); + } + + @Test + public void testInnerBraced() throws Throwable { + assertInvalidName("a[a"); + } + + @Test + public void testLeadingBrace() throws Throwable { + assertInvalidName("["); + } + + @Test + public void testNonalphaLeadingChars() throws Throwable { + assertInvalid(Arrays.asList( + "[a", "#", "@", "=", "*", "." + )); + } + + @Test + public void testNonalphaInnerChars() throws Throwable { + assertInvalid(Arrays.asList( + "a[a", "b#", "c@", "d=", "e*", "f.", "g ", "h i" + )); + } + + @Test + public void testClusterValid() throws Throwable { + assertValidName("cluster"); + } + + @Test + public void testValidNames() throws Throwable { + assertValid(Arrays.asList( + "cluster", + "cluster1", + "very-very-very-long-cluster-name", + "c1234567890" + )); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java new file mode 100644 index 0000000..93ba06c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.common.tools; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.slider.common.SliderXmlConfKeys; +import org.apache.slider.test.YarnMiniClusterTestBase; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.util.Map; + +public class TestConfigHelper extends YarnMiniClusterTestBase { + + + @Test + public void testConfigLoaderIteration() throws Throwable { + + String xml = "<configuration><property>" + + "<name>key</name><value>value</value>" + + "<source>programatically</source></property></configuration>"; + InputStream ins = new ByteArrayInputStream(xml.getBytes("UTF8")); + Configuration conf = new Configuration(false); + conf.addResource(ins); + Configuration conf2 = new Configuration(false); + for (Map.Entry<String, String> entry : conf) { + conf2.set(entry.getKey(), entry.getValue(), "src"); + } + + } + + @Test + public void testConfigDeprecation() throws Throwable { + ConfigHelper.registerDeprecatedConfigItems(); + Configuration conf = new Configuration(false); + conf.set(SliderXmlConfKeys.REGISTRY_PATH, "path"); + assertEquals("path", conf.get(SliderXmlConfKeys.REGISTRY_PATH)); + assertEquals("path", conf.get(RegistryConstants.KEY_REGISTRY_ZK_ROOT)); + + conf.set(SliderXmlConfKeys.REGISTRY_ZK_QUORUM, "localhost"); + assertEquals("localhost", conf.get(SliderXmlConfKeys.REGISTRY_ZK_QUORUM)); + assertEquals("localhost", 
conf.get(RegistryConstants + .KEY_REGISTRY_ZK_QUORUM)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java new file mode 100644 index 0000000..4a88fb6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.common.tools; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.test.YarnMiniClusterTestBase; +import org.junit.Test; + +import java.net.URI; + +public class TestConfigHelperHDFS extends YarnMiniClusterTestBase { + + @Test + public void testConfigHelperHDFS() throws Throwable { + YarnConfiguration config = getConfiguration(); + createMiniHDFSCluster("testConfigHelperHDFS", config); + + Configuration conf = new Configuration(false); + conf.set("key","value"); + URI fsURI = new URI(getFsDefaultName()); + Path root = new Path(fsURI); + Path confPath = new Path(root, "conf.xml"); + FileSystem dfs = FileSystem.get(fsURI,config); + ConfigHelper.saveConfig(dfs,confPath, conf); + //load time + Configuration loaded = ConfigHelper.loadConfiguration(dfs,confPath); + log.info(ConfigHelper.dumpConfigToString(loaded)); + assertEquals("value", loaded.get("key")); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java new file mode 100644 index 0000000..f8c2eab --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.common.tools; + +import org.apache.slider.test.SliderTestBase; +import org.junit.Test; + +public class TestExecutionEnvironment extends SliderTestBase { + + @Test + public void testClientEnv() throws Throwable { + SliderUtils.validateSliderClientEnvironment(log); + } + + @Test + public void testWinutils() throws Throwable { + SliderUtils.maybeVerifyWinUtilsValid(); + } + + @Test + public void testServerEnv() throws Throwable { + SliderUtils.validateSliderServerEnvironment(log, true); + } + + @Test + public void testServerEnvNoDependencies() throws Throwable { + SliderUtils.validateSliderServerEnvironment(log, false); + } + + @Test + public void testopenSSLEnv() throws Throwable { + SliderUtils.validateOpenSSLEnv(log); + } + + @Test + public void testValidatePythonEnv() throws Throwable { + SliderUtils.validatePythonEnv(log); + } + + @Test + public void testNativeLibs() throws Throwable { + assertNativeLibrariesPresent(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java new file mode 100644 index 0000000..976cd39 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.common.tools; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.slider.test.SliderTestBase; +import org.junit.Test; + +import java.net.URI; + +public class TestMiscSliderUtils extends SliderTestBase { + + + public static final String CLUSTER1 = "cluster1"; + + @Test + public void testPurgeTempDir() throws Throwable { + + Configuration configuration = new Configuration(); + FileSystem fs = FileSystem.get(new URI("file:///"), configuration); + SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration); + Path inst = sliderFileSystem.createAppInstanceTempPath(CLUSTER1, "001"); + + assertTrue(fs.exists(inst)); + sliderFileSystem.purgeAppInstanceTempFiles(CLUSTER1); + assertFalse(fs.exists(inst)); + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java new file mode 100644 index 0000000..deee95b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.common.tools; + +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.core.exceptions.SliderException; +import org.junit.Test; + +import java.net.ServerSocket; +import java.util.Arrays; +import java.util.List; + +import static org.junit.Assert.*; + +public class TestPortScan { + + @Test + public void testScanPorts() throws Throwable { + + ServerSocket server = new ServerSocket(0); + + try { + int serverPort = server.getLocalPort(); + assertFalse(SliderUtils.isPortAvailable(serverPort)); + int port = SliderUtils.findFreePort(serverPort, 10); + assertTrue(port > 0 && serverPort < port); + } finally { + server.close(); + } + } + + @Test + public void testRequestedPortsLogic() throws Throwable { + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("5,6,8-10, 11,14 ,20 - 22"); + List ports = portScanner.getRemainingPortsToCheck(); + List expectedPorts = + Arrays.asList(5, 6, 8, 9, 10, 11, 14, 20, 21, 22); + assertEquals(expectedPorts, ports); + } + + @Test + public void testRequestedPortsOutOfOrder() throws Throwable { + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("8-10,5,6, 11,20 - 22, 14 "); + List ports = portScanner.getRemainingPortsToCheck(); + List expectedPorts = + Arrays.asList(5, 6, 8, 9, 10, 11, 14, 20, 21, 22); + assertEquals(expectedPorts, ports); + } + + @Test + public void testFindAvailablePortInRange() throws Throwable { + ServerSocket server = new ServerSocket(0); + try { + int serverPort = server.getLocalPort(); + + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("" + (serverPort-1) + "-" + (serverPort + 3)); + int port = portScanner.getAvailablePort(); + assertNotEquals(port, serverPort); + assertTrue(port >= serverPort -1 && port <= serverPort + 3); + } finally { + server.close(); + } + } + + @Test + public void testFindAvailablePortInList() throws Throwable { + ServerSocket server = new ServerSocket(0); + try { 
+ int serverPort = server.getLocalPort(); + + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("" + (serverPort-1) + ", " + (serverPort + 1)); + int port = portScanner.getAvailablePort(); + assertNotEquals(port, serverPort); + assertTrue(port == serverPort -1 || port == serverPort + 1); + } finally { + server.close(); + } + } + + @Test + public void testNoAvailablePorts() throws Throwable { + ServerSocket server1 = new ServerSocket(0); + ServerSocket server2 = new ServerSocket(0); + try { + int serverPort1 = server1.getLocalPort(); + int serverPort2 = server2.getLocalPort(); + + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("" + serverPort1+ ", " + serverPort2); + try { + portScanner.getAvailablePort(); + fail("expected SliderException"); + } catch (SliderException e) { + // expected + } + } finally { + server1.close(); + server2.close(); + } + } + + @Test + public void testPortRemovedFromRange() throws Throwable { + ServerSocket server = new ServerSocket(0); + try { + int serverPort = server.getLocalPort(); + + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("" + (serverPort-1) + "-" + (serverPort + 3)); + int port = portScanner.getAvailablePort(); + assertNotEquals(port, serverPort); + assertTrue(port >= serverPort -1 && port <= serverPort + 3); + assertFalse(portScanner.getRemainingPortsToCheck().contains(port)); + } finally { + server.close(); + } + } + + @Test(expected = BadConfigException.class) + public void testBadRange() throws BadConfigException { + PortScanner portScanner = new PortScanner(); + // note the em dash + portScanner.setPortRange("2000–2010"); + } + + @Test(expected = BadConfigException.class) + public void testEndBeforeStart() throws BadConfigException { + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("2001-2000"); + } + + @Test(expected = BadConfigException.class) + public void testEmptyRange() throws BadConfigException { + PortScanner 
portScanner = new PortScanner(); + portScanner.setPortRange(""); + } + + @Test(expected = BadConfigException.class) + public void testBlankRange() throws BadConfigException { + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange(" "); + } + + @Test + public void testExtraComma() throws BadConfigException { + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("2000-2001, "); + List ports = portScanner.getRemainingPortsToCheck(); + List expectedPorts = Arrays.asList(2000, 2001); + assertEquals(expectedPorts, ports); + } + + @Test + public void testExtraCommas() throws BadConfigException { + PortScanner portScanner = new PortScanner(); + portScanner.setPortRange("2000-2001,, ,2003,"); + List ports = portScanner.getRemainingPortsToCheck(); + List expectedPorts = Arrays.asList(2000, 2001, 2003); + assertEquals(expectedPorts, ports); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java new file mode 100644 index 0000000..8734613 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.slider.common.tools; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.slider.common.SliderXmlConfKeys; +import org.apache.slider.test.SliderTestBase; +import org.junit.Test; + +public class TestSliderFileSystem extends SliderTestBase { + private static Configuration defaultConfiguration() { + return new Configuration(); + } + + private static Configuration createConfigurationWithKV(String key, String value) { + Configuration conf = defaultConfiguration(); + conf.set(key, value); + return conf; + } + + @Test + public void testSliderBasePathDefaultValue() throws Throwable { + Configuration configuration = defaultConfiguration(); + FileSystem fileSystem = FileSystem.get(configuration); + + SliderFileSystem fs2 = new SliderFileSystem(fileSystem, configuration); + assertEquals(fs2.getBaseApplicationPath(), new Path(fileSystem + .getHomeDirectory(), ".slider")); + } + + @Test + public void testSliderBasePathCustomValue() throws Throwable { + Configuration configuration = createConfigurationWithKV(SliderXmlConfKeys + .KEY_SLIDER_BASE_PATH, "/slider/cluster"); + FileSystem fileSystem = FileSystem.get(configuration); + SliderFileSystem fs2 = new SliderFileSystem(fileSystem, configuration); + + assertEquals(fs2.getBaseApplicationPath(), new Path("/slider/cluster")); + } + +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java new file mode 100644 index 0000000..0126798 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.common.tools; + +import org.apache.hadoop.conf.Configuration; +import org.apache.slider.test.SliderTestUtils; +import org.junit.Test; +import org.junit.internal.AssumptionViolatedException; + +public class TestSliderTestUtils extends SliderTestUtils { + + + @Test + public void testAssumeTrue() throws Throwable { + + try { + assume(true, "true"); + } catch (AssumptionViolatedException e) { + throw new Exception(e); + } + } + + @Test + public void testAssumeFalse() throws Throwable { + + try { + assume(false, "false"); + fail("expected an exception"); + } catch (AssumptionViolatedException ignored) { + //expected + } + } + + @Test + public void testAssumeBoolOptionSetInConf() throws Throwable { + Configuration conf = new Configuration(false); + conf.set("key", "true"); + try { + assumeBoolOption(conf, "key", false); + } catch (AssumptionViolatedException e) { + throw new Exception(e); + } + } + + @Test + public void testAssumeBoolOptionUnsetInConf() throws Throwable { + Configuration conf = new Configuration(false); + try { + assumeBoolOption(conf, "key", true); + } catch (AssumptionViolatedException e) { + throw new Exception(e); + } + } + + + @Test + public void testAssumeBoolOptionFalseInConf() throws Throwable { + Configuration conf = new Configuration(false); + conf.set("key", "false"); + try { + assumeBoolOption(conf, "key", true); + fail("expected an exception"); + } catch (AssumptionViolatedException ignored) { + //expected + } + } + + @Test + public void testAssumeBoolOptionFalseUnsetInConf() throws Throwable { + Configuration conf = new Configuration(false); + try { + assumeBoolOption(conf, "key", false); + fail("expected an exception"); + } catch (AssumptionViolatedException ignored) { + //expected + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java new file mode 100644 index 0000000..00b50f0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package org.apache.slider.common.tools;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.util.Shell;
import org.apache.slider.test.YarnMiniClusterTestBase;
import org.junit.Test;

import java.io.File;
import java.io.FileNotFoundException;
import java.net.URI;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

/**
 * Tests of Windows-specific path and process handling.
 * Most tests call {@code assumeWindows()} and so are skipped on
 * other platforms.
 */
public class TestWindowsSupport extends YarnMiniClusterTestBase {

  /** Matches an optional leading slash followed by a drive letter and colon. */
  private static final Pattern hasDriveLetterSpecifier =
      Pattern.compile("^/?[a-zA-Z]:");

  /** A syntactically valid Windows path which is not expected to exist. */
  public static final String windowsFile =
      "C:\\Users\\Administrator\\AppData\\Local\\Temp"
      + "\\junit3180177850133852404\\testpkg\\appdef_1.zip";

  private static boolean hasWindowsDrive(String path) {
    return hasDriveLetterSpecifier.matcher(path).find();
  }

  /**
   * Offset of the first path character after any drive specifier:
   * {@code "/C:/x"} -> 3, {@code "C:/x"} -> 2, otherwise 0.
   */
  private static int startPositionWithoutWindowsDrive(String path) {
    if (hasWindowsDrive(path)) {
      return path.charAt(0) == '/' ? 3 : 2;
    } else {
      return 0;
    }
  }

  @Test
  public void testHasWindowsDrive() throws Throwable {
    assertTrue(hasWindowsDrive(windowsFile));
  }

  @Test
  public void testStartPosition() throws Throwable {
    assertEquals(2, startPositionWithoutWindowsDrive(windowsFile));
  }

  @Test
  public void testPathHandling() throws Throwable {
    assumeWindows();

    Path path = new Path(windowsFile);
    URI uri = path.toUri();
    assertNull(uri.getAuthority());

    Configuration conf = new Configuration();

    FileSystem localfs = FileSystem.get(uri, conf);
    assertTrue(localfs instanceof ChecksumFileSystem);
    try {
      FileStatus stat = localfs.getFileStatus(path);
      fail("expected an exception, got " + stat);
    } catch (FileNotFoundException fnfe) {
      // expected
    }

    // opening the nonexistent file must also fail; try-with-resources
    // closes the stream if the open unexpectedly succeeds
    try (FSDataInputStream appStream = localfs.open(path)) {
      fail("expected an exception, got an open stream: " + appStream);
    } catch (FileNotFoundException fnfe) {
      // expected
    }
  }

  @Test
  public void testExecNonexistentBinary() throws Throwable {
    assumeWindows();
    List<String> commands = Arrays.asList("undefined-application", "--version");
    try {
      exec(0, commands);
      fail("expected an exception");
    } catch (ServiceStateException e) {
      // only swallow the failure when it was caused by the missing binary
      if (!(e.getCause() instanceof FileNotFoundException)) {
        throw e;
      }
    }
  }

  @Test
  public void testExecNonexistentBinary2() throws Throwable {
    assumeWindows();
    assertFalse(doesAppExist(Arrays.asList("undefined-application",
        "--version")));
  }

  @Test
  public void testEmitKillCommand() throws Throwable {
    int result = killJavaProcesses("regionserver", 9);
    // we know the exit code if there is no supported kill operation
    assertTrue(kill_supported || result == -1);
  }

  @Test
  public void testHadoopHomeDefined() throws Throwable {
    assumeWindows();
    String hadoopHome = Shell.getHadoopHome();
    log.info("HADOOP_HOME={}", hadoopHome);
  }

  @Test
  public void testHasWinutils() throws Throwable {
    assumeWindows();
    SliderUtils.maybeVerifyWinUtilsValid();
  }

  @Test
  public void testExecWinutils() throws Throwable {
    assumeWindows();
    String winUtilsPath = Shell.getWinUtilsPath();
    assertTrue(SliderUtils.isSet(winUtilsPath));
    File winUtils = new File(winUtilsPath);
    log.debug("Winutils is at {}", winUtils);

    exec(0, Arrays.asList(winUtilsPath, "systeminfo"));
  }

  @Test
  public void testPath() throws Throwable {
    String path = extractPath();
    log.info("Path value = {}", path);
  }

  @Test
  public void testFindJavac() throws Throwable {
    String name = Shell.WINDOWS ? "javac.exe" : "javac";
    assertNotNull(locateExecutable(name));
  }

  @Test
  public void testHadoopDLL() throws Throwable {
    assumeWindows();
    // split the path
    File exepath = locateExecutable("HADOOP.DLL");
    assertNotNull(exepath);
    log.info("Hadoop DLL at: {}", exepath);
  }

}
package org.apache.slider.common.tools;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
import org.apache.slider.client.SliderClient;
import org.apache.slider.core.zk.ZKIntegration;
import org.apache.slider.test.KeysForTests;
import org.apache.slider.test.YarnZKMiniClusterTestBase;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.util.List;

/**
 * Tests of {@link ZKIntegration} against a per-test micro ZK cluster.
 */
public class TestZKIntegration extends YarnZKMiniClusterTestBase implements
    KeysForTests {
  public static final String USER = KeysForTests.USERNAME;
  public static final int CONNECT_TIMEOUT = 5000;
  private ZKIntegration zki;

  @Before
  public void createCluster() {
    Configuration conf = getConfiguration();
    String name = methodName.getMethodName();
    // one ZK data directory per test method, wiped before use.
    // NB: was "target/zk/${name}" — a Groovy GString leftover which created
    // a literal "${name}" directory shared by every test.
    File zkdir = new File("target/zk/" + name);
    FileUtil.fullyDelete(zkdir);
    conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR,
        zkdir.getAbsolutePath());
    createMicroZKCluster("-" + name, conf);
  }

  @After
  public void closeZKI() throws IOException {
    if (zki != null) {
      zki.close();
      zki = null;
    }
  }

  /**
   * Create and cache a ZK integration instance bound to this test method.
   * @return the new instance (also stored in the {@code zki} field)
   */
  public ZKIntegration initZKI() throws IOException, InterruptedException {
    zki = createZKIntegrationInstance(
        getZKBinding(), methodName.getMethodName(), true, false,
        CONNECT_TIMEOUT);
    return zki;
  }

  @Test
  public void testListUserClustersWithoutAnyClusters() throws Throwable {
    assertHasZKCluster();
    initZKI();
    List<String> clusters = zki.getClusters();
    assertTrue(SliderUtils.isEmpty(clusters));
  }

  @Test
  public void testListUserClustersWithOneCluster() throws Throwable {
    assertHasZKCluster();

    initZKI();
    String userPath = ZKIntegration.mkSliderUserPath(USER);
    String fullPath = zki.createPath(userPath, "/cluster-",
        ZooDefs.Ids.OPEN_ACL_UNSAFE,
        CreateMode.EPHEMERAL_SEQUENTIAL);
    log.info("Ephemeral path {}", fullPath);
    List<String> clusters = zki.getClusters();
    assertEquals(1, clusters.size());
    assertTrue(fullPath.endsWith(clusters.get(0)));
  }

  @Test
  public void testListUserClustersWithTwoCluster() throws Throwable {
    initZKI();
    String userPath = ZKIntegration.mkSliderUserPath(USER);
    String c1 = createEphemeralChild(zki, userPath);
    log.info("Ephemeral path {}", c1);
    String c2 = createEphemeralChild(zki, userPath);
    log.info("Ephemeral path {}", c2);
    List<String> clusters = zki.getClusters();
    assertEquals(2, clusters.size());
    // sequential znodes: accept either listing order
    assertTrue((c1.endsWith(clusters.get(0)) && c2.endsWith(clusters.get(1)))
        || (c1.endsWith(clusters.get(1)) && c2.endsWith(clusters.get(0))));
  }

  @Test
  public void testCreateAndDeleteDefaultZKPath() throws Throwable {
    MockSliderClient client = new MockSliderClient();

    String path = client.createZookeeperNodeInner("cl1", true);
    zki = client.getLastZKIntegration();

    String zkPath = ZKIntegration.mkClusterPath(USER, "cl1");
    assertEquals("zkPath must be as expected",
        "/services/slider/users/" + USER + "/cl1", zkPath);
    assertEquals(zkPath, path);
    // dry run: no node was created, so no integration instance either
    assertNull("ZKIntegration should be null.", zki);
    zki = createZKIntegrationInstance(getZKBinding(), "cl1", true, false,
        CONNECT_TIMEOUT);
    assertFalse(zki.exists(zkPath));

    // real run: the node must now exist and be usable
    path = client.createZookeeperNodeInner("cl1", false);
    zki = client.getLastZKIntegration();
    assertNotNull(zki);
    assertEquals("zkPath must be as expected",
        "/services/slider/users/" + USER + "/cl1", zkPath);
    assertEquals(zkPath, path);
    assertTrue(zki.exists(zkPath));
    zki.createPath(zkPath, "/cn", ZooDefs.Ids.OPEN_ACL_UNSAFE,
        CreateMode.PERSISTENT);
    assertTrue(zki.exists(zkPath + "/cn"));
    client.deleteZookeeperNode("cl1");
    assertFalse(zki.exists(zkPath));
  }

  /**
   * Create an ephemeral sequential child under the given user path.
   * @return the full path of the created znode
   */
  public String createEphemeralChild(ZKIntegration zki, String userPath)
      throws KeeperException, InterruptedException {
    return zki.createPath(userPath, "/cluster-",
        ZooDefs.Ids.OPEN_ACL_UNSAFE,
        CreateMode.EPHEMERAL_SEQUENTIAL);
  }

  /**
   * Slider client stub which records the last ZK integration instance it
   * handed out, so tests can inspect it.
   */
  public class MockSliderClient extends SliderClient {
    private ZKIntegration zki;

    @Override
    public String getUsername() {
      return USER;
    }

    @Override
    protected ZKIntegration getZkClient(String clusterName, String user) {
      try {
        zki = createZKIntegrationInstance(getZKBinding(), clusterName, true,
            false, CONNECT_TIMEOUT);
      } catch (Exception e) {
        fail("creating ZKIntegration threw an exception");
      }
      return zki;
    }

    @Override
    public Configuration getConfig() {
      return new Configuration();
    }

    public ZKIntegration getLastZKIntegration() {
      return zki;
    }

  }

}
package org.apache.slider.core.conf;

import org.apache.slider.core.persist.JsonSerDeser;
import org.apache.slider.providers.slideram.SliderAMClientProvider;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Names of the example configuration resources used in tests, plus
 * helpers to load them.
 */
public class ExampleConfResources {

  public static final String overridden = "overridden.json";
  public static final String overriddenRes = "overridden-resolved.json";
  public static final String internal = "internal.json";
  public static final String internalRes = "internal-resolved.json";
  public static final String app_configuration = "app_configuration.json";
  public static final String app_configurationRes =
      "app_configuration-resolved.json";
  public static final String resources = "resources.json";
  public static final String empty = "empty.json";

  /** Classpath package containing the example files. */
  public static final String PACKAGE = "/org/apache/slider/core/conf/examples/";

  public static final String[] all_examples = {overridden, overriddenRes,
      internal, internalRes, app_configuration, app_configurationRes,
      resources, empty};

  /** Full classpath resource names: all examples plus the AM templates. */
  public static final List<String> all_example_resources = new ArrayList<>();
  static {
    for (String example : all_examples) {
      all_example_resources.add(PACKAGE + example);
    }

    all_example_resources.add(SliderAMClientProvider.RESOURCES_JSON);
    all_example_resources.add(SliderAMClientProvider.INTERNAL_JSON);
    all_example_resources.add(SliderAMClientProvider.APPCONF_JSON);
  }

  // shared serializer; other tests in this package already share a static
  // JsonSerDeser across cases
  private static final JsonSerDeser<ConfTree> confTreeJsonSerDeser =
      new JsonSerDeser<>(ConfTree.class);

  /**
   * Build up an aggregate conf by loading in the details of the individual
   * resources and then aggregating them.
   * @return a new instance
   * @throws IOException on a failure to load any resource
   */
  public static AggregateConf loadExampleAggregateResource()
      throws IOException {
    // delegate to loadResource() rather than repeating the parse logic
    return new AggregateConf(
        loadResource(resources),
        loadResource(app_configuration),
        loadResource(internal));
  }

  /**
   * Load a single named example resource from {@link #PACKAGE}.
   * @param name example filename
   * @return the parsed configuration tree
   * @throws IOException on a failure to load/parse
   */
  static ConfTree loadResource(String name) throws IOException {
    return confTreeJsonSerDeser.fromResource(PACKAGE + name);
  }
}
package org.apache.slider.core.conf;

import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.core.persist.JsonSerDeser;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.util.Arrays;
import java.util.Collection;


/**
 * Parameterized test which loads every example JSON resource and verifies
 * that it parses, resolves and validates.
 */
@RunWith(value = Parameterized.class)
public class TestConfTreeLoadExamples extends Assert {

  static final JsonSerDeser<ConfTree> confTreeJsonSerDeser =
      new JsonSerDeser<>(ConfTree.class);

  /** Full classpath name of the resource under test. */
  private final String resource;

  public TestConfTreeLoadExamples(String resource) {
    this.resource = resource;
  }

  @Parameterized.Parameters
  public static Collection<String[]> filenames() {
    String[][] stringArray = new String[ExampleConfResources
        .all_example_resources.size()][1];
    int i = 0;
    for (String s : ExampleConfResources.all_example_resources) {
      stringArray[i++][0] = s;
    }
    return Arrays.asList(stringArray);
  }

  @Test
  public void testLoadResource() throws Throwable {
    ConfTree confTree = confTreeJsonSerDeser.fromResource(resource);
    ConfTreeOperations ops = new ConfTreeOperations(confTree);
    ops.resolve();
    ops.validate();
  }

  @Test
  public void testLoadResourceWithValidator() throws Throwable {
    ConfTree confTree = confTreeJsonSerDeser.fromResource(resource);
    ConfTreeOperations ops = new ConfTreeOperations(confTree);
    ops.resolve();
    if (resource.endsWith("resources.json")) {
      // these should pass since they are configured correctly with "yarn."
      // properties
      ops.validate(new ResourcesInputPropertiesValidator());
    } else if (resource.contains("app_configuration")) {
      // NB: resource is a full classpath name ("/org/.../app_configuration*"),
      // so the previous startsWith("app_configuration") could never match and
      // this branch was dead; a substring match selects the template files
      ops.validate(new TemplateInputPropertiesValidator());
    } else {
      // these have properties with other prefixes so they should generate
      // BadConfigExceptions
      try {
        ops.validate(new ResourcesInputPropertiesValidator());
        if (!resource.endsWith(ExampleConfResources.empty)) {
          fail(resource + " should have generated validation exception");
        }
      } catch (BadConfigException e) {
        // expected: ignore
      }

    }
  }
}
package org.apache.slider.core.conf;

import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;

import static org.apache.slider.api.InternalKeys.CHAOS_MONKEY_INTERVAL;
import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS;
import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS;
import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES;
import static org.apache.slider.core.conf.ExampleConfResources.internal;
import static org.apache.slider.core.conf.ExampleConfResources.overridden;

/**
 * Tests of option resolution in a ConfTree: how global options are merged
 * into per-component maps, time-interval option parsing, and prefix queries.
 */
public class TestConfTreeResolve extends Assert {
  protected static final Logger log =
      LoggerFactory.getLogger(TestConfTreeResolve.class);

  @Test
  public void testOverride() throws Throwable {

    ConfTree orig = ExampleConfResources.loadResource(overridden);

    ConfTreeOperations origOperations = new ConfTreeOperations(orig);
    origOperations.validate();


    MapOperations global = origOperations.getGlobalOptions();
    assertEquals("a", global.get("g1"));
    assertEquals("b", global.get("g2"));

    // before resolution each component only holds its own entries:
    // "simple" declares nothing of its own
    MapOperations simple = origOperations.getMandatoryComponent("simple");
    assertEquals(0, simple.size());

    // "master" overrides g1 locally
    MapOperations master = origOperations.getMandatoryComponent("master");
    assertEquals("m", master.get("name"));
    assertEquals("overridden", master.get("g1"));

    // "worker" overrides g1 and has no g2 yet
    MapOperations worker = origOperations.getMandatoryComponent("worker");
    log.info("worker = {}", worker);
    assertEquals(3, worker.size());

    assertEquals("worker", worker.get("name"));
    assertEquals("overridden-by-worker", worker.get("g1"));
    assertNull(worker.get("g2"));
    assertEquals("1000", worker.get("timeout"));

    // here is the resolution
    origOperations.resolve();

    // globals are unchanged by resolution
    global = origOperations.getGlobalOptions();
    log.info("global = {}", global);
    assertEquals("a", global.get("g1"));
    assertEquals("b", global.get("g2"));

    // "simple" now picks up both globals
    simple = origOperations.getMandatoryComponent("simple");
    assertEquals(2, simple.size());
    simple.getMandatoryOption("g1");
    assertNotNull(simple.get("g1"));


    // component-level overrides win; missing keys are filled from globals
    master = origOperations.getMandatoryComponent("master");
    log.info("master = {}", master);
    assertEquals(3, master.size());
    assertEquals("m", master.get("name"));
    assertEquals("overridden", master.get("g1"));
    assertEquals("b", master.get("g2"));

    worker = origOperations.getMandatoryComponent("worker");
    log.info("worker = {}", worker);
    assertEquals(4, worker.size());

    assertEquals("worker", worker.get("name"));
    assertEquals("overridden-by-worker", worker.get("g1"));
    assertEquals("b", worker.get("g2"));
    assertEquals("1000", worker.get("timeout"));

  }

  @Test
  public void testTimeIntervalLoading() throws Throwable {

    ConfTree orig = ExampleConfResources.loadResource(internal);

    MapOperations internals = new MapOperations("internal", orig.global);
    // the example sets the interval via its ".seconds" suffix option
    int s = internals.getOptionInt(
        CHAOS_MONKEY_INTERVAL + MapOperations.SECONDS,
        0);
    assertEquals(60, s);
    // the aggregate time-range lookup must yield the same 60 seconds
    long monkeyInterval = internals.getTimeRange(
        CHAOS_MONKEY_INTERVAL,
        DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS,
        DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS,
        DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES,
        0);
    assertEquals(60L, monkeyInterval);
  }

  @Test
  public void testPrefix() throws Throwable {
    ConfTree orig = ExampleConfResources.loadResource(overridden);
    ConfTreeOperations cto = new ConfTreeOperations(orig);
    cto.resolve();
    // after resolution the worker component holds g1 (local) and g2 (global)
    Map prefixed =
        cto.getComponent("worker").prefixedWith("g");
    assertEquals(2, prefixed.size());
    assertEquals("overridden-by-worker", prefixed.get("g1"));
  }
}
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java index b955931..cb51a82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java @@ -18,12 +18,6 @@ package org.apache.slider.core.launch; -import java.lang.reflect.Method; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.client.api.YarnClientApplication; @@ -35,6 +29,12 @@ import org.junit.Before; import org.junit.Test; +import java.lang.reflect.Method; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + public class TestAppMasterLauncher { SliderYarnClientImpl mockYarnClient; YarnClientApplication yarnClientApp; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java index a8f6b26..c11f493 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java @@ -18,11 +18,6 @@ package org.apache.slider.core.launch; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.client.api.YarnClientApplication; @@ -35,6 +30,11 @@ import org.junit.Before; import org.junit.Test; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + public class TestAppMasterLauncherWithAmReset { SliderYarnClientImpl mockYarnClient; YarnClientApplication yarnClientApp; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/main/ServiceLauncherBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/main/ServiceLauncherBaseTest.java new file mode 100644 index 0000000..067984e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/main/ServiceLauncherBaseTest.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
package org.apache.slider.core.main;

import org.apache.slider.test.SliderTestBase;

/**
 * Base class for tests that use the service launcher.
 * Currently adds nothing beyond {@link SliderTestBase}; it exists as a
 * shared superclass for launcher tests.
 */
public class ServiceLauncherBaseTest extends SliderTestBase {


}
package org.apache.slider.core.persist;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.common.tools.CoreFileSystem;
import org.apache.slider.test.YarnMiniClusterTestBase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * Tests of the {@link ConfPersister} read/write lock protocol against a
 * mini HDFS cluster. Each test uses its own cluster directory so lock
 * files created by one test cannot leak into another.
 */
public class TestConfPersisterLocksHDFS extends YarnMiniClusterTestBase {
  public static MiniDFSCluster hdfs;
  public static YarnConfiguration conf = new YarnConfiguration();
  public static CoreFileSystem coreFileSystem;
  public static URI fsURI;
  public static FileSystem dfsClient;

  @BeforeClass
  public static void createCluster() throws IOException, URISyntaxException {
    hdfs = buildMiniHDFSCluster("TestConfPersister", conf);

    fsURI = new URI(buildFsDefaultName(hdfs));
    dfsClient = FileSystem.get(fsURI, conf);
    coreFileSystem = new CoreFileSystem(dfsClient, conf);
  }

  @AfterClass
  public static void destroyCluster() {
    if (hdfs != null) {
      hdfs.shutdown();
    }
    hdfs = null;
  }

  /**
   * Create the persister. This also creates the destination directory.
   * Every test must pass a name unique to itself: the lock files live in
   * the cluster directory, so shared names mean shared locks.
   * @param name name of cluster
   * @return a conf persister
   */
  public ConfPersister createPersister(String name) throws IOException {
    Path path = coreFileSystem.buildClusterDirPath(name);
    ConfPersister persister = new ConfPersister(coreFileSystem, path);
    coreFileSystem.getFileSystem().mkdirs(path);
    return persister;
  }

  @Test
  public void testReleaseNonexistentWritelock() throws Exception {
    ConfPersister persister =
        createPersister("testReleaseNonexistentWritelock");
    assertFalse(persister.releaseWritelock());
  }


  @Test
  public void testAcqRelWriteLock() throws Throwable {
    ConfPersister persister = createPersister("testAcqRelWriteLock");
    persister.acquireWritelock();
    assertTrue(persister.releaseWritelock());
    // a second release is a no-op
    assertFalse(persister.releaseWritelock());
  }

  @Test
  public void testSecondWriteLockAcqFails() throws Throwable {
    ConfPersister persister = createPersister("testSecondWriteLockAcqFails");
    persister.acquireWritelock();
    try {
      persister.acquireWritelock();
      fail("write lock acquired twice");
    } catch (LockAcquireFailedException lafe) {
      //expected
      assertTrue(lafe.getPath().toString().endsWith(Filenames.WRITELOCK));
    }
    assertTrue(persister.releaseWritelock());

    //now we can ask for it
    persister.acquireWritelock();
  }

  @Test
  public void testReleaseNonexistentReadlockOwner() throws Exception {
    // unique directory: previously shared "testReleaseNonexistentReadlock"
    // with the sibling test below
    ConfPersister persister =
        createPersister("testReleaseNonexistentReadlockOwner");
    assertFalse(persister.releaseReadlock(true));
  }

  @Test
  public void testReleaseNonexistentReadlock() throws Exception {
    ConfPersister persister =
        createPersister("testReleaseNonexistentReadlock");
    assertFalse(persister.releaseReadlock(false));
  }

  @Test
  public void testAcqRelReadlock() throws Exception {
    ConfPersister persister = createPersister("testAcqRelReadlock");
    assertTrue(persister.acquireReadLock());
    assertTrue(persister.readLockExists());

    // a non-owner release does not remove the lock
    assertFalse(persister.releaseReadlock(false));
    assertTrue(persister.readLockExists());
    assertTrue(persister.releaseReadlock(true));
  }

  @Test
  public void testAcqAcqRelReadlock() throws Exception {
    // unique directory: previously reused "testAcqRelReadlock"
    ConfPersister persister = createPersister("testAcqAcqRelReadlock");
    assertTrue(persister.acquireReadLock());
    assertTrue(persister.readLockExists());
    // a second acquire while the lock exists reports false
    assertFalse(persister.acquireReadLock());
    assertTrue(persister.readLockExists());

    assertFalse(persister.releaseReadlock(false));
    assertTrue(persister.readLockExists());
    assertTrue(persister.releaseReadlock(true));
    assertFalse(persister.readLockExists());
  }

  @Test
  public void testAcqAcqRelReadlockOtherOrderOfRelease() throws Exception {
    // unique directory: previously reused "testAcqRelReadlock"
    ConfPersister persister =
        createPersister("testAcqAcqRelReadlockOtherOrderOfRelease");
    assertTrue(persister.acquireReadLock());
    assertTrue(persister.readLockExists());
    assertFalse(persister.acquireReadLock());
    assertTrue(persister.readLockExists());

    // owner release first: the subsequent non-owner release finds no lock
    assertTrue(persister.releaseReadlock(true));
    assertFalse(persister.readLockExists());
    assertFalse(persister.releaseReadlock(false));

  }


  @Test
  public void testNoReadlockWhenWriteHeld() throws Throwable {
    ConfPersister persister = createPersister("testNoReadlockWhenWriteHeld");
    persister.acquireWritelock();
    try {
      persister.acquireReadLock();
      fail("read lock acquired");
    } catch (LockAcquireFailedException lafe) {
      //expected
      assertWritelockBlocked(lafe);
    }
    assertTrue(persister.releaseWritelock());
    assertFalse(persister.writelockExists());

    //now we can ask for it
    persister.acquireReadLock();
  }

  /** Assert the failure was caused by an existing write lock. */
  public void assertWritelockBlocked(LockAcquireFailedException lafe) {
    assertTrue(lafe.getPath().toString().endsWith(Filenames.WRITELOCK));
  }

  /** Assert the failure was caused by an existing read lock. */
  public void assertReadlockBlocked(LockAcquireFailedException lafe) {
    assertTrue(lafe.getPath().toString().endsWith(Filenames.READLOCK));
  }

  @Test
  public void testNoWritelockWhenReadHeld() throws Throwable {
    ConfPersister persister = createPersister("testNoWritelockWhenReadHeld");
    assertTrue(persister.acquireReadLock());
    try {
      persister.acquireWritelock();
      fail("write lock acquired");
    } catch (LockAcquireFailedException lafe) {
      //expected
      assertReadlockBlocked(lafe);
    }
    assertTrue(persister.releaseReadlock(true));

    //now we can ask for it
    persister.acquireWritelock();
  }


}
+ */ + +package org.apache.slider.core.persist; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.common.tools.CoreFileSystem; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.conf.ConfTree; +import org.apache.slider.core.conf.ConfTreeOperations; +import org.apache.slider.core.conf.ExampleConfResources; +import org.apache.slider.core.conf.MapOperations; +import org.apache.slider.core.exceptions.SliderException; +import org.apache.slider.test.YarnMiniClusterTestBase; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +public class TestConfPersisterReadWrite extends YarnMiniClusterTestBase { + private static YarnConfiguration conf = new YarnConfiguration(); + static CoreFileSystem coreFileSystem; + static URI fsURI; + static FileSystem dfsClient; + static final JsonSerDeser confTreeJsonSerDeser = + new JsonSerDeser<>(ConfTree.class); + AggregateConf aggregateConf = ExampleConfResources + .loadExampleAggregateResource(); + + + public TestConfPersisterReadWrite() throws IOException { + } + + @BeforeClass + public static void createCluster() throws URISyntaxException, IOException { + fsURI = new URI(buildFsDefaultName(null)); + dfsClient = FileSystem.get(fsURI, conf); + coreFileSystem = new CoreFileSystem(dfsClient, conf); + } + + /** + * Create the persister. 
This also creates the destination directory + * @param name name of cluster + * @return a conf persister + */ + public ConfPersister createPersister(String name) throws IOException { + Path path = coreFileSystem.buildClusterDirPath(name); + ConfPersister persister = new ConfPersister( + coreFileSystem, + path); + coreFileSystem.getFileSystem().mkdirs(path); + return persister; + } + + @Test + public void testSaveLoadEmptyConf() throws Throwable { + AggregateConf aggregateConf = new AggregateConf(); + + ConfPersister persister = createPersister("testSaveLoad"); + persister.save(aggregateConf, null); + AggregateConf loaded = new AggregateConf(); + persister.load(loaded); + loaded.validate(); + } + + + @Test + public void testSaveLoadTestConf() throws Throwable { + ConfPersister persister = createPersister("testSaveLoadTestConf"); + persister.save(aggregateConf, null); + AggregateConf loaded = new AggregateConf(); + persister.load(loaded); + loaded.validate(); + } + + + + @Test + public void testSaveLoadTestConfResolveAndCheck() throws Throwable { + ConfTreeOperations appConfOperations = aggregateConf.getAppConfOperations(); + appConfOperations.getMandatoryComponent("master").put("PATH", "."); + ConfPersister persister = createPersister("testSaveLoadTestConfResolveAndCheck"); + persister.save(aggregateConf, null); + AggregateConf loaded = new AggregateConf(); + persister.load(loaded); + loaded.validate(); + loaded.resolve(); + ConfTreeOperations resources = loaded.getResourceOperations(); + MapOperations master = resources.getMandatoryComponent("master"); + assertEquals("1024", master.get("yarn.memory")); + + ConfTreeOperations appConfOperations2 = loaded.getAppConfOperations(); + assertEquals(".", appConfOperations2.getMandatoryComponent("master") + .get("PATH")); + } + + @Test + public void testSaveFailsIfWritelocked() throws Throwable { + ConfPersister persister = createPersister("testSaveFailsIfWritelocked"); + persister.releaseWritelock(); + persister.acquireWritelock(); + try
{ + expectSaveToFailOnLock(persister, aggregateConf); + } finally { + persister.releaseWritelock(); + } + } + + @Test + public void testSaveFailsIfReadlocked() throws Throwable { + ConfPersister persister = createPersister("testSaveFailsIfReadlocked"); + persister.releaseWritelock(); + persister.acquireReadLock(); + try { + expectSaveToFailOnLock(persister, aggregateConf); + } finally { + persister.releaseReadlock(true); + } + } + + @Test + public void testLoadFailsIfWritelocked() throws Throwable { + ConfPersister persister = createPersister("testLoadFailsIfWritelocked"); + persister.acquireWritelock(); + try { + expectLoadToFailOnLock(persister, aggregateConf); + } finally { + persister.releaseWritelock(); + } + } + + @Test + public void testLoadFailsIfDestDoesNotExist() throws Throwable { + ConfPersister persister = createPersister + ("testLoadFailsIfDestDoesNotExist"); + try { + persister.load(aggregateConf); + fail("expected load to fail to find a file"); + } catch (FileNotFoundException e) { + //expected + } + } + + @Test + public void testLoadSucceedsIfReadlocked() throws Throwable { + ConfPersister persister = createPersister("testLoadSucceedsIfReadlocked"); + persister.releaseReadlock(true); + try { + persister.save(aggregateConf, null); + persister.acquireReadLock(); + AggregateConf loaded = new AggregateConf(); + persister.load(loaded); + loaded.validate(); + loaded.resolve(); + } finally { + persister.releaseReadlock(true); + } + } + + public void expectSaveToFailOnLock( + ConfPersister persister, + AggregateConf aggregateConf) throws IOException, SliderException { + try { + persister.save(aggregateConf, null); + fail("expected save to fail to get a lock"); + } catch (LockAcquireFailedException lafe) { + //expected + } + } + + + public void expectLoadToFailOnLock( + ConfPersister persister, + AggregateConf aggregateConf) throws IOException, SliderException { + try { + persister.load(aggregateConf); + fail("expected load to fail to get a lock"); + }
catch (LockAcquireFailedException lafe) { + //expected + } + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java new file mode 100644 index 0000000..48fb9ce --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.other; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.util.DiskChecker; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; +import org.apache.slider.test.YarnMiniClusterTestBase; +import org.junit.After; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * This test class exists to look at permissions of the filesystem, especially + * that created by Mini YARN clusters. On some windows jenkins machines, + * YARN actions were failing as the directories had the wrong permissions + * (i.e. 
too lax) + */ +public class TestFilesystemPermissions extends YarnMiniClusterTestBase { + + static final Logger LOG = LoggerFactory.getLogger(TestFilesystemPermissions + .class); + + List filesToDelete = new ArrayList<>(); + + @After + public void deleteFiles() { + for (File f : filesToDelete) { + FileUtil.fullyDelete(f, true); + } + } + + @Test + public void testJavaFSOperations() throws Throwable { + assertNativeLibrariesPresent(); + File subdir = testDir(); + subdir.mkdir(); + assertTrue(subdir.isDirectory()); + assertTrue(FileUtil.canRead(subdir)); + assertTrue(FileUtil.canWrite(subdir)); + assertTrue(FileUtil.canExecute(subdir)); + } + + @Test + public void testDiskCheckerOperations() throws Throwable { + assertNativeLibrariesPresent(); + File subdir = testDir(); + subdir.mkdir(); + DiskChecker checker = new DiskChecker(); + checker.checkDir(subdir); + } + + @Test + public void testDiskCheckerMkdir() throws Throwable { + assertNativeLibrariesPresent(); + File subdir = testDir(); + subdir.mkdirs(); + DiskChecker checker = new DiskChecker(); + checker.checkDir(subdir); + } + + /** + * Get a test dir for this method; one that will be deleted on teardown + * @return a filename unique to this test method + */ + File testDir() { + File parent = new File("target/testfspermissions"); + parent.mkdir(); + File testdir = new File(parent, methodName.getMethodName()); + filesToDelete.add(testdir); + return testdir; + } + + + @Test + public void testPermsMap() throws Throwable { + File dir = testDir(); + String diruri = dir.toURI().toString(); + FileContext lfs = createLocalFS(dir, getConfiguration()); + getLocalDirsPathPermissionsMap(lfs, diruri); + } + + @Test + public void testInitLocaldir() throws Throwable { + File dir = testDir(); + String diruri = dir.toURI().toString(); + FileContext lfs = createLocalFS(dir, getConfiguration()); + initializeLocalDir(lfs, diruri); + List localDirs = getInitializedLocalDirs(lfs, Arrays.asList + (diruri)); + assertEquals(1, 
localDirs.size()); + } + + + @Test + public void testValidateMiniclusterPerms() throws Throwable { + int numLocal = 1; + String cluster = createMiniCluster("", getConfiguration(), 1, numLocal, 1, + false); + File workDir = miniCluster.getTestWorkDir(); + List localdirs = new ArrayList<>(); + for (File file : workDir.listFiles()) { + if (file.isDirectory() && file.getAbsolutePath().contains("-local")) { + // local dir + localdirs.add(file); + } + } + assertEquals(numLocal, localdirs.size()); + FileContext lfs = createLocalFS(workDir, getConfiguration()); + for (File file : localdirs) { + checkLocalDir(lfs, file.toURI().toString()); + } + } + + FileContext createLocalFS(File dir, Configuration conf) + throws UnsupportedFileSystemException { + return FileContext.getFileContext(dir.toURI(), conf); + } + + /** + * extracted from ResourceLocalizationService + * @param lfs + * @param localDir + * @return perms map + * @see ResourceLocalizationService + */ + private Map getLocalDirsPathPermissionsMap( + FileContext lfs, + String localDir) { + Map localDirPathFsPermissionsMap = new HashMap<>(); + + FsPermission defaultPermission = + FsPermission.getDirDefault().applyUMask(lfs.getUMask()); + FsPermission nmPrivatePermission = + ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask()); + + Path userDir = new Path(localDir, ContainerLocalizer.USERCACHE); + Path fileDir = new Path(localDir, ContainerLocalizer.FILECACHE); + Path sysDir = new Path( + localDir, + ResourceLocalizationService.NM_PRIVATE_DIR); + + localDirPathFsPermissionsMap.put(userDir, defaultPermission); + localDirPathFsPermissionsMap.put(fileDir, defaultPermission); + localDirPathFsPermissionsMap.put(sysDir, nmPrivatePermission); + return localDirPathFsPermissionsMap; + } + + private boolean checkLocalDir(FileContext lfs, String localDir) + throws IOException { + + Map pathPermissionMap = + getLocalDirsPathPermissionsMap(lfs, localDir); + + for (Map.Entry entry : pathPermissionMap.entrySet()) { + 
FileStatus status; + status = lfs.getFileStatus(entry.getKey()); + + if (!status.getPermission().equals(entry.getValue())) { + String msg = + "Permissions incorrectly set for dir " + entry.getKey() + + ", should be " + entry.getValue() + ", actual value = " + + status.getPermission(); + throw new YarnRuntimeException(msg); + } + } + return true; + } + + + private void initializeLocalDir(FileContext lfs, String localDir) + throws IOException { + + Map pathPermissionMap = + getLocalDirsPathPermissionsMap(lfs, localDir); + for (Map.Entry entry : pathPermissionMap.entrySet()) { + FileStatus status; + try { + status = lfs.getFileStatus(entry.getKey()); + } + catch (FileNotFoundException fs) { + status = null; + } + + if (status == null) { + lfs.mkdir(entry.getKey(), entry.getValue(), true); + status = lfs.getFileStatus(entry.getKey()); + } + FsPermission perms = status.getPermission(); + if (!perms.equals(entry.getValue())) { + lfs.setPermission(entry.getKey(), entry.getValue()); + } + } + } + + synchronized private List getInitializedLocalDirs(FileContext lfs, + List dirs) throws IOException { + List checkFailedDirs = new ArrayList(); + for (String dir : dirs) { + try { + checkLocalDir(lfs, dir); + } catch (YarnRuntimeException e) { + checkFailedDirs.add(dir); + } + } + for (String dir : checkFailedDirs) { + LOG.info("Attempting to initialize " + dir); + initializeLocalDir(lfs, dir); + checkLocalDir(lfs, dir); + } + return dirs; + } + + + private void createDir(FileContext localFs, Path dir, FsPermission perm) + throws IOException { + if (dir == null) { + return; + } + try { + localFs.getFileStatus(dir); + } catch (FileNotFoundException e) { + createDir(localFs, dir.getParent(), perm); + localFs.mkdir(dir, perm, false); + if (!perm.equals(perm.applyUMask(localFs.getUMask()))) { + localFs.setPermission(dir, perm); + } + } + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java new file mode 100644 index 0000000..aea38f3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.other; + +import org.apache.slider.test.SliderTestUtils; +import org.apache.slider.tools.TestUtility; +import org.junit.Test; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; + +/** + * This test exists to diagnose local FS permissions + */ +public class TestLocalDirStatus extends SliderTestUtils { + + + public static final int SIZE = 0x200000; + + @Test + public void testTempDir() throws Throwable { + File tmpf = null; + try { + tmpf = File.createTempFile("testl", ".bin"); + createAndReadFile(tmpf, SIZE); + tmpf.delete(); + assertFalse(tmpf.exists()); + } finally { + if (tmpf != null) { + tmpf.delete(); + } + } + } + + @Test + public void testTargetDir() throws Throwable { + File target = target(); + File tmpf = null; + try { + tmpf = File.createTempFile("testl", ".bin", target); + createAndReadFile(tmpf, SIZE); + tmpf.delete(); + assertFalse(tmpf.exists()); + } finally { + if (tmpf != null) { + tmpf.delete(); + } + + } + } + + public File target() { + File target = new File("target").getAbsoluteFile(); + assertTrue(target.exists()); + return target; + } + + @Test + public void testRenameInTargetDir() throws Throwable { + File target = target(); + File tmpf = null; + File dst= null; + try { + tmpf = File.createTempFile("testl", ".bin", target); + dst = File.createTempFile("test-dest", ".bin", target); + createRenameAndReadFile(tmpf, dst, SIZE); + assertFalse(tmpf.exists()); + dst.delete(); + } finally { + if (tmpf != null) { + tmpf.delete(); + } + if (dst != null) { + dst.delete(); + } + } + } + + @Test + public void testRenameInTmpDir() throws Throwable { + File tmpf = null; + File dst= null; + try { + tmpf = File.createTempFile("testl", ".bin"); + dst = File.createTempFile("test-dest", ".bin"); + createRenameAndReadFile(tmpf, dst, SIZE); + assertFalse(tmpf.exists()); + dst.delete(); + } finally { + if (tmpf != null) { + tmpf.delete(); + } + if (dst != null) { 
+ dst.delete(); + } + } + } + + protected void createAndReadFile(File path, int len) throws IOException { + byte[] dataset = TestUtility.dataset(len, 32, 128); + writeFile(path, dataset); + assertTrue(path.exists()); + assertEquals(len, path.length()); + byte[] persisted = readFile(path); + TestUtility.compareByteArrays(dataset, persisted, len); + } + + protected void createRenameAndReadFile(File src, File dst , int len) + throws IOException { + byte[] dataset = TestUtility.dataset(len, 32, 128); + writeFile(src, dataset); + assertTrue(src.exists()); + assertEquals(len, src.length()); + dst.delete(); + assertFalse(dst.exists()); + assertTrue(src.renameTo(dst)); + assertEquals(len, dst.length()); + byte[] persisted = readFile(dst); + TestUtility.compareByteArrays(dataset, persisted, len); + } + + protected void writeFile(File path, byte[] dataset) + throws IOException { + FileOutputStream out = new FileOutputStream(path); + try { + out.write(dataset); + out.flush(); + } finally { + out.close(); + } + } + + protected byte[] readFile(File path) throws IOException { + assertTrue(path.getAbsoluteFile().exists()); + assertTrue(path.getAbsoluteFile().isFile()); + int len = (int)path.length(); + byte[] dataset = new byte[len]; + FileInputStream ins = new FileInputStream(path); + try { + ins.read(dataset); + } finally { + ins.close(); + } + return dataset; + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java new file mode 100644 index 0000000..6302f63 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java @@ -0,0 +1,51 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.providers; + +import org.apache.slider.providers.docker.DockerKeys; +import org.apache.slider.providers.docker.DockerProviderFactory; +import org.junit.Test; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class TestProviderFactory { + @Test + public void testLoadAgentProvider() throws Throwable { + SliderProviderFactory factory = SliderProviderFactory + .createSliderProviderFactory(DockerKeys.PROVIDER_DOCKER); + assertTrue(factory instanceof DockerProviderFactory); + } + + @Test + public void testCreateClientProvider() throws Throwable { + SliderProviderFactory factory = SliderProviderFactory.createSliderProviderFactory( + DockerKeys.PROVIDER_DOCKER); + assertNotNull(factory.createClientProvider()); + } + + @Test + public void testCreateProviderByClassname() throws Throwable { + SliderProviderFactory factory = SliderProviderFactory.createSliderProviderFactory( + DockerKeys.PROVIDER_DOCKER); + assertNotNull(factory.createServerProvider()); + assertTrue(factory instanceof DockerProviderFactory); + } + +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java new file mode 100644 index 0000000..9dbb52e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.registry; + +import org.apache.slider.core.registry.docstore.PublishedConfigSet; +import org.junit.Assert; +import org.junit.Test; + +import java.util.Arrays; + +public class TestConfigSetNaming { + + void assertValid(String name) { + PublishedConfigSet.validateName(name); + } + + void assertInvalid(String name) { + try { + PublishedConfigSet.validateName(name); + Assert.fail("Invalid name was unexpectedly parsed: " + name); + } catch (IllegalArgumentException expected) { + // expected + } + } + + @Test + public void testLowerCase() throws Throwable { + assertValid("abcdefghijklmnopqrstuvwxyz"); + } + + @Test + public void testUpperCaseInvalid() throws Throwable { + assertInvalid("ABCDEFGHIJKLMNOPQRSTUVWXYZ"); + } + + @Test + public void testNumbers() throws Throwable { + assertValid("01234567890"); + } + + @Test + public void testChars() throws Throwable { + assertValid("a-_+"); + } + + @Test + public void testInvalids() throws Throwable { + for (String s : Arrays.asList( + "", + " ", + "*", + "a/b", + "b\\a", + "\"", + "'", + "\u0000", + "\u0f00", + "key.value", + "-", + "+", + "_", + "?")) { + assertInvalid(s); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java new file mode 100644 index 0000000..11d94c0 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.registry; + +import org.apache.hadoop.registry.client.binding.RegistryUtils; +import org.apache.slider.core.registry.SliderRegistryUtils; +import org.apache.slider.test.SliderTestUtils; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestRegistryPaths { + + @Test + public void testHomedirKerberos() throws Throwable { + String home = RegistryUtils.homePathForUser("hbase@HADOOP.APACHE.ORG"); + try { + assertEquals("/users/hbase", home); + } catch (AssertionError e) { + SliderTestUtils.skip("homedir filtering not yet in hadoop registry " + + "module"); + } + } + + @Test + public void testHomedirKerberosHost() throws Throwable { + String home = RegistryUtils.homePathForUser("hbase/localhost@HADOOP" + + ".APACHE.ORG"); + try { + assertEquals("/users/hbase", home); + } catch (AssertionError e) { + SliderTestUtils.skip("homedir filtering not yet in hadoop registry " + + "module"); + } + } + + @Test + public void testRegistryPathForInstance() throws Throwable { + String path = SliderRegistryUtils.registryPathForInstance("instance"); + assertTrue(path.endsWith("/instance")); + } + + @Test + public void testPathResolution() throws Throwable { + String home = RegistryUtils.homePathForCurrentUser(); + assertEquals(home, SliderRegistryUtils.resolvePath("~")); + assertEquals(home +"/", 
SliderRegistryUtils.resolvePath("~/")); + assertEquals(home +"/something", SliderRegistryUtils.resolvePath( + "~/something")); + assertEquals("~unresolved", SliderRegistryUtils.resolvePath( + "~unresolved")); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java new file mode 100644 index 0000000..8a35078 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java @@ -0,0 +1,240 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.actions; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.ServiceOperations; +import org.apache.slider.server.appmaster.SliderAppMaster; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.services.workflow.ServiceThreadFactory; +import org.apache.slider.server.services.workflow.WorkflowExecutorService; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class TestActions { + protected static final Logger log = + LoggerFactory.getLogger(TestActions.class); + + QueueService queues; + WorkflowExecutorService executorService; + + + @Before + public void createService() { + queues = new QueueService(); + + Configuration conf = new Configuration(); + queues.init(conf); + + queues.start(); + + executorService = new WorkflowExecutorService<>("AmExecutor", + Executors.newCachedThreadPool( + new ServiceThreadFactory("AmExecutor", true))); + + executorService.init(conf); + executorService.start(); + } + + @After + public void destroyService() { + ServiceOperations.stop(executorService); + ServiceOperations.stop(queues); + } + + @Test + public void testBasicService() throws Throwable { + queues.start(); + } + + @Test + public void testDelayLogic() throws Throwable { + ActionNoteExecuted action = new ActionNoteExecuted("", 1000); + long now = System.currentTimeMillis(); + + long delay = action.getDelay(TimeUnit.MILLISECONDS); + 
assertTrue(delay >= 800); + assertTrue(delay <= 1800); + + ActionNoteExecuted a2 = new ActionNoteExecuted("a2", 10000); + assertTrue(action.compareTo(a2) < 0); + assertTrue(a2.compareTo(action) > 0); + assertEquals(0, action.compareTo(action)); + + } + + @Test + public void testActionDelayedExecutorTermination() throws Throwable { + long start = System.currentTimeMillis(); + + ActionStopQueue stopAction = new ActionStopQueue(1000); + queues.scheduledActions.add(stopAction); + queues.run(); + AsyncAction take = queues.actionQueue.take(); + assertEquals(take, stopAction); + long stop = System.currentTimeMillis(); + assertTrue(stop - start > 500); + assertTrue(stop - start < 1500); + } + + @Test + public void testImmediateQueue() throws Throwable { + ActionNoteExecuted noteExecuted = new ActionNoteExecuted("executed", 0); + queues.put(noteExecuted); + queues.put(new ActionStopQueue(0)); + QueueExecutor ex = new QueueExecutor(queues); + ex.run(); + assertTrue(queues.actionQueue.isEmpty()); + assertTrue(noteExecuted.executed.get()); + } + + @Test + public void testActionOrdering() throws Throwable { + + ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500); + ActionStopQueue stop = new ActionStopQueue(1500); + ActionNoteExecuted note2 = new ActionNoteExecuted("note2", 800); + + List actions = Arrays.asList(note1, stop, note2); + Collections.sort(actions); + assertEquals(actions.get(0), note1); + assertEquals(actions.get(1), note2); + assertEquals(actions.get(2), stop); + } + + @Test + public void testDelayedQueueWithReschedule() throws Throwable { + + ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500); + ActionStopQueue stop = new ActionStopQueue(1500); + ActionNoteExecuted note2 = new ActionNoteExecuted("note2", 800); + + assertTrue(note2.compareTo(stop) < 0); + assertTrue(note1.getNanos() < note2.getNanos()); + assertTrue(note2.getNanos() < stop.getNanos()); + queues.schedule(note1); + queues.schedule(note2); + queues.schedule(stop); + // async 
to sync expected to run in order + runQueuesToCompletion(); + assertTrue(note1.executed.get()); + assertTrue(note2.executed.get()); + } + + public void runQueuesToCompletion() { + queues.run(); + assertTrue(queues.scheduledActions.isEmpty()); + assertFalse(queues.actionQueue.isEmpty()); + QueueExecutor ex = new QueueExecutor(queues); + ex.run(); + // flush all stop commands from the queue + queues.flushActionQueue(ActionStopQueue.class); + + assertTrue(queues.actionQueue.isEmpty()); + } + + @Test + public void testRenewedActionFiresOnceAtLeast() throws Throwable { + ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500); + RenewingAction renewer = new RenewingAction( + note1, + 500, + 100, + TimeUnit.MILLISECONDS, + 3); + queues.schedule(renewer); + ActionStopQueue stop = new ActionStopQueue(4, TimeUnit.SECONDS); + queues.schedule(stop); + // this runs all the delayed actions FIRST, so can't be used + // to play tricks of renewing actions ahead of the stop action + runQueuesToCompletion(); + assertEquals(1, renewer.executionCount.intValue()); + assertEquals(1, note1.executionCount.intValue()); + // assert the renewed item is back in + assertTrue(queues.scheduledActions.contains(renewer)); + } + + + @Test + public void testRenewingActionOperations() throws Throwable { + ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500); + RenewingAction renewer = new RenewingAction( + note1, + 100, + 100, + TimeUnit.MILLISECONDS, + 3); + queues.renewing("note", renewer); + assertTrue(queues.removeRenewingAction("note")); + queues.stop(); + assertTrue(queues.waitForServiceToStop(10000)); + } + + public class ActionNoteExecuted extends AsyncAction { + public final AtomicBoolean executed = new AtomicBoolean(false); + public final AtomicLong executionTimeNanos = new AtomicLong(); + private final AtomicLong executionCount = new AtomicLong(); + + public ActionNoteExecuted(String text, int delay) { + super(text, delay); + } + + @Override + public void execute( + 
SliderAppMaster appMaster, + QueueAccess queueService, + AppState appState) throws Exception { + log.info("Executing {}", name); + executed.set(true); + executionTimeNanos.set(System.nanoTime()); + executionCount.incrementAndGet(); + log.info(this.toString()); + + synchronized (this) { + this.notify(); + } + } + + @Override + public String toString() { + return super.toString() + " executed=" + executed.get() + "; count=" + + executionCount.get() + ";"; + } + + public long getExecutionCount() { + return executionCount.get(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java new file mode 100644 index 0000000..4a34929 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockFactory; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; +import org.apache.slider.server.appmaster.state.RoleStatus; + +import java.util.Arrays; + +/** + * class for basis of Anti-affine placement tests; sets up role2 + * for anti-affinity + */ +public class BaseMockAppStateAATest extends BaseMockAppStateTest + implements MockRoles { + + /** Role status for the base AA role */ + protected RoleStatus aaRole; + + /** Role status for the AA role requiring a node with the gpu label */ + RoleStatus gpuRole; + + @Override + public AppStateBindingInfo buildBindingInfo() { + AppStateBindingInfo bindingInfo = super.buildBindingInfo(); + bindingInfo.roles = Arrays.asList( + MockFactory.PROVIDER_ROLE0, + MockFactory.AAROLE_1_GPU, + MockFactory.AAROLE_2 + ); + return bindingInfo; + } + + @Override + public void setup() throws Exception { + super.setup(); + aaRole = lookupRole(MockFactory.AAROLE_2.name); + gpuRole = lookupRole(MockFactory.AAROLE_1_GPU.name); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java new file mode 100644 index 0000000..e8fd7a8 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.slider.core.main.LauncherExitCodes; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.NodeMap; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +/** + * Test Anti-affine placement with a cluster of size 1 + */ +public class TestMockAppStateAAOvercapacity extends BaseMockAppStateAATest + implements MockRoles { + + private int NODES = 1; + + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(NODES, 1); + } + + void assertAllContainersAA() { + assertAllContainersAA(aaRole.getKey()); + } + + /** + * + * @throws Throwable + */ + @Test + public void testOvercapacityRecovery() throws Throwable { + + describe("Ask for 1 more than the no of available nodes;" + + "verify the state. 
kill the allocated container and review"); + //more than expected + long desired = 3; + aaRole.setDesired(desired); + assertTrue(appState.getRoleHistory().canPlaceAANodes()); + + //first request + List operations = + appState.reviewRequestAndReleaseNodes(); + assertTrue(aaRole.isAARequestOutstanding()); + assertEquals(desired - 1, aaRole.getPendingAntiAffineRequests()); + List operationsOut = new ArrayList<>(); + // allocate and re-submit + List instances = submitOperations(operations, + EMPTY_ID_LIST, operationsOut); + assertEquals(1, instances.size()); + assertAllContainersAA(); + + // expect an outstanding AA request to be unsatisfied + assertTrue(aaRole.getActual() < aaRole.getDesired()); + assertEquals(0, aaRole.getRequested()); + assertFalse(aaRole.isAARequestOutstanding()); + assertEquals(desired - 1, aaRole.getPendingAntiAffineRequests()); + List allocatedContainers = engine.execute(operations, + EMPTY_ID_LIST); + assertEquals(0, allocatedContainers.size()); + + // now lets trigger a failure + NodeMap nodemap = cloneNodemap(); + assertEquals(1, nodemap.size()); + + RoleInstance instance = instances.get(0); + ContainerId cid = instance.getContainerId(); + + AppState.NodeCompletionResult result = appState.onCompletedNode(containerStatus(cid, + LauncherExitCodes.EXIT_TASK_LAUNCH_FAILURE)); + assertTrue(result.containerFailed); + + assertEquals(1, aaRole.getFailed()); + assertEquals(0, aaRole.getActual()); + List availablePlacements = appState.getRoleHistory() + .findNodeForNewAAInstance(aaRole); + assertEquals(1, availablePlacements.size()); + describe("expecting a successful review with available placements of " + + availablePlacements); + operations = appState.reviewRequestAndReleaseNodes(); + assertEquals(1, operations.size()); + } + + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java new file mode 100644 index 0000000..4540f0a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java @@ -0,0 +1,361 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.api.types.NodeInformation; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.conf.ConfTreeOperations; +import org.apache.slider.providers.PlacementPolicy; +import org.apache.slider.server.appmaster.model.mock.MockAppState; +import org.apache.slider.server.appmaster.model.mock.MockFactory; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.AppState.NodeUpdatedOutcome; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; +import org.apache.slider.server.appmaster.state.ContainerAssignment; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +/** + * Test Anti-affine placement + */ +public class TestMockAppStateAAPlacement extends BaseMockAppStateAATest + implements MockRoles { + + private int NODES = 3; + + /** + * The YARN engine has a cluster with very few nodes (3) and lots of containers, so + * if AA placement isn't working, there will be affine placements surfacing. 
+ * @return + */ + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(NODES, 8); + } + + /** + * This is the simplest AA allocation: no labels, so allocate anywhere + * @throws Throwable + */ + @Test + public void testAllocateAANoLabel() throws Throwable { + assertTrue(cloneNodemap().size() > 0); + + // want multiple instances, so there will be iterations + aaRole.setDesired(2); + + List ops = appState.reviewRequestAndReleaseNodes(); + AMRMClient.ContainerRequest request = getSingleRequest(ops); + assertFalse(request.getRelaxLocality()); + assertEquals(request.getNodes().size(), engine.cluster.clusterSize); + assertNull(request.getRacks()); + assertNotNull(request.getCapability()); + + Container allocated = engine.allocateContainer(request); + + // notify the container ane expect + List assignments = new ArrayList<>(); + List operations = new ArrayList<>(); + appState.onContainersAllocated(Arrays.asList(allocated), assignments, + operations); + + String host = allocated.getNodeId().getHost(); + NodeInstance hostInstance = cloneNodemap().get(host); + assertEquals(1, hostInstance.get(aaRole.getKey()).getStarting()); + assertFalse(hostInstance.canHost(aaRole.getKey(), "")); + assertFalse(hostInstance.canHost(aaRole.getKey(), null)); + + // assignment + assertEquals(1, assignments.size()); + + // verify the release matches the allocation + assertEquals(2, operations.size()); + assertNotNull(getCancel(operations, 0).getCapability().equals(allocated + .getResource())); + + // we also expect a new allocation request to have been issued + + ContainerRequest req2 = getRequest(operations, 1); + assertEquals(req2.getNodes().size(), engine.cluster.clusterSize - 1); + + assertFalse(req2.getNodes().contains(host)); + assertFalse(request.getRelaxLocality()); + + // verify the pending couner is down + assertEquals(0L, aaRole.getPendingAntiAffineRequests()); + Container allocated2 = engine.allocateContainer(req2); + + // placement must be on a 
different host + assertNotEquals(allocated2.getNodeId(), allocated.getNodeId()); + + ContainerAssignment assigned = assignments.get(0); + Container container = assigned.container; + RoleInstance ri = roleInstance(assigned); + //tell the app it arrived + appState.containerStartSubmitted(container, ri); + assertNotNull(appState.onNodeManagerContainerStarted(container.getId())); + ops = appState.reviewRequestAndReleaseNodes(); + assertEquals(0, ops.size()); + assertAllContainersAA(); + + // identify those hosts with an aa role on + Map naming = appState.buildNamingMap(); + assertEquals(3, naming.size()); + + String name = aaRole.getName(); + assertEquals(name, naming.get(aaRole.getKey())); + Map info = + appState.getRoleHistory().getNodeInformationSnapshot(naming); + assertTrue(SliderUtils.isNotEmpty(info)); + + NodeInformation nodeInformation = info.get(host); + assertNotNull(nodeInformation); + assertTrue(SliderUtils.isNotEmpty(nodeInformation.entries)); + assertNotNull(nodeInformation.entries.get(name)); + assertEquals(1, nodeInformation.entries.get(name).live); + } + + @Test + public void testAllocateFlexUp() throws Throwable { + // want multiple instances, so there will be iterations + aaRole.setDesired(2); + List ops = appState.reviewRequestAndReleaseNodes(); + getSingleRequest(ops); + assertEquals(1, aaRole.getRequested()); + assertEquals(1, aaRole.getPendingAntiAffineRequests()); + assertEquals( aaRole.getActualAndRequested() + aaRole + .getPendingAntiAffineRequests(), aaRole.getDesired()); + + // now trigger that flex up + aaRole.setDesired(3); + + // expect: no new reqests, pending count ++ + List ops2 = appState.reviewRequestAndReleaseNodes(); + assertTrue(ops2.isEmpty()); + assertEquals(aaRole.getActual() + aaRole.getPendingAntiAffineRequests() + + aaRole.getOutstandingAARequestCount(), aaRole.getDesired()); + + // 1 outstanding + assertEquals(0, aaRole.getActual()); + assertTrue(aaRole.isAARequestOutstanding()); + // and one AA + assertEquals(2, 
aaRole.getPendingAntiAffineRequests()); + assertAllContainersAA(); + + // next iter + assertEquals(1, submitOperations(ops, EMPTY_ID_LIST, ops2).size()); + assertEquals(2, ops2.size()); + assertEquals(1, aaRole.getPendingAntiAffineRequests()); + assertAllContainersAA(); + + assertEquals(0, appState.reviewRequestAndReleaseNodes().size()); + // now trigger the next execution cycle + List ops3 = new ArrayList<>(); + assertEquals(1, submitOperations(ops2, EMPTY_ID_LIST, ops3).size()); + assertEquals(2, ops3.size()); + assertEquals(0, aaRole.getPendingAntiAffineRequests()); + assertAllContainersAA(); + + } + + @Test + public void testAllocateFlexDownDecrementsPending() throws Throwable { + // want multiple instances, so there will be iterations + aaRole.setDesired(2); + List ops = appState.reviewRequestAndReleaseNodes(); + getSingleRequest(ops); + assertEquals(1, aaRole.getPendingAntiAffineRequests()); + assertTrue(aaRole.isAARequestOutstanding()); + + // flex down so that the next request should be cancelled + aaRole.setDesired(1); + + // expect: no new requests, pending count -- + List ops2 = appState.reviewRequestAndReleaseNodes(); + assertTrue(ops2.isEmpty()); + assertTrue(aaRole.isAARequestOutstanding()); + assertEquals(0, aaRole.getPendingAntiAffineRequests()); + assertAllContainersAA(); + + // next iter + submitOperations(ops, EMPTY_ID_LIST, ops2).size(); + assertEquals(1, ops2.size()); + assertAllContainersAA(); + } + + /** + * Here flex down while there is only one outstanding request. 
+ * The outstanding flex should be cancelled + * @throws Throwable + */ + @Test + public void testAllocateFlexDownForcesCancel() throws Throwable { + // want multiple instances, so there will be iterations + aaRole.setDesired(1); + List ops = appState.reviewRequestAndReleaseNodes(); + getSingleRequest(ops); + assertEquals(0, aaRole.getPendingAntiAffineRequests()); + assertTrue(aaRole.isAARequestOutstanding()); + + // flex down so that the next request should be cancelled + aaRole.setDesired(0); + // expect: no new requests, pending count -- + List ops2 = appState.reviewRequestAndReleaseNodes(); + assertEquals(0, aaRole.getPendingAntiAffineRequests()); + assertFalse(aaRole.isAARequestOutstanding()); + assertEquals(1, ops2.size()); + getSingleCancel(ops2); + + // next iter + submitOperations(ops, EMPTY_ID_LIST, ops2).size(); + assertEquals(1, ops2.size()); + } + + void assertAllContainersAA() { + assertAllContainersAA(aaRole.getKey()); + } + + /** + * + * @throws Throwable + */ + @Test + public void testAskForTooMany() throws Throwable { + + describe("Ask for 1 more than the no of available nodes;" + + " expect the final request to be unsatisfied until the cluster " + + "changes size"); + //more than expected + aaRole.setDesired(NODES + 1); + List operations = appState + .reviewRequestAndReleaseNodes(); + assertTrue(aaRole.isAARequestOutstanding()); + assertEquals(NODES, aaRole.getPendingAntiAffineRequests()); + for (int i = 0; i < NODES; i++) { + String iter = "Iteration " + i + " role = " + aaRole; + log.info(iter); + List operationsOut = new ArrayList<>(); + assertEquals(1, submitOperations(operations, EMPTY_ID_LIST, + operationsOut).size()); + operations = operationsOut; + if (i + 1 < NODES) { + assertEquals(2, operations.size()); + } else { + assertEquals(1, operations.size()); + } + assertAllContainersAA(); + } + // expect an outstanding AA request to be unsatisfied + assertTrue(aaRole.getActual() < aaRole.getDesired()); + assertTrue(aaRole.getRequested() == 
0); + assertFalse(aaRole.isAARequestOutstanding()); + List allocatedContainers = engine.execute(operations, + EMPTY_ID_LIST); + assertEquals(0, allocatedContainers.size()); + // in a review now, no more requests can be generated, as there is no space for AA placements, + // even though there is cluster capacity + assertEquals(0, appState.reviewRequestAndReleaseNodes().size()); + + // now do a node update (this doesn't touch the YARN engine; the node isn't really there) + NodeUpdatedOutcome outcome = addNewNode(); + assertEquals(cloneNodemap().size(), NODES + 1); + assertTrue(outcome.clusterChanged); + // no active calls to empty + assertTrue(outcome.operations.isEmpty()); + assertEquals(1, appState.reviewRequestAndReleaseNodes().size()); + } + + protected AppState.NodeUpdatedOutcome addNewNode() { + return updateNodes(MockFactory.instance.newNodeReport("4", NodeState + .RUNNING, "gpu")); + } + + @Test + public void testClusterSizeChangesDuringRequestSequence() throws Throwable { + describe("Change the cluster size where the cluster size changes during " + + "a test sequence."); + aaRole.setDesired(NODES + 1); + appState.reviewRequestAndReleaseNodes(); + assertTrue(aaRole.isAARequestOutstanding()); + assertEquals(NODES, aaRole.getPendingAntiAffineRequests()); + NodeUpdatedOutcome outcome = addNewNode(); + assertTrue(outcome.clusterChanged); + // one call to cancel + assertEquals(1, outcome.operations.size()); + // and on a review, one more to rebuild + assertEquals(1, appState.reviewRequestAndReleaseNodes().size()); + } + + @Test + public void testBindingInfoMustHaveNodeMap() throws Throwable { + AppStateBindingInfo bindingInfo = buildBindingInfo(); + bindingInfo.nodeReports = null; + try { + MockAppState state = new MockAppState(bindingInfo); + fail("Expected an exception, got " + state); + } catch (IllegalArgumentException expected) { + } + } + + @Test + public void testAMRestart() throws Throwable { + int desiredAA = 3; + aaRole.setDesired(desiredAA); + List 
instances = createAndStartNodes(); + List containers = new ArrayList<>(); + for (RoleInstance instance : instances) { + containers.add(instance.container); + } + + // now destroy the app state + AppStateBindingInfo bindingInfo = buildBindingInfo(); + bindingInfo.instanceDefinition = factory.newInstanceDefinition(0, 0, + desiredAA); + ConfTreeOperations cto = new ConfTreeOperations(bindingInfo + .instanceDefinition.getResources()); + cto.setComponentOpt(ROLE2, + ResourceKeys.COMPONENT_PLACEMENT_POLICY, + PlacementPolicy.ANTI_AFFINITY_REQUIRED); + bindingInfo.liveContainers = containers; + appState = new MockAppState(bindingInfo); + + RoleStatus aaRole = lookupRole(MockFactory.AAROLE_2.name); + RoleStatus gpuRole = lookupRole(MockFactory.AAROLE_1_GPU.name); + appState.reviewRequestAndReleaseNodes(); + assertTrue(aaRole.isAntiAffinePlacement()); + assertTrue(aaRole.isAARequestOutstanding()); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAppRestIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAppRestIntegration.java new file mode 100644 index 0000000..7841ac2 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAppRestIntegration.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.slider.api.types.ContainerInformation; +import org.apache.slider.core.exceptions.SliderInternalStateException; +import org.apache.slider.core.exceptions.TriggerClusterTeardownException; +import org.apache.slider.core.persist.JsonSerDeser; +import org.apache.slider.server.appmaster.management.MetricsAndMonitoring; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockProviderService; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.state.ProviderAppState; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.slider.server.appmaster.state.StateAccessForProviders; +import org.apache.slider.server.appmaster.web.WebAppApi; +import org.apache.slider.server.appmaster.web.WebAppApiImpl; +import org.apache.slider.server.appmaster.web.rest.application.ApplicationResouceContentCacheFactory; +import org.apache.slider.server.appmaster.web.rest.application.ApplicationResource; +import org.apache.slider.server.appmaster.web.rest.application.resources.CachedContent; +import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache; +import org.apache.slider.server.appmaster.web.rest.application.resources.LiveContainersRefresher; 
+import org.apache.slider.server.appmaster.web.rest.application.resources.ResourceRefresher; +import org.junit.Test; + +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +public class TestMockAppStateAppRestIntegration extends BaseMockAppStateTest + implements MockRoles { + + @Test + public void testCachedIntDocument() throws Throwable { + ContentCache cache = new ContentCache(); + + IntRefresher refresher = new IntRefresher(); + assertEquals(0, refresher.count); + CachedContentManagedTimer entry = new CachedContentManagedTimer(refresher); + cache.put("/int", entry); + CachedContent content1 = cache.get("/int"); + assertEquals(entry, content1); + + assertEquals(0, entry.get()); + assertEquals(1, refresher.count); + assertEquals(0, entry.getCachedValue()); + assertEquals(1, entry.getRefreshCounter()); + + Object got = entry.get(); + assertEquals(2, entry.getRefreshCounter()); + assertEquals(1, got); + } + + @Test + public void testContainerListRefresher() throws Throwable { + LiveContainersRefresher clr = new LiveContainersRefresher(stateAccess); + Map map = clr.refresh(); + assertEquals(0, map.size()); + List instances = startNodes(); + map = clr.refresh(); + assertEquals(map.size(), instances.size()); + log.info("{}", map); + JsonSerDeser serDeser = + new JsonSerDeser<>(ContainerInformation.class); + for (Entry entry : map.entrySet()) { + String key = entry.getKey(); + ContainerInformation value = entry.getValue(); + log.info("{} -> {}", key, serDeser.toJson(value)); + } + } + + public List startNodes() + throws TriggerClusterTeardownException, SliderInternalStateException { + int r0 = 1; + int r1 = 2; + int r2 = 3; + getRole0Status().setDesired(r0); + getRole1Status().setDesired(r1); + getRole2Status().setDesired(r2); + List instances = createAndStartNodes(); + assertEquals(instances.size(), r0 + r1 + r2); + return instances; + } + + @Test + public void testApplicationResource() throws Throwable { + List instances = startNodes(); + 
ApplicationResource applicationResource = + new ApplicationResource(getWebAppApi()); + Map containers = + applicationResource.getLiveContainers(); + assertEquals(containers.size(), instances.size()); + } + + /** + * Get a state accessor for the appState field + * @return something to hand down to refreshers and resources + */ + public StateAccessForProviders getStateAccess() { + StateAccessForProviders state = new ProviderAppState("name", appState); + return state; + } + + public WebAppApi getWebAppApi() { + WebAppApi api = new WebAppApiImpl(stateAccess, + new MockProviderService(), + null, + new MetricsAndMonitoring("metrics"), null,null, + ApplicationResouceContentCacheFactory.createContentCache(stateAccess) + ); + return api; + } + + /** + * Little class to do integer refreshing & so test refresh logic + */ + public class IntRefresher implements ResourceRefresher { + int count ; + @Override + public Integer refresh() throws Exception { + log.info("Refresh at {}", count); + int result = count; + count += 1; + return result; + } + + @Override + public String toString() { + return "IntRefresher at " + count; + } + + } + + public class CachedContentManagedTimer extends CachedContent { + int time = 0; + + @Override + protected long now() { + return time++; + } + + CachedContentManagedTimer(ResourceRefresher refresh) { + super(1, refresh); + } + + @Override + public String toString() { + return "CachedContentManagedTimer at " + time + "; " + super.toString(); + } + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java new file mode 100644 index 0000000..98f1308 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java @@ -0,0 +1,377 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.conf.MapOperations; +import org.apache.slider.core.exceptions.SliderException; +import org.apache.slider.core.exceptions.TriggerClusterTeardownException; +import org.apache.slider.server.appmaster.actions.ResetFailureWindow; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockAM; +import org.apache.slider.server.appmaster.model.mock.MockAppState; +import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; +import org.apache.slider.server.appmaster.state.ContainerOutcome; +import org.apache.slider.server.appmaster.state.NodeEntry; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.RoleHistory; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.junit.Test; + +import java.util.List; + +/** + * Test that if you have >1 role, the right roles are chosen for release. 
+ */ +public class TestMockAppStateContainerFailure extends BaseMockAppStateTest + implements MockRoles { + MockRMOperationHandler operationHandler = new MockRMOperationHandler(); + MockAM mockAM = new MockAM(); + + @Override + public String getTestName() { + return "TestMockAppStateContainerFailure"; + } + + /** + * Small cluster with multiple containers per node, + * to guarantee many container allocations on each node + * @return + */ + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(4, 8000); + } + + @Override + public AggregateConf buildInstanceDefinition() { + AggregateConf aggregateConf = super.buildInstanceDefinition(); + MapOperations globalOptions = aggregateConf.getResourceOperations() + .getGlobalOptions(); + globalOptions.put(ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "10"); + + return aggregateConf; + } + + @Test + public void testShortLivedFail() throws Throwable { + + getRole0Status().setDesired(1); + List instances = createAndStartNodes(); + assertEquals(1, instances.size()); + + RoleInstance instance = instances.get(0); + long created = instance.createTime; + long started = instance.startTime; + assertTrue(created > 0); + assertTrue(started >= created); + List ids = extractContainerIds(instances, 0); + + ContainerId cid = ids.get(0); + assertTrue(appState.isShortLived(instance)); + AppState.NodeCompletionResult result = appState.onCompletedNode + (containerStatus(cid, 1)); + assertNotNull(result.roleInstance); + assertTrue(result.containerFailed); + RoleStatus status = getRole0Status(); + assertEquals(1, status.getFailed()); + assertEquals(1, status.getStartFailed()); + + //view the world + appState.getRoleHistory().dump(); + List queue = appState.getRoleHistory().cloneRecentNodeList(0); + assertEquals(0, queue.size()); + + } + + @Test + public void testLongLivedFail() throws Throwable { + + getRole0Status().setDesired(1); + List instances = createAndStartNodes(); + assertEquals(1, instances.size()); + + 
RoleInstance instance = instances.get(0); + instance.startTime = System.currentTimeMillis() - 60 * 60 * 1000; + assertFalse(appState.isShortLived(instance)); + List ids = extractContainerIds(instances, 0); + + ContainerId cid = ids.get(0); + AppState.NodeCompletionResult result = appState.onCompletedNode( + containerStatus(cid, 1)); + assertNotNull(result.roleInstance); + assertTrue(result.containerFailed); + RoleStatus status = getRole0Status(); + assertEquals(1, status.getFailed()); + assertEquals(0, status.getStartFailed()); + + //view the world + appState.getRoleHistory().dump(); + List queue = appState.getRoleHistory().cloneRecentNodeList(0); + assertEquals(1, queue.size()); + + } + + @Test + public void testNodeStartFailure() throws Throwable { + + getRole0Status().setDesired(1); + List instances = createAndSubmitNodes(); + assertEquals(1, instances.size()); + + RoleInstance instance = instances.get(0); + + List ids = extractContainerIds(instances, 0); + + ContainerId cid = ids.get(0); + appState.onNodeManagerContainerStartFailed(cid, new SliderException("oops")); + RoleStatus status = getRole0Status(); + assertEquals(1, status.getFailed()); + assertEquals(1, status.getStartFailed()); + + + RoleHistory history = appState.getRoleHistory(); + history.dump(); + List queue = history.cloneRecentNodeList(0); + assertEquals(0, queue.size()); + + NodeInstance ni = history.getOrCreateNodeInstance(instance.container); + NodeEntry re = ni.get(0); + assertEquals(1, re.getFailed()); + assertEquals(1, re.getStartFailed()); + } + + @Test + public void testRecurrentStartupFailure() throws Throwable { + + getRole0Status().setDesired(1); + try { + for (int i = 0; i< 100; i++) { + List instances = createAndSubmitNodes(); + assertEquals(1, instances.size()); + + List ids = extractContainerIds(instances, 0); + + ContainerId cid = ids.get(0); + log.info("{} instance {} {}", i, instances.get(0), cid); + assertNotNull(cid); + appState.onNodeManagerContainerStartFailed(cid, + new 
SliderException("failure #" + i)); + AppState.NodeCompletionResult result = appState.onCompletedNode( + containerStatus(cid)); + assertTrue(result.containerFailed); + } + fail("Cluster did not fail from too many startup failures"); + } catch (TriggerClusterTeardownException teardown) { + log.info("Exception {} : {}", teardown.getExitCode(), teardown); + } + } + + @Test + public void testRecurrentStartupFailureWithUnlimitedFailures() throws Throwable { + // Update instance definition to allow containers to fail any number of times + AppStateBindingInfo bindingInfo = buildBindingInfo(); + MapOperations globalResourceOptions = bindingInfo.instanceDefinition + .getResourceOperations().getGlobalOptions(); + globalResourceOptions.put(ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "0"); + appState = new MockAppState(bindingInfo); + + getRole0Status().setDesired(1); + try { + for (int i = 0; i < 100; i++) { + List instances = createAndSubmitNodes(); + assertEquals(1, instances.size()); + + List ids = extractContainerIds(instances, 0); + + ContainerId cid = ids.get(0); + log.info("{} instance {} {}", i, instances.get(0), cid); + assertNotNull(cid); + appState.onNodeManagerContainerStartFailed(cid, + new SliderException("failure #" + i)); + AppState.NodeCompletionResult result = appState.onCompletedNode( + containerStatus(cid)); + assertTrue(result.containerFailed); + } + } catch (TriggerClusterTeardownException teardown) { + log.info("Exception {} : {}", teardown.getExitCode(), teardown); + fail("Cluster failed despite " + ResourceKeys + .CONTAINER_FAILURE_THRESHOLD + " = 0"); + } + } + + @Test + public void testRoleStatusFailureWindow() throws Throwable { + + ResetFailureWindow resetter = new ResetFailureWindow(operationHandler); + + // initial reset + resetter.execute(mockAM, null, appState); + + getRole0Status().setDesired(1); + for (int i = 0; i < 100; i++) { + resetter.execute(mockAM, null, appState); + List instances = createAndSubmitNodes(); + assertEquals(1, 
instances.size()); + + List ids = extractContainerIds(instances, 0); + + ContainerId cid = ids.get(0); + log.info("{} instance {} {}", i, instances.get(0), cid); + assertNotNull(cid); + appState.onNodeManagerContainerStartFailed( + cid, + new SliderException("failure #" + i)); + AppState.NodeCompletionResult result = appState.onCompletedNode( + containerStatus(cid)); + assertTrue(result.containerFailed); + } + } + + @Test + public void testRoleStatusFailed() throws Throwable { + RoleStatus status = getRole0Status(); + // limits exceeded + status.noteFailed(false, "text", ContainerOutcome.Failed); + assertEquals(1, status.getFailed()); + assertEquals(1L, status.getFailedRecently()); + assertEquals(0L, status.getLimitsExceeded()); + assertEquals(0L, status.getPreempted()); + assertEquals(0L, status.getNodeFailed()); + + ResetFailureWindow resetter = new ResetFailureWindow(operationHandler); + resetter.execute(mockAM, null, appState); + assertEquals(1, status.getFailed()); + assertEquals(0L, status.getFailedRecently()); + } + + @Test + public void testRoleStatusFailedLimitsExceeded() throws Throwable { + RoleStatus status = getRole0Status(); + // limits exceeded + status.noteFailed(false, "text",ContainerOutcome.Failed_limits_exceeded); + assertEquals(1, status.getFailed()); + assertEquals(1L, status.getFailedRecently()); + assertEquals(1L, status.getLimitsExceeded()); + assertEquals(0L, status.getPreempted()); + assertEquals(0L, status.getNodeFailed()); + + ResetFailureWindow resetter = new ResetFailureWindow(operationHandler); + resetter.execute(mockAM, null, appState); + assertEquals(1, status.getFailed()); + assertEquals(0L, status.getFailedRecently()); + assertEquals(1L, status.getLimitsExceeded()); + } + + + @Test + public void testRoleStatusFailedPrempted() throws Throwable { + RoleStatus status = getRole0Status(); + // limits exceeded + status.noteFailed(false, "text", ContainerOutcome.Preempted); + assertEquals(0, status.getFailed()); + assertEquals(1L, 
status.getPreempted()); + assertEquals(0L, status.getFailedRecently()); + assertEquals(0L, status.getNodeFailed()); + + ResetFailureWindow resetter = new ResetFailureWindow(operationHandler); + resetter.execute(mockAM, null, appState); + assertEquals(1L, status.getPreempted()); + } + + + @Test + public void testRoleStatusFailedNode() throws Throwable { + RoleStatus status = getRole0Status(); + // limits exceeded + status.noteFailed(false, "text", ContainerOutcome.Node_failure); + assertEquals(1, status.getFailed()); + assertEquals(0L, status.getFailedRecently()); + assertEquals(0L, status.getLimitsExceeded()); + assertEquals(0L, status.getPreempted()); + assertEquals(1L, status.getNodeFailed()); + } + + @Test + public void testNodeEntryCompleted() throws Throwable { + NodeEntry nodeEntry = new NodeEntry(1); + nodeEntry.containerCompleted(true, ContainerOutcome.Completed); + assertEquals(0, nodeEntry.getFailed()); + assertEquals(0, nodeEntry.getFailedRecently()); + assertEquals(0, nodeEntry.getStartFailed()); + assertEquals(0, nodeEntry.getPreempted()); + assertEquals(0, nodeEntry.getActive()); + assertTrue(nodeEntry.isAvailable()); + } + + @Test + public void testNodeEntryFailed() throws Throwable { + NodeEntry nodeEntry = new NodeEntry(1); + nodeEntry.containerCompleted(false, ContainerOutcome.Failed); + assertEquals(1, nodeEntry.getFailed()); + assertEquals(1, nodeEntry.getFailedRecently()); + assertEquals(0, nodeEntry.getStartFailed()); + assertEquals(0, nodeEntry.getPreempted()); + assertEquals(0, nodeEntry.getActive()); + assertTrue(nodeEntry.isAvailable()); + nodeEntry.resetFailedRecently(); + assertEquals(1, nodeEntry.getFailed()); + assertEquals(0, nodeEntry.getFailedRecently()); + } + + @Test + public void testNodeEntryLimitsExceeded() throws Throwable { + NodeEntry nodeEntry = new NodeEntry(1); + nodeEntry.containerCompleted(false, ContainerOutcome.Failed_limits_exceeded); + assertEquals(0, nodeEntry.getFailed()); + assertEquals(0, 
nodeEntry.getFailedRecently()); + assertEquals(0, nodeEntry.getStartFailed()); + assertEquals(0, nodeEntry.getPreempted()); + } + + @Test + public void testNodeEntryPreempted() throws Throwable { + NodeEntry nodeEntry = new NodeEntry(1); + nodeEntry.containerCompleted(false, ContainerOutcome.Preempted); + assertEquals(0, nodeEntry.getFailed()); + assertEquals(0, nodeEntry.getFailedRecently()); + assertEquals(0, nodeEntry.getStartFailed()); + assertEquals(1, nodeEntry.getPreempted()); + } + + @Test + public void testNodeEntryNodeFailure() throws Throwable { + NodeEntry nodeEntry = new NodeEntry(1); + nodeEntry.containerCompleted(false, ContainerOutcome.Node_failure); + assertEquals(1, nodeEntry.getFailed()); + assertEquals(1, nodeEntry.getFailedRecently()); + assertEquals(0, nodeEntry.getStartFailed()); + assertEquals(0, nodeEntry.getPreempted()); + } + + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java new file mode 100644 index 0000000..d964775 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.conf.ConfTreeOperations; +import org.apache.slider.core.conf.MapOperations; +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.providers.PlacementPolicy; +import org.apache.slider.providers.ProviderRole; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockRoleHistory; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.NodeEntry; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.RoleHistory; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Test that if you have >1 role, the right 
roles are chosen for release. + */ +public class TestMockAppStateDynamicHistory extends BaseMockAppStateTest + implements MockRoles { + + /** + * Small cluster with multiple containers per node, + * to guarantee many container allocations on each node + * @return + */ + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(8, 1); + } + + @Test + public void testDynamicRoleHistory() throws Throwable { + + String dynamic = "dynamicRole"; + int role_priority_8 = 8; + int desired = 1; + int placementPolicy = PlacementPolicy.DEFAULT; + // snapshot and patch existing spec + ConfTreeOperations resources = ConfTreeOperations.fromInstance( + appState.getResourcesSnapshot().confTree); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "" + desired); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "" +role_priority_8); + opts.put(ResourceKeys.COMPONENT_PLACEMENT_POLICY, "" + placementPolicy); + + resources.getComponents().put(dynamic, opts); + + + // write the definitions + List updates = appState.updateResourceDefinitions(resources.confTree); + assertEquals(1, updates.size()); + ProviderRole updatedRole = updates.get(0); + assertEquals(updatedRole.placementPolicy, placementPolicy); + + // verify the new role was persisted + MapOperations snapshotDefinition = appState.getResourcesSnapshot() + .getMandatoryComponent(dynamic); + assertEquals(snapshotDefinition.getMandatoryOptionInt( + ResourceKeys.COMPONENT_PRIORITY), role_priority_8); + + // now look at the role map + assertNotNull(appState.getRoleMap().get(dynamic)); + ProviderRole mappedRole = appState.getRoleMap().get(dynamic); + assertEquals(mappedRole.id, role_priority_8); + + Map priorityMap = appState.getRolePriorityMap(); + assertEquals(priorityMap.size(), 4); + ProviderRole dynamicProviderRole = priorityMap.get(role_priority_8); + assertNotNull(dynamicProviderRole); + assertEquals(dynamicProviderRole.id, role_priority_8); + + 
assertNotNull(appState.getRoleStatusMap().get(role_priority_8)); + RoleStatus dynamicRoleStatus = + appState.getRoleStatusMap().get(role_priority_8); + assertEquals(dynamicRoleStatus.getDesired(), desired); + + + // before allocating the nodes, fill up the capacity of some of the + // hosts + engine.allocator.nextIndex(); + + int targetNode = 2; + assertEquals(targetNode, engine.allocator.nextIndex()); + String targetHostname = engine.cluster.nodeAt(targetNode).hostname; + + // clock is set to a small value + appState.time = 100000; + + // allocate the nodes + List actions = appState.reviewRequestAndReleaseNodes(); + assertEquals(1, actions.size()); + ContainerRequestOperation action0 = (ContainerRequestOperation)actions.get(0); + + ContainerRequest request = action0.getRequest(); + assertTrue(SliderUtils.isEmpty(request.getNodes())); + + List released = new ArrayList<>(); + List allocations = submitOperations(actions, released); + processSubmissionOperations(allocations, new ArrayList<>(), released); + assertEquals(1, allocations.size()); + RoleInstance ri = allocations.get(0); + + assertEquals(ri.role, dynamic); + assertEquals(ri.roleId, role_priority_8); + assertEquals(ri.host, targetHostname); + + // now look at the role history + + RoleHistory roleHistory = appState.getRoleHistory(); + List activeNodes = roleHistory.listActiveNodes(role_priority_8); + assertEquals(activeNodes.size(), 1); + NodeInstance activeNode = activeNodes.get(0); + assertNotNull(activeNode.get(role_priority_8)); + NodeEntry entry8 = activeNode.get(role_priority_8); + assertEquals(entry8.getActive(), 1); + + assertEquals(activeNode.hostname, targetHostname); + + NodeInstance activeNodeInstance = + roleHistory.getOrCreateNodeInstance(ri.container); + + assertEquals(activeNode, activeNodeInstance); + NodeEntry entry = activeNodeInstance.get(role_priority_8); + assertNotNull(entry); + assertTrue(entry.getActive() > 0); + assertTrue(entry.getLive() > 0); + + + // now trigger a termination 
event on that role + + // increment time for a long-lived failure event + appState.time = appState.time + 100000; + + log.debug("Triggering failure"); + ContainerId cid = ri.getContainerId(); + AppState.NodeCompletionResult result = appState.onCompletedNode( + containerStatus(cid, 1)); + assertEquals(result.roleInstance, ri); + assertTrue(result.containerFailed); + + roleHistory.dump(); + // values should have changed + assertEquals(1, entry.getFailed()); + assertEquals(0, entry.getStartFailed()); + assertEquals(0, entry.getActive()); + assertEquals(0, entry.getLive()); + + + List nodesForRoleId = + roleHistory.getRecentNodesForRoleId(role_priority_8); + assertNotNull(nodesForRoleId); + + // make sure new nodes will default to a different host in the engine + assertTrue(targetNode < engine.allocator.nextIndex()); + + actions = appState.reviewRequestAndReleaseNodes(); + assertEquals(1, actions.size()); + ContainerRequestOperation action1 = (ContainerRequestOperation) actions.get(0); + ContainerRequest request1 = action1.getRequest(); + assertTrue(SliderUtils.isNotEmpty(request1.getNodes())); + } + + @Test(expected = BadConfigException.class) + public void testRoleHistoryRoleAdditions() throws Throwable { + MockRoleHistory roleHistory = new MockRoleHistory(new ArrayList<>()); + roleHistory.addNewRole(new RoleStatus(new ProviderRole("one", 1))); + roleHistory.addNewRole(new RoleStatus(new ProviderRole("two", 1))); + roleHistory.dump(); + } + + @Test(expected = BadConfigException.class) + public void testRoleHistoryRoleStartupConflict() throws Throwable { + MockRoleHistory roleHistory = new MockRoleHistory(Arrays.asList( + new ProviderRole("one", 1), new ProviderRole("two", 1) + )); + roleHistory.dump(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java new file mode 100644 index 0000000..32a82f6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.providers.PlacementPolicy; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.apache.slider.server.appmaster.state.AppState.NodeCompletionResult; +import org.apache.slider.server.appmaster.state.ContainerPriority; +import org.apache.slider.server.appmaster.state.RoleHistoryUtils; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Test that if you have >1 role, the right roles are chosen for release. 
+ */ +public class TestMockAppStateDynamicRoles extends BaseMockAppStateTest + implements MockRoles { + private static final String ROLE4 = "4"; + private static final String ROLE5 = "5"; + private static final int ID4 = 4; + private static final int ID5 = 5; + + @Override + public String getTestName() { + return "TestMockAppStateDynamicRoles"; + } + + /** + * Small cluster with multiple containers per node, + * to guarantee many container allocations on each node + * @return + */ + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(8, 2); + } + + @Override + public AggregateConf buildInstanceDefinition() { + AggregateConf instance = factory.newInstanceDefinition(0, 0, 0); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_PRIORITY, ROLE4); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "1"); + + instance.getResourceOperations().getOrAddComponent(ROLE4).putAll(opts); + + Map opts5 = new HashMap<>(); + opts5.put(ResourceKeys.COMPONENT_PRIORITY, ROLE5); + opts5.put(ResourceKeys.COMPONENT_INSTANCES, "1"); + opts5.put(ResourceKeys.COMPONENT_PLACEMENT_POLICY, Integer.toString + (PlacementPolicy.STRICT)); + opts5.put(ResourceKeys.NODE_FAILURE_THRESHOLD, Integer.toString(2)); + + instance.getResourceOperations().getOrAddComponent(ROLE5).putAll(opts5); + return instance; + } + + @Test + public void testAllocateReleaseRealloc() throws Throwable { + + createAndStartNodes(); + appState.reviewRequestAndReleaseNodes(); + appState.getRoleHistory().dump(); + } + + /** + * Find all allocations for a specific role + * @param role role Id/priority + * @param actions source list + * @return found list + */ + List findAllocationsForRole(int role, + List actions) { + List ops = new ArrayList<>(); + for (AbstractRMOperation op : actions) { + if (op instanceof ContainerRequestOperation && role == + ContainerPriority.extractRole(((ContainerRequestOperation) op) + .getRequest().getPriority())) { + ops.add((ContainerRequestOperation) op); + } + } + 
return ops; + } + + @Test + public void testStrictPlacementInitialRequest() throws Throwable { + log.info("Initial engine state = {}", engine); + List actions = appState.reviewRequestAndReleaseNodes(); + assertEquals(2, actions.size()); + + // neither have locality at this point + assertRelaxLocalityFlag(ID4, null, true, actions); + assertRelaxLocalityFlag(ID5, null, true, actions); + } + + @Test + public void testPolicyPropagation() throws Throwable { + assertEquals(0, (appState.lookupRoleStatus(ROLE4).getPlacementPolicy() & + PlacementPolicy.STRICT)); + assertNotEquals(0, (appState.lookupRoleStatus(ROLE5).getPlacementPolicy() & + PlacementPolicy.STRICT)); + + } + + @Test + public void testNodeFailureThresholdPropagation() throws Throwable { + assertEquals(3, appState.lookupRoleStatus(ROLE4).getNodeFailureThreshold()); + assertEquals(2, appState.lookupRoleStatus(ROLE5).getNodeFailureThreshold()); + } + + @Test + public void testLaxPlacementSecondRequestRole4() throws Throwable { + log.info("Initial engine state = {}", engine); + RoleStatus role4 = appState.lookupRoleStatus(ROLE4); + RoleStatus role5 = appState.lookupRoleStatus(ROLE5); + role4.setDesired(1); + role5.setDesired(0); + + List instances = createStartAndStopNodes(new ArrayList<>()); + assertEquals(1, instances.size()); + + RoleInstance instanceA = null; + for (RoleInstance instance : instances) { + if (instance.roleId == ID4) { + instanceA = instance; + } + } + assertNotNull(instanceA); + String hostname = RoleHistoryUtils.hostnameOf(instanceA.container); + + log.info("Allocated engine state = {}", engine); + assertEquals(1, engine.containerCount()); + + assertEquals(1, role4.getActual()); + // shrinking cluster + + role4.setDesired(0); + appState.lookupRoleStatus(ROLE4).setDesired(0); + List completionResults = new ArrayList<>(); + createStartAndStopNodes(completionResults); + assertEquals(0, engine.containerCount()); + assertEquals(1, completionResults.size()); + + // expanding: expect hostnames now + 
role4.setDesired(1); + List actions = appState.reviewRequestAndReleaseNodes(); + assertEquals(1, actions.size()); + + ContainerRequestOperation cro = (ContainerRequestOperation) actions.get(0); + List nodes = cro.getRequest().getNodes(); + assertEquals(1, nodes.size()); + assertEquals(hostname, nodes.get(0)); + } + + @Test + public void testStrictPlacementSecondRequestRole5() throws Throwable { + log.info("Initial engine state = {}", engine); + RoleStatus role4 = appState.lookupRoleStatus(ROLE4); + RoleStatus role5 = appState.lookupRoleStatus(ROLE5); + role4.setDesired(0); + role5.setDesired(1); + + List instances = createStartAndStopNodes(new ArrayList<>()); + assertEquals(1, instances.size()); + + RoleInstance instanceA = null; + for (RoleInstance instance : instances) { + if (instance.roleId == ID5) { + instanceA = instance; + } + } + assertNotNull(instanceA); + String hostname = RoleHistoryUtils.hostnameOf(instanceA.container); + + log.info("Allocated engine state = {}", engine); + assertEquals(1, engine.containerCount()); + + assertEquals(1, role5.getActual()); + + // shrinking cluster + role5.setDesired(0); + List completionResults = new ArrayList<>(); + createStartAndStopNodes(completionResults); + assertEquals(0, engine.containerCount()); + assertEquals(1, completionResults.size()); + assertEquals(0, role5.getActual()); + + role5.setDesired(1); + List actions = appState.reviewRequestAndReleaseNodes(); + assertEquals(1, actions.size()); + assertRelaxLocalityFlag(ID5, "", false, actions); + ContainerRequestOperation cro = (ContainerRequestOperation) actions.get(0); + List nodes = cro.getRequest().getNodes(); + assertEquals(1, nodes.size()); + assertEquals(hostname, nodes.get(0)); + } + + public void assertRelaxLocalityFlag( + int role, + String expectedHost, + boolean expectedRelaxFlag, + List actions) { + List requests = findAllocationsForRole( + role, actions); + assertEquals(1, requests.size()); + ContainerRequestOperation req = requests.get(0); + 
assertEquals(expectedRelaxFlag, req.getRequest().getRelaxLocality()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java new file mode 100644 index 0000000..9c8613c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.fs.Path; +import org.apache.slider.api.ClusterDescription; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.conf.ConfTree; +import org.apache.slider.core.conf.ConfTreeOperations; +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.core.exceptions.SliderInternalStateException; +import org.apache.slider.core.exceptions.TriggerClusterTeardownException; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockAppState; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; +import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector; +import org.apache.slider.server.appmaster.state.RoleHistory; +import org.apache.slider.server.avro.LoadedRoleHistory; +import org.apache.slider.server.avro.RoleHistoryWriter; +import org.junit.Test; + +import java.io.File; +import java.util.HashMap; +import java.util.Map; + +/** + * Test that if you have more than one role, the right roles are chosen for release. 
+ */ +public class TestMockAppStateFlexDynamicRoles extends BaseMockAppStateTest + implements MockRoles { + + @Override + public String getTestName() { + return "TestMockAppStateFlexDynamicRoles"; + } + + /** + * Small cluster with multiple containers per node, + * to guarantee many container allocations on each node + * @return + */ + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(4, 4); + } + + @Override + public AppStateBindingInfo buildBindingInfo() { + AppStateBindingInfo bindingInfo = super.buildBindingInfo(); + bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector(); + return bindingInfo; + } + + @Override + public AggregateConf buildInstanceDefinition() { + AggregateConf instance = factory.newInstanceDefinition(0, 0, 0); + + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "1"); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "6"); + + instance.getResourceOperations().getOrAddComponent("dynamic-6") + .putAll(opts); + return instance; + } + + private ConfTreeOperations init() + throws TriggerClusterTeardownException, SliderInternalStateException { + createAndStartNodes(); + ConfTree resources = appState.getInstanceDefinition().getResources(); + return new ConfTreeOperations(resources); + } + + @Test + public void testDynamicFlexAddRole() throws Throwable { + ConfTreeOperations cd = init(); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "1"); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "7"); + + cd.getOrAddComponent("dynamicAdd7").putAll(opts); + appState.updateResourceDefinitions(cd.confTree); + createAndStartNodes(); + dumpClusterDescription("updated CD", appState.getClusterStatus()); + appState.lookupRoleStatus(7); + appState.lookupRoleStatus(6); + //gaps are still there + try { + assertNull(appState.lookupRoleStatus(5)); + } catch (RuntimeException expected) { + } + } + + @Test + public void testDynamicFlexAddRoleConflictingPriority() throws Throwable { + 
ConfTreeOperations cd = init(); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "1"); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "6"); + + cd.getOrAddComponent("conflictingPriority").putAll(opts); + try { + appState.updateResourceDefinitions(cd.confTree); + + ClusterDescription status = appState.getClusterStatus(); + dumpClusterDescription("updated CD", status); + fail("Expected an exception, got " + status); + } catch (BadConfigException expected) { + log.info("Expected: {}", expected); + log.debug("Expected: {}", expected, expected); + // expected + } + } + + @Test + public void testDynamicFlexDropRole() throws Throwable { + ConfTreeOperations cd = init(); + cd.getComponents().remove("dynamic"); + appState.updateResourceDefinitions(cd.confTree); + + ClusterDescription getCD = appState.getClusterStatus(); + dumpClusterDescription("updated CD", getCD); + //status is retained for future + appState.lookupRoleStatus(6); + } + + + @Test + public void testHistorySaveFlexLoad() throws Throwable { + ConfTreeOperations cd = init(); + RoleHistory roleHistory = appState.getRoleHistory(); + Path history = roleHistory.saveHistory(0x0001); + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "1"); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "9"); + + cd.getOrAddComponent("HistorySaveFlexLoad").putAll(opts); + appState.updateResourceDefinitions(cd.confTree); + createAndStartNodes(); + LoadedRoleHistory loadedRoleHistory = + historyWriter.read(fs, history); + assertEquals(0, appState.getRoleHistory().rebuild(loadedRoleHistory)); + } + + @Test + public void testHistoryFlexSaveResetLoad() throws Throwable { + ConfTreeOperations cd = init(); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "1"); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "10"); + + cd.getOrAddComponent("HistoryFlexSaveLoad").putAll(opts); + 
appState.updateResourceDefinitions(cd.confTree); + createAndStartNodes(); + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + RoleHistory roleHistory = appState.getRoleHistory(); + Path history = roleHistory.saveHistory(0x0002); + //now reset the app state + File historyWorkDir2 = new File("target/history" + getTestName() + + "-0002"); + Path historyPath2 = new Path(historyWorkDir2.toURI()); + appState = new MockAppState(); + AppStateBindingInfo binding2 = buildBindingInfo(); + binding2.instanceDefinition = factory.newInstanceDefinition(0, 0, 0); + binding2.historyPath = historyPath2; + appState.buildInstance(binding2); + // on this read there won't be the right number of roles + LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history); + assertEquals(0, appState.getRoleHistory().rebuild(loadedRoleHistory)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java new file mode 100644 index 0000000..2e6244f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.slider.api.ClusterDescription; +import org.apache.slider.api.types.ApplicationLivenessInformation; +import org.apache.slider.core.exceptions.TriggerClusterTeardownException; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.CancelSingleRequest; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.ContainerAssignment; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class TestMockAppStateFlexing extends BaseMockAppStateTest implements + MockRoles { + + @Override + public String getTestName() { + return "TestMockAppStateFlexing"; + } + + @Test + public void testFlexDuringLaunchPhase() throws Throwable { + + // ask for one instance of role0 + getRole0Status().setDesired(1); + + List ops = appState.reviewRequestAndReleaseNodes(); + + // at this point there's now one request in the list + assertEquals(1, ops.size()); + // and in a liveness check, one outstanding + ApplicationLivenessInformation liveness = + appState.getApplicationLivenessInformation(); + assertEquals(1, liveness.requestsOutstanding); + assertFalse(liveness.allRequestsSatisfied); + 
+ List allocations = engine.execute(ops); + List assignments = new ArrayList<>(); + List releases = new ArrayList<>(); + appState.onContainersAllocated(allocations, assignments, releases); + assertEquals(1, assignments.size()); + ContainerAssignment assigned = assignments.get(0); + Container target = assigned.container; + RoleInstance ri = roleInstance(assigned); + + ops = appState.reviewRequestAndReleaseNodes(); + assertTrue(ops.isEmpty()); + + liveness = appState.getApplicationLivenessInformation(); + assertEquals(0, liveness.requestsOutstanding); + assertTrue(liveness.allRequestsSatisfied); + + //now this is the start point. + appState.containerStartSubmitted(target, ri); + + ops = appState.reviewRequestAndReleaseNodes(); + assertTrue(ops.isEmpty()); + + appState.innerOnNodeManagerContainerStarted(target.getId()); + } + + @Test + public void testFlexBeforeAllocationPhase() throws Throwable { + getRole0Status().setDesired(1); + + List ops = appState.reviewRequestAndReleaseNodes(); + assertFalse(ops.isEmpty()); + + // second scan will find the first run outstanding, so not re-issue + // any more container requests + List ops2 = appState.reviewRequestAndReleaseNodes(); + assertTrue(ops2.isEmpty()); + + // and in a liveness check, one outstanding + ApplicationLivenessInformation liveness = appState + .getApplicationLivenessInformation(); + assertEquals(1, liveness.requestsOutstanding); + assertFalse(liveness.allRequestsSatisfied); + + appState.refreshClusterStatus(null); + ClusterDescription cd = appState.getClusterStatus(); + assertEquals(1, cd.liveness.requestsOutstanding); + + } + + + @Test + public void testFlexDownTwice() throws Throwable { + int r0 = 6; + int r1 = 0; + int r2 = 0; + getRole0Status().setDesired(r0); + getRole1Status().setDesired(r1); + getRole2Status().setDesired(r2); + List instances = createAndStartNodes(); + + int clusterSize = r0 + r1 + r2; + assertEquals(instances.size(), clusterSize); + log.info("shrinking cluster"); + r0 = 4; + 
getRole0Status().setDesired(r0); + List completionResults = new ArrayList<>(); + instances = createStartAndStopNodes(completionResults); + assertEquals(0, instances.size()); + // assert two nodes were released + assertEquals(2, completionResults.size()); + + // no-op review + completionResults = new ArrayList<>(); + instances = createStartAndStopNodes(completionResults); + assertEquals(0, instances.size()); + // assert two nodes were released + assertEquals(0, completionResults.size()); + + + // now shrink again + getRole0Status().setDesired(1); + completionResults = new ArrayList<>(); + instances = createStartAndStopNodes(completionResults); + assertEquals(0, instances.size()); + // assert two nodes were released + assertEquals(3, completionResults.size()); + + } + + @Test + public void testFlexNegative() throws Throwable { + int r0 = 6; + int r1 = 0; + int r2 = 0; + getRole0Status().setDesired(r0); + getRole1Status().setDesired(r1); + getRole2Status().setDesired(r2); + List instances = createAndStartNodes(); + + int clusterSize = r0 + r1 + r2; + assertEquals(instances.size(), clusterSize); + log.info("shrinking cluster"); + getRole0Status().setDesired(-2); + List completionResults = new ArrayList<>(); + try { + createStartAndStopNodes(completionResults); + fail("expected an exception"); + } catch (TriggerClusterTeardownException e) { + } + + } + + @Test + public void testCancelWithRequestsOutstanding() throws Throwable { + // flex cluster size before the original set were allocated + + + getRole0Status().setDesired(6); + // build the ops + List ops = appState.reviewRequestAndReleaseNodes(); + // here the data structures exist + + // go down + getRole0Status().setDesired(3); + List ops2 = appState.reviewRequestAndReleaseNodes(); + assertEquals(3, ops2.size()); + for (AbstractRMOperation op : ops2) { + assertTrue(op instanceof CancelSingleRequest); + } + + } + +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java new file mode 100644 index 0000000..391f742 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java @@ -0,0 +1,377 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockFactory; +import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.CancelSingleRequest; +import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.ContainerAssignment; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.apache.slider.server.appmaster.state.ContainerPriority.buildPriority; +import static org.apache.slider.server.appmaster.state.ContainerPriority.extractRole; + +public class TestMockAppStateRMOperations extends BaseMockAppStateTest + implements MockRoles { + + @Override + public String getTestName() { + return "TestMockAppStateRMOperations"; + } + + @Test + public void testPriorityOnly() throws Throwable { + assertEquals(5, extractRole(buildPriority(5, false))); + } + + @Test + public void testPriorityRoundTrip() throws Throwable { + assertEquals(5, extractRole(buildPriority(5, false))); + } + + @Test + public void testPriorityRoundTripWithRequest() throws Throwable { + int priority = buildPriority(5, false); + assertEquals(5, 
extractRole(priority)); + } + + @Test + public void testMockAddOp() throws Throwable { + getRole0Status().setDesired(1); + List ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 1); + ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0); + int priority = operation.getRequest().getPriority().getPriority(); + assertEquals(extractRole(priority), MockFactory.PROVIDER_ROLE0.id); + MockRMOperationHandler handler = new MockRMOperationHandler(); + handler.execute(ops); + + AbstractRMOperation op = handler.operations.get(0); + assertTrue(op instanceof ContainerRequestOperation); + } + + /** + * Test of a flex up and down op which verifies that outstanding + * requests are cancelled first. + *
    + *
  1. request 5 nodes, assert 5 request made
  2. + *
  3. allocate 1 of them
  4. + *
  5. flex cluster size to 3
  6. + *
  7. assert this generates 2 cancel requests
  8. + *
+ */ + @Test + public void testRequestThenCancelOps() throws Throwable { + RoleStatus role0 = getRole0Status(); + role0.setDesired(5); + List ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 5); + // now 5 outstanding requests. + assertEquals(5, role0.getRequested()); + + // allocate one + List processed = new ArrayList<>(); + processed.add(ops.get(0)); + List released = new ArrayList<>(); + List completionResults = new ArrayList<>(); + submitOperations(processed, released); + List instances = createAndSubmitNodes(released); + processSubmissionOperations(instances, completionResults, released); + + + // four outstanding + assertEquals(4, role0.getRequested()); + + // flex cluster to 3 + role0.setDesired(3); + ops = appState.reviewRequestAndReleaseNodes(); + + // expect two cancel operation from review + assertListLength(ops, 2); + for (AbstractRMOperation op : ops) { + assertTrue(op instanceof CancelSingleRequest); + } + + MockRMOperationHandler handler = new MockRMOperationHandler(); + handler.availableToCancel = 4; + handler.execute(ops); + assertEquals(2, handler.availableToCancel); + assertEquals(2, role0.getRequested()); + + // flex down one more + role0.setDesired(2); + ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 1); + for (AbstractRMOperation op : ops) { + assertTrue(op instanceof CancelSingleRequest); + } + handler.execute(ops); + assertEquals(1, handler.availableToCancel); + assertEquals(1, role0.getRequested()); + } + + @Test + public void testCancelNoActualContainers() throws Throwable { + RoleStatus role0 = getRole0Status(); + role0.setDesired(5); + List ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 5); + // now 5 outstanding requests. 
+ assertEquals(5, role0.getRequested()); + role0.setDesired(0); + ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 5); + + } + + + @Test + public void testFlexDownOutstandingRequests() throws Throwable { + // engine only has two nodes, so > 2 will be outstanding + engine = new MockYarnEngine(1, 2); + List ops; + // role: desired = 2, requested = 1, actual=1 + RoleStatus role0 = getRole0Status(); + role0.setDesired(4); + createAndSubmitNodes(); + + assertEquals(2, role0.getRequested()); + assertEquals(2, role0.getActual()); + // there are now two outstanding, two actual + // Release 3 and verify that the two + // cancellations were combined with a release + role0.setDesired(1); + assertEquals(-3, role0.getDelta()); + ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 3); + int numCancel = 0; + int numRelease = 0; + for (AbstractRMOperation op : ops) { + if (op instanceof CancelSingleRequest) { + numCancel++; + } + if (op instanceof ContainerReleaseOperation) { + numRelease++; + } + } + assertEquals(2, numCancel); + assertEquals(1, numRelease); + assertEquals(0, role0.getRequested()); + assertEquals(1, role0.getReleasing()); + } + + @Test + public void testCancelAllOutstandingRequests() throws Throwable { + + // role: desired = 2, requested = 1, actual=1 + RoleStatus role0 = getRole0Status(); + role0.setDesired(2); + List ops; + ops = appState.reviewRequestAndReleaseNodes(); + int count = 0; + for (AbstractRMOperation op : ops) { + if (op instanceof ContainerRequestOperation) { + count++; + } + } + assertEquals(2, count); + + // there are now two outstanding, two actual + // Release 3 and verify that the two + // cancellations were combined with a release + role0.setDesired(0); + ops = appState.reviewRequestAndReleaseNodes(); + assertEquals(2, ops.size()); + + for (AbstractRMOperation op : ops) { + assertTrue(op instanceof CancelSingleRequest); + } + } + + + @Test + public void testFlexUpOutstandingRequests() throws Throwable 
{ + + List ops; + // role: desired = 2, requested = 1, actual=1 + RoleStatus role0 = getRole0Status(); + role0.setDesired(2); + role0.incActual(); + role0.incRequested(); + + + + // flex up 2 nodes, yet expect only one node to be requested, + // as the outstanding request is taken into account + role0.setDesired(4); + role0.incRequested(); + + assertEquals(1, role0.getActual()); + assertEquals(2, role0.getRequested()); + assertEquals(3, role0.getActualAndRequested()); + assertEquals(1, role0.getDelta()); + ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 1); + assertTrue(ops.get(0) instanceof ContainerRequestOperation); + assertEquals(3, role0.getRequested()); + } + + @Test + public void testFlexUpNoSpace() throws Throwable { + // engine only has two nodes, so > 2 will be outstanding + engine = new MockYarnEngine(1, 2); + // role: desired = 2, requested = 1, actual=1 + RoleStatus role0 = getRole0Status(); + role0.setDesired(4); + createAndSubmitNodes(); + + assertEquals(2, role0.getRequested()); + assertEquals(2, role0.getActual()); + role0.setDesired(8); + assertEquals(4, role0.getDelta()); + createAndSubmitNodes(); + assertEquals(6, role0.getRequested()); + } + + + @Test + public void testAllocateReleaseOp() throws Throwable { + getRole0Status().setDesired(1); + + List ops = appState.reviewRequestAndReleaseNodes(); + ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0); + AMRMClient.ContainerRequest request = operation.getRequest(); + Container cont = engine.allocateContainer(request); + List allocated = new ArrayList<>(); + allocated.add(cont); + List assignments = new ArrayList<>(); + List operations = new ArrayList<>(); + appState.onContainersAllocated(allocated, assignments, operations); + + assertListLength(ops, 1); + assertListLength(assignments, 1); + ContainerAssignment assigned = assignments.get(0); + Container target = assigned.container; + assertEquals(target.getId(), cont.getId()); + int roleId = 
assigned.role.getPriority(); + assertEquals(roleId, extractRole(request.getPriority())); + assertEquals(assigned.role.getName(), ROLE0); + RoleInstance ri = roleInstance(assigned); + //tell the app it arrived + appState.containerStartSubmitted(target, ri); + appState.innerOnNodeManagerContainerStarted(target.getId()); + assertEquals(1, getRole0Status().getStarted()); + + //now release it by changing the role status + getRole0Status().setDesired(0); + ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 1); + + assertTrue(ops.get(0) instanceof ContainerReleaseOperation); + ContainerReleaseOperation release = (ContainerReleaseOperation) ops.get(0); + assertEquals(release.getContainerId(), cont.getId()); + } + + @Test + public void testComplexAllocation() throws Throwable { + getRole0Status().setDesired(1); + getRole1Status().setDesired(3); + + List ops = appState.reviewRequestAndReleaseNodes(); + List allocations = engine.execute(ops); + List assignments = new ArrayList<>(); + List releases = new ArrayList<>(); + appState.onContainersAllocated(allocations, assignments, releases); + // we expect four release requests here for all the allocated containers + assertListLength(releases, 4); + for (AbstractRMOperation op : releases) { + assertTrue(op instanceof CancelSingleRequest); + } + assertListLength(assignments, 4); + for (ContainerAssignment assigned : assignments) { + Container target = assigned.container; + RoleInstance ri = roleInstance(assigned); + appState.containerStartSubmitted(target, ri); + } + //insert some async operation here + for (ContainerAssignment assigned : assignments) { + Container target = assigned.container; + appState.innerOnNodeManagerContainerStarted(target.getId()); + } + assertEquals(4, engine.containerCount()); + getRole1Status().setDesired(0); + ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 3); + allocations = engine.execute(ops); + assertEquals(1, engine.containerCount()); + + 
appState.onContainersAllocated(allocations, assignments, releases); + assertTrue(assignments.isEmpty()); + assertTrue(releases.isEmpty()); + } + + @Test + public void testDoubleNodeManagerStartEvent() throws Throwable { + getRole0Status().setDesired(1); + + List ops = appState.reviewRequestAndReleaseNodes(); + List allocations = engine.execute(ops); + List assignments = new ArrayList<>(); + List releases = new ArrayList<>(); + appState.onContainersAllocated(allocations, assignments, releases); + assertListLength(assignments, 1); + ContainerAssignment assigned = assignments.get(0); + Container target = assigned.container; + RoleInstance ri = roleInstance(assigned); + appState.containerStartSubmitted(target, ri); + RoleInstance ri2 = appState.innerOnNodeManagerContainerStarted(target + .getId()); + assertEquals(ri2, ri); + //try a second time, expect an error + try { + appState.innerOnNodeManagerContainerStarted(target.getId()); + fail("Expected an exception"); + } catch (RuntimeException expected) { + // expected + } + //and non-faulter should not downgrade to a null + log.warn("Ignore any exception/stack trace that appears below"); + log.warn("==============================================================="); + RoleInstance ri3 = appState.onNodeManagerContainerStarted(target.getId()); + log.warn("==============================================================="); + log.warn("Ignore any exception/stack trace that appeared above"); + assertNull(ri3); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java new file mode 100644 index 0000000..c9ffc2d --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.slider.api.ClusterDescription; +import org.apache.slider.api.StatusKeys; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockAppState; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; +import org.apache.slider.server.appmaster.state.NodeEntry; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.NodeMap; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +/** + * Test that app state is rebuilt on a restart + */ +public class TestMockAppStateRebuildOnAMRestart extends BaseMockAppStateTest + implements MockRoles { + + @Override + public String getTestName() { + return "TestMockAppStateRebuildOnAMRestart"; + } + + @Test + public void testRebuild() throws Throwable { + + int r0 = 1; + int r1 = 2; + int r2 = 3; + getRole0Status().setDesired(r0); + getRole1Status().setDesired(r1); + getRole2Status().setDesired(r2); + List instances = createAndStartNodes(); + + int clusterSize = r0 + r1 + r2; + assertEquals(instances.size(), clusterSize); + + //clone the list + List containers = new ArrayList<>(); + for (RoleInstance ri : instances) { + containers.add(ri.container); + } + NodeMap nodemap = appState.getRoleHistory().cloneNodemap(); + + //and rebuild + + AppStateBindingInfo bindingInfo = buildBindingInfo(); + bindingInfo.instanceDefinition = factory.newInstanceDefinition(r0, r1, r2); + bindingInfo.liveContainers = containers; + appState = new MockAppState(bindingInfo); + + assertEquals(appState.getStartedCountainerCount(), clusterSize); + + appState.getRoleHistory().dump(); + + //check that the app state direct 
structures match + List r0live = appState.enumLiveNodesInRole(ROLE0); + List r1live = appState.enumLiveNodesInRole(ROLE1); + List r2live = appState.enumLiveNodesInRole(ROLE2); + + assertEquals(r0, r0live.size()); + assertEquals(r1, r1live.size()); + assertEquals(r2, r2live.size()); + + //now examine the role history + NodeMap newNodemap = appState.getRoleHistory().cloneNodemap(); + + for (NodeInstance nodeInstance : newNodemap.values()) { + String hostname = nodeInstance.hostname; + NodeInstance orig = nodemap.get(hostname); + assertNotNull("Null entry in original nodemap for " + hostname, orig); + + for (int i = 0; i < ROLE_COUNT; i++) { + assertEquals(nodeInstance.getActiveRoleInstances(i), orig.getActiveRoleInstances(i)); + NodeEntry origRE = orig.getOrCreate(i); + NodeEntry newRE = nodeInstance.getOrCreate(i); + assertEquals(origRE.getLive(), newRE.getLive()); + assertEquals(0, newRE.getStarting()); + } + } + assertEquals(0, appState.reviewRequestAndReleaseNodes().size()); + + ClusterDescription status = appState.getClusterStatus(); + // verify the AM restart container count was set + String restarted = status.getInfo(StatusKeys.INFO_CONTAINERS_AM_RESTART); + assertNotNull(restarted); + //and that the count == 1 master + the region servers + assertEquals(Integer.parseInt(restarted), containers.size()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java new file mode 100644 index 0000000..ceb04c4 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.CancelSingleRequest; +import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.apache.slider.server.appmaster.state.ContainerAssignment; +import org.apache.slider.server.appmaster.state.RoleHistoryUtils; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.apache.slider.server.appmaster.state.ContainerPriority.extractRole; + +/** + * Test that the app state lets you ask for nodes, get a specific host, + * release it and then get that one back again. 
+ */ +public class TestMockAppStateRolePlacement extends BaseMockAppStateTest + implements MockRoles { + + @Override + public String getTestName() { + return "TestMockAppStateRolePlacement"; + } + + + @Test + public void testAllocateReleaseRealloc() throws Throwable { + getRole0Status().setDesired(1); + + List ops = appState.reviewRequestAndReleaseNodes(); + ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0); + AMRMClient.ContainerRequest request = operation.getRequest(); + assertTrue(request.getRelaxLocality()); + assertNull(request.getNodes()); + assertNull(request.getRacks()); + assertNotNull(request.getCapability()); + + Container allocated = engine.allocateContainer(request); + List assignments = new ArrayList<>(); + List releaseOperations = new ArrayList<>(); + appState.onContainersAllocated(Arrays.asList((Container)allocated), + assignments, releaseOperations); + // verify the release matches the allocation + assertEquals(releaseOperations.size(), 1); + CancelSingleRequest cancelOp = (CancelSingleRequest)releaseOperations.get(0); + assertNotNull(cancelOp.getRequest()); + assertNotNull(cancelOp.getRequest().getCapability()); + assertEquals(cancelOp.getRequest().getCapability(), allocated.getResource()); + // now the assignment + assertEquals(assignments.size(), 1); + ContainerAssignment assigned = assignments.get(0); + Container container = assigned.container; + assertEquals(container.getId(), allocated.getId()); + int roleId = assigned.role.getPriority(); + assertEquals(roleId, extractRole(request.getPriority())); + assertEquals(assigned.role.getName(), ROLE0); + String containerHostname = RoleHistoryUtils.hostnameOf(container); + RoleInstance ri = roleInstance(assigned); + //tell the app it arrived + appState.containerStartSubmitted(container, ri); + assertNotNull(appState.onNodeManagerContainerStarted(container.getId())); + assertEquals(getRole0Status().getStarted(), 1); + ops = appState.reviewRequestAndReleaseNodes(); + 
assertEquals(ops.size(), 0); + + //now it is surplus + getRole0Status().setDesired(0); + ops = appState.reviewRequestAndReleaseNodes(); + ContainerReleaseOperation release = (ContainerReleaseOperation) ops.get(0); + + assertEquals(release.getContainerId(), container.getId()); + engine.execute(ops); + assertNotNull(appState.onCompletedNode(containerStatus(container.getId())) + .roleInstance); + + //view the world + appState.getRoleHistory().dump(); + + //now ask for a new one + getRole0Status().setDesired(1); + ops = appState.reviewRequestAndReleaseNodes(); + assertEquals(ops.size(), 1); + operation = (ContainerRequestOperation) ops.get(0); + AMRMClient.ContainerRequest request2 = operation.getRequest(); + assertNotNull(request2); + assertEquals(request2.getNodes().get(0), containerHostname); + assertFalse(request2.getRelaxLocality()); + engine.execute(ops); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java new file mode 100644 index 0000000..175c834 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +/** + * Test that if you have >1 role, the right roles are chosen for release. 
+ */ +public class TestMockAppStateRoleRelease extends BaseMockAppStateTest + implements MockRoles { + + @Override + public String getTestName() { + return "TestMockAppStateRoleRelease"; + } + + /** + * Small cluster with multiple containers per node, + * to guarantee many container allocations on each node + * @return + */ + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(4, 4); + } + + @Test + public void testAllocateReleaseRealloc() throws Throwable { + /** + * Allocate to all nodes + */ + getRole0Status().setDesired(6); + getRole1Status().setDesired(5); + getRole2Status().setDesired(4); + List instances = createAndStartNodes(); + assertEquals(instances.size(), 15); + + //now it is surplus + getRole0Status().setDesired(0); + List ops = appState.reviewRequestAndReleaseNodes(); + + List released = new ArrayList<>(); + engine.execute(ops, released); + List ids = extractContainerIds(instances, 0); + for (ContainerId cid : released) { + assertNotNull(appState.onCompletedNode(containerStatus(cid)) + .roleInstance); + assertTrue(ids.contains(cid)); + } + + //view the world + appState.getRoleHistory().dump(); + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java new file mode 100644 index 0000000..e9229cb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.api.RoleKeys; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.conf.ConfTree; +import org.apache.slider.core.conf.ConfTreeOperations; +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.core.exceptions.SliderInternalStateException; +import org.apache.slider.core.exceptions.TriggerClusterTeardownException; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockAppState; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; +import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; + +/** + * Test dynamic flexing (add role, flex up, flex down) of components when unique component names are enabled.
+ */ +public class TestMockAppStateUniqueNames extends BaseMockAppStateTest + implements MockRoles { + + @Override + public String getTestName() { + return "TestMockAppStateUniqueNames"; + } + + /** + * Small cluster with multiple containers per node, + * to guarantee many container allocations on each node + * @return + */ + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(4, 4); + } + + @Override + public AppStateBindingInfo buildBindingInfo() { + AppStateBindingInfo bindingInfo = super.buildBindingInfo(); + bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector(); + return bindingInfo; + } + + @Override + public AggregateConf buildInstanceDefinition() { + AggregateConf instance = factory.newInstanceDefinition(0, 0, 0); + + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "1"); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "6"); + opts.put(ResourceKeys.YARN_MEMORY, "1024"); + opts.put(ResourceKeys.YARN_CORES, "2"); + opts.put(ResourceKeys.UNIQUE_NAMES, "true"); + + instance.getResourceOperations().getOrAddComponent("group1").putAll(opts); + return instance; + } + + private ConfTreeOperations init() + throws TriggerClusterTeardownException, SliderInternalStateException { + createAndStartNodes(); + ConfTree resources = appState.getInstanceDefinition().getResources(); + return new ConfTreeOperations(resources); + } + + private static void checkRole(MockAppState appState, + String roleName, + String roleGroup, + Map expectedOpts) + throws BadConfigException { + + for (String key : expectedOpts.keySet()) { + if (ResourceKeys.COMPONENT_PRIORITY.equals(key) || + ResourceKeys.COMPONENT_INSTANCES.equals(key)) { + continue; + } + assertEquals(expectedOpts.get(key), appState.getClusterStatus() + .getMandatoryRoleOpt(roleName, key)); + } + assertEquals(1, appState.getClusterStatus().getMandatoryRoleOptInt( + roleName, ResourceKeys.COMPONENT_INSTANCES)); + assertEquals(roleGroup, 
appState.getClusterStatus().getMandatoryRoleOpt( + roleName, RoleKeys.ROLE_GROUP)); + } + + @Test + public void testDynamicFlexAddRole() throws Throwable { + ConfTreeOperations cd = init(); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "2"); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "7"); + opts.put(ResourceKeys.YARN_MEMORY, "384"); + opts.put(ResourceKeys.YARN_CORES, "4"); + opts.put(ResourceKeys.UNIQUE_NAMES, "true"); + + cd.getOrAddComponent("group2").putAll(opts); + appState.updateResourceDefinitions(cd.confTree); + createAndStartNodes(); + dumpClusterDescription("updated CD", appState.getClusterStatus()); + assertEquals(1, appState.lookupRoleStatus("group11").getDesired()); + assertEquals(1, appState.lookupRoleStatus("group21").getDesired()); + assertEquals(1, appState.lookupRoleStatus("group22").getDesired()); + assertEquals(6, appState.lookupRoleStatus("group11").getPriority()); + assertEquals(7, appState.lookupRoleStatus("group21").getPriority()); + assertEquals(8, appState.lookupRoleStatus("group22").getPriority()); + assertEquals(1024, appState.lookupRoleStatus("group11").getResourceRequirements().getMemory()); + assertEquals(384, appState.lookupRoleStatus("group21").getResourceRequirements().getMemory()); + assertEquals(384, appState.lookupRoleStatus("group22").getResourceRequirements().getMemory()); + assertEquals(2, appState.lookupRoleStatus("group11").getResourceRequirements().getVirtualCores()); + assertEquals(4, appState.lookupRoleStatus("group21").getResourceRequirements().getVirtualCores()); + assertEquals(4, appState.lookupRoleStatus("group22").getResourceRequirements().getVirtualCores()); + + appState.refreshClusterStatus(); + checkRole(appState, "group21", "group2", opts); + checkRole(appState, "group22", "group2", opts); + } + + @Test + public void testDynamicFlexDown() throws Throwable { + ConfTreeOperations cd = init(); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "0"); + 
opts.put(ResourceKeys.COMPONENT_PRIORITY, "6"); + opts.put(ResourceKeys.YARN_MEMORY, "384"); + opts.put(ResourceKeys.YARN_CORES, "4"); + opts.put(ResourceKeys.UNIQUE_NAMES, "true"); + + cd.getOrAddComponent("group1").putAll(opts); + appState.updateResourceDefinitions(cd.confTree); + createAndStartNodes(); + dumpClusterDescription("updated CD", appState.getClusterStatus()); + appState.lookupRoleStatus(6); + assertEquals(0, appState.lookupRoleStatus("group11").getDesired()); + assertEquals(6, appState.lookupRoleStatus("group11").getPriority()); + assertEquals(384, appState.lookupRoleStatus("group11").getResourceRequirements().getMemory()); + assertEquals(4, appState.lookupRoleStatus("group11").getResourceRequirements().getVirtualCores()); + } + + @Test + public void testDynamicFlexUp() throws Throwable { + ConfTreeOperations cd = init(); + Map opts = new HashMap<>(); + opts.put(ResourceKeys.COMPONENT_INSTANCES, "3"); + opts.put(ResourceKeys.COMPONENT_PRIORITY, "6"); + opts.put(ResourceKeys.YARN_MEMORY, "640"); + opts.put(ResourceKeys.YARN_CORES, "8"); + opts.put(ResourceKeys.UNIQUE_NAMES, "true"); + + cd.getOrAddComponent("group1").putAll(opts); + appState.updateResourceDefinitions(cd.confTree); + createAndStartNodes(); + dumpClusterDescription("updated CD", appState.getClusterStatus()); + appState.lookupRoleStatus(6); + appState.lookupRoleStatus(7); + appState.lookupRoleStatus(8); + assertEquals(1, appState.lookupRoleStatus("group11").getDesired()); + assertEquals(1, appState.lookupRoleStatus("group12").getDesired()); + assertEquals(1, appState.lookupRoleStatus("group13").getDesired()); + assertEquals(6, appState.lookupRoleStatus("group11").getPriority()); + assertEquals(7, appState.lookupRoleStatus("group12").getPriority()); + assertEquals(8, appState.lookupRoleStatus("group13").getPriority()); + assertEquals(640, appState.lookupRoleStatus("group11").getResourceRequirements().getMemory()); + assertEquals(640, 
appState.lookupRoleStatus("group12").getResourceRequirements().getMemory()); + assertEquals(640, appState.lookupRoleStatus("group13").getResourceRequirements().getMemory()); + assertEquals(8, appState.lookupRoleStatus("group11").getResourceRequirements().getVirtualCores()); + assertEquals(8, appState.lookupRoleStatus("group12").getResourceRequirements().getVirtualCores()); + assertEquals(8, appState.lookupRoleStatus("group13").getResourceRequirements().getVirtualCores()); + + appState.refreshClusterStatus(); + checkRole(appState, "group11", "group1", opts); + checkRole(appState, "group12", "group1", opts); + checkRole(appState, "group13", "group1", opts); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java new file mode 100644 index 0000000..e8b83a8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.core.conf.ConfTree; +import org.apache.slider.core.conf.ConfTreeOperations; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockAppState; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.junit.Test; + +import java.util.List; + +/** + * Test the container resource allocation logic + */ +public class TestMockContainerResourceAllocations extends BaseMockAppStateTest { + + @Test + public void testNormalAllocations() throws Throwable { + ConfTree clusterSpec = factory.newConfTree(1, 0, 0); + ConfTreeOperations cto = new ConfTreeOperations(clusterSpec); + + cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_MEMORY, 512); + cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_CORES, 2); + appState.updateResourceDefinitions(clusterSpec); + List ops = appState.reviewRequestAndReleaseNodes(); + assertEquals(ops.size(), 1); + ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0); + Resource requirements = operation.getRequest().getCapability(); + assertEquals(requirements.getMemory(), 512); + assertEquals(requirements.getVirtualCores(), 2); + } + + @Test + public void 
testMaxMemAllocations() throws Throwable { + ConfTree clusterSpec = factory.newConfTree(1, 0, 0); + ConfTreeOperations cto = new ConfTreeOperations(clusterSpec); + + cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_MEMORY, + ResourceKeys.YARN_RESOURCE_MAX); + cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_CORES, 2); + appState.updateResourceDefinitions(clusterSpec); + List ops = appState.reviewRequestAndReleaseNodes(); + assertEquals(ops.size(), 1); + ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0); + Resource requirements = operation.getRequest().getCapability(); + assertEquals(requirements.getMemory(), MockAppState.RM_MAX_RAM); + assertEquals(requirements.getVirtualCores(), 2); + } + + @Test + public void testMaxCoreAllocations() throws Throwable { + ConfTree clusterSpec = factory.newConfTree(1, 0, 0); + ConfTreeOperations cto = new ConfTreeOperations(clusterSpec); + cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_MEMORY, + 512); + cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_CORES, + ResourceKeys.YARN_RESOURCE_MAX); + appState.updateResourceDefinitions(clusterSpec); + List ops = appState.reviewRequestAndReleaseNodes(); + assertEquals(ops.size(), 1); + ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0); + Resource requirements = operation.getRequest().getCapability(); + assertEquals(requirements.getMemory(), 512); + assertEquals(requirements.getVirtualCores(), MockAppState.RM_MAX_CORES); + } + + @Test + public void testMaxDefaultAllocations() throws Throwable { + + ConfTree clusterSpec = factory.newConfTree(1, 0, 0); + appState.updateResourceDefinitions(clusterSpec); + List ops = appState.reviewRequestAndReleaseNodes(); + assertEquals(ops.size(), 1); + ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0); + Resource requirements = operation.getRequest().getCapability(); + assertEquals(requirements.getMemory(), ResourceKeys.DEF_YARN_MEMORY); + 
assertEquals(requirements.getVirtualCores(), ResourceKeys.DEF_YARN_CORES); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java new file mode 100644 index 0000000..0fffe52 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java @@ -0,0 +1,149 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.slider.server.appmaster.model.mock.MockFactory; +import org.apache.slider.server.appmaster.model.mock.MockRoles; +import org.apache.slider.server.appmaster.model.mock.MockYarnEngine; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.AppState.NodeUpdatedOutcome; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +/** + * Test Anti-affine placement + */ +public class TestMockLabelledAAPlacement extends BaseMockAppStateAATest + implements MockRoles { + + private int NODES = 3; + private int GPU_NODES = 2; + private String HOST0 = "00000000"; + private String HOST1 = "00000001"; + + + @Override + public void setup() throws Exception { + super.setup(); + // node 1 is GPU + + updateNodes(MockFactory.instance.newNodeReport(HOST0, NodeState.RUNNING, + LABEL_GPU)); + updateNodes(MockFactory.instance.newNodeReport(HOST1, NodeState.RUNNING, + LABEL_GPU)); + } + + @Override + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(NODES, 8); + } + + void assertAllContainersAA() { + assertAllContainersAA(gpuRole.getKey()); + } + + /** + * + * @throws Throwable + */ + @Test + public void testAskForTooMany() throws Throwable { + + describe("Ask for 1 more than the no of available nodes;" + + " expect the final request to be unsatisfied until the cluster " + + "changes size"); + //more than expected + int size = GPU_NODES; + gpuRole.setDesired(size + 1); + + List operations = appState.reviewRequestAndReleaseNodes(); + assertTrue(gpuRole.isAARequestOutstanding()); + + assertEquals(gpuRole.getPendingAntiAffineRequests(), size); + for (int i = 0; i < 
size; i++) { + String iter = "Iteration " + i + " role = " + aaRole; + describe(iter); + List operationsOut = new ArrayList<>(); + + List roleInstances = submitOperations(operations, + EMPTY_ID_LIST, operationsOut); + // one instance per request + assertEquals(1, roleInstances.size()); + appState.onNodeManagerContainerStarted(roleInstances.get(0) + .getContainerId()); + assertAllContainersAA(); + // there should be none left + log.debug(nodeInformationSnapshotAsString()); + operations = operationsOut; + if (i + 1 < size) { + assertEquals(2, operations.size()); + } else { + assertEquals(1, operations.size()); + } + } + // expect an outstanding AA request to be unsatisfied + assertTrue(gpuRole.getActual() < gpuRole.getDesired()); + assertEquals(0, gpuRole.getRequested()); + assertFalse(gpuRole.isAARequestOutstanding()); + List allocatedContainers = engine.execute(operations, EMPTY_ID_LIST); + assertEquals(0, allocatedContainers.size()); + // in a review now, no more requests can be generated, as there is no space for AA placements, + // even though there is cluster capacity + assertEquals(0, appState.reviewRequestAndReleaseNodes().size()); + + // switch node 2 into being labelled + NodeUpdatedOutcome outcome = updateNodes(MockFactory.instance. 
+ newNodeReport("00000002", NodeState.RUNNING, "gpu")); + + assertEquals(NODES, cloneNodemap().size()); + assertTrue(outcome.clusterChanged); + // no active calls to empty + assertTrue(outcome.operations.isEmpty()); + assertEquals(1, appState.reviewRequestAndReleaseNodes().size()); + } + + protected AppState.NodeUpdatedOutcome addNewNode() { + return updateNodes(MockFactory.instance.newNodeReport("00000004", + NodeState.RUNNING, "gpu")); + } + + @Test + public void testClusterSizeChangesDuringRequestSequence() throws Throwable { + describe("Change the cluster size where the cluster size changes during " + + "a test sequence."); + gpuRole.setDesired(GPU_NODES + 1); + List operations = appState + .reviewRequestAndReleaseNodes(); + assertTrue(gpuRole.isAARequestOutstanding()); + assertEquals(GPU_NODES, gpuRole.getPendingAntiAffineRequests()); + NodeUpdatedOutcome outcome = addNewNode(); + assertTrue(outcome.clusterChanged); + // one call to cancel + assertEquals(1, outcome.operations.size()); + // and on a review, one more to rebuild + assertEquals(1, appState.reviewRequestAndReleaseNodes().size()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java new file mode 100644 index 0000000..e23b6dc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.appstate; + +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; +import org.apache.slider.server.appmaster.state.ContainerPriority; +import org.apache.slider.server.appmaster.state.OutstandingRequest; +import org.apache.slider.test.SliderTestBase; +import org.junit.Test; + +public class TestOutstandingRequestValidation extends SliderTestBase { + + final String[] H1 = hosts("one"); + + @Test + public void testRelaxedNohostsOrLabels() throws Throwable { + createAndValidate(null, null, true); + } + + @Test + public void testRelaxedLabels() throws Throwable { + createAndValidate(null, "gpu", true); + } + + @Test + public void testNonRelaxedLabels() throws Throwable { + expectCreationFailure(null, "gpu", false); + } + + @Test + public void testRelaxedHostNoLabel() throws Throwable { + createAndValidate(H1, "", true); + } + + /** + * Use varargs for simple list to array conversion + * @param hostnames host names + * @return + */ + public static String[] hosts(String...hostnames) { + return hostnames; + } + + void expectCreationFailure( + String[] hosts, 
+ String labels, + boolean relaxLocality) { + try { + ContainerRequest result = createAndValidate(hosts, labels, relaxLocality); + fail("Expected an exception, got " + result); + } catch (IllegalArgumentException expected) { + assertTrue(expected.toString() + .contains("Can't turn off locality relaxation on a request with no " + + "location constraints")); + } + } + + + AMRMClient.ContainerRequest createAndValidate( + String[] hosts, + String labels, + boolean relaxLocality) { + int cores = 1; + int memory = 64; + int p = 1; + Priority pri = ContainerPriority.createPriority(p, !relaxLocality); + ContainerRequest issuedRequest = + newRequest(pri, hosts, labels, relaxLocality); + OutstandingRequest.validateContainerRequest(issuedRequest, p, ""); + return issuedRequest; + } + + AMRMClient.ContainerRequest newRequest( + Priority pri, + String[] hosts, + String labels, + boolean relaxLocality) { + int cores = 1; + int memory = 64; + Resource resource = Resource.newInstance(memory, cores); + return new AMRMClient.ContainerRequest(resource, + hosts, + null, + pri, + relaxLocality, + labels); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java new file mode 100644 index 0000000..d7f9bc6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java @@ -0,0 +1,266 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.slider.server.appmaster.model.history;

import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.slider.api.proto.Messages;
import org.apache.slider.api.types.NodeInformation;
import org.apache.slider.api.types.NodeInformationList;
import org.apache.slider.api.types.RestTypeMarshalling;
import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.NodeMap;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.test.SliderTestBase;
import org.junit.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

/**
 * Tests of anti-affine (AA) placement queries against the {@link NodeMap}:
 * which nodes are reported as able to host a role instance, given node
 * health, labels and the per-role lifecycle state of each node entry.
 */
public class TestRoleHistoryAA extends SliderTestBase {

  /** Hostnames used to populate the node maps. */
  List<String> hostnames = Arrays.asList("1", "2", "3");

  /** Unlabelled and GPU-labelled node maps; rebuilt for every test case. */
  NodeMap nodeMap, gpuNodeMap;

  RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);

  public TestRoleHistoryAA() throws BadConfigException {
  }

  @Override
  public void setup() throws Exception {
    super.setup();
    nodeMap = createNodeMap(hostnames, NodeState.RUNNING, "");
    gpuNodeMap = createNodeMap(hostnames, NodeState.RUNNING, "GPU");
  }

  @Test
  public void testFindNodesInFullCluster() throws Throwable {
    // all three hosts are healthy and unused, so all surface
    verifyResultSize(3, nodeMap.findAllNodesForRole(1, ""));
  }

  @Test
  public void testFindNodesInUnhealthyCluster() throws Throwable {
    // marking a node unhealthy drops it from the result set
    markNodeOneUnhealthy();
    verifyResultSize(2, nodeMap.findAllNodesForRole(1, ""));
  }

  public boolean markNodeOneUnhealthy() {
    return setNodeState(nodeMap.get("1"), NodeState.UNHEALTHY);
  }

  /** Push a fresh node report for {@code node} with the given state. */
  protected boolean setNodeState(NodeInstance node, NodeState state) {
    return node.updateNode(MockFactory.instance.newNodeReport(node.hostname,
        state, ""));
  }

  @Test
  public void testFindNoNodesWrongLabel() throws Throwable {
    // the node map is unlabelled, so a GPU-labelled query matches nothing
    verifyResultSize(0, nodeMap.findAllNodesForRole(1, "GPU"));
  }

  @Test
  public void testFindSomeNodesSomeLabel() throws Throwable {
    // relabel host "1" as GPU: only it matches, and starting an instance
    // on it removes it from subsequent AA queries
    update(nodeMap,
        Arrays.asList(MockFactory.instance.newNodeReport("1",
            NodeState.RUNNING, "GPU")));
    List<NodeInstance> gpuNodes = nodeMap.findAllNodesForRole(1, "GPU");
    verifyResultSize(1, gpuNodes);
    NodeInstance instance = gpuNodes.get(0);
    instance.getOrCreate(1).onStarting();
    assertFalse(instance.canHost(1, "GPU"));
    assertFalse(instance.canHost(1, ""));
    verifyResultSize(0, nodeMap.findAllNodesForRole(1, "GPU"));
  }

  @Test
  public void testFindNoNodesRightLabel() throws Throwable {
    // all nodes carry the GPU label, so all match a GPU query
    verifyResultSize(3, gpuNodeMap.findAllNodesForRole(1, "GPU"));
  }

  @Test
  public void testFindNoNodesNoLabel() throws Throwable {
    // an empty label matches regardless of node labels
    verifyResultSize(3, gpuNodeMap.findAllNodesForRole(1, ""));
  }

  @Test
  public void testFindNoNodesClusterRequested() throws Throwable {
    // a node with an outstanding request cannot host another AA instance
    for (NodeInstance ni : nodeMap.values()) {
      ni.getOrCreate(1).request();
    }
    assertNoAvailableNodes(1);
  }

  @Test
  public void testFindNoNodesClusterBusy() throws Throwable {
    // NOTE(review): body is identical to testFindNoNodesClusterRequested.
    // If "busy" was meant to mean "instances running", this should probably
    // use onStarting()/onStartCompleted() instead of request() -confirm
    // against the original Groovy test before changing.
    for (NodeInstance ni : nodeMap.values()) {
      ni.getOrCreate(1).request();
    }
    assertNoAvailableNodes(1);
  }

  /**
   * Tag all nodes as starting, then walk one of them through a bit
   * more of its lifecycle: started, released, then unhealthy.
   */
  @Test
  public void testFindNoNodesLifecycle() throws Throwable {
    for (NodeInstance ni : nodeMap.values()) {
      ni.getOrCreate(1).onStarting();
    }
    assertNoAvailableNodes(1);

    // walk one of the nodes through the lifecycle
    NodeInstance node1 = nodeMap.get("1");
    assertFalse(node1.canHost(1, ""));
    node1.get(1).onStartCompleted();
    assertFalse(node1.canHost(1, ""));
    assertNoAvailableNodes(1);
    node1.get(1).release();
    assertTrue(node1.canHost(1, ""));
    List<NodeInstance> list2 =
        verifyResultSize(1, nodeMap.findAllNodesForRole(1, ""));
    assertEquals("1", list2.get(0).hostname);

    // now tag that node as unhealthy and expect it to go away
    markNodeOneUnhealthy();
    assertNoAvailableNodes(1);
  }

  @Test
  public void testRolesIndependent() throws Throwable {
    NodeInstance node1 = nodeMap.get("1");
    NodeEntry role1 = node1.getOrCreate(1);
    // ensure the role-2 entry exists before the availability checks
    node1.getOrCreate(2);
    for (NodeInstance ni : nodeMap.values()) {
      ni.updateNode(MockFactory.instance.newNodeReport("0",
          NodeState.UNHEALTHY, ""));
    }
    assertNoAvailableNodes(1);
    assertNoAvailableNodes(2);
    assertTrue(setNodeState(node1, NodeState.RUNNING));
    // tag role 1 as busy; role 2 must stay available on the same node
    role1.onStarting();
    assertNoAvailableNodes(1);

    verifyResultSize(1, nodeMap.findAllNodesForRole(2, ""));
    assertTrue(node1.canHost(2, ""));
  }

  // (method name typo "Availablity" retained: renaming a @Test is a
  // reporting-visible change best made in its own commit)
  @Test
  public void testNodeEntryAvailablity() throws Throwable {
    NodeEntry entry = new NodeEntry(1);
    assertTrue(entry.isAvailable());
    entry.onStarting();
    assertFalse(entry.isAvailable());
    entry.onStartCompleted();
    assertFalse(entry.isAvailable());
    entry.release();
    assertTrue(entry.isAvailable());
    entry.onStarting();
    assertFalse(entry.isAvailable());
    entry.onStartFailed();
    assertTrue(entry.isAvailable());
  }

  @Test
  public void testNodeInstanceSerialization() throws Throwable {
    MockRoleHistory rh2 = new MockRoleHistory(new ArrayList<>());
    rh2.getOrCreateNodeInstance("localhost");
    NodeInstance instance = rh2.getOrCreateNodeInstance("localhost");
    instance.getOrCreate(1).onStartCompleted();
    Map<Integer, String> naming = Collections.singletonMap(1, "manager");
    NodeInformation ni = instance.serialize(naming);
    assertEquals(1, ni.entries.get("manager").live);
    NodeInformation ni2 = rh2.getNodeInformation("localhost", naming);
    assertEquals(1, ni2.entries.get("manager").live);
    Map<String, NodeInformation> info = rh2.getNodeInformationSnapshot(naming);
    assertEquals(1, info.get("localhost").entries.get("manager").live);
    NodeInformationList nil = new NodeInformationList(info.values());
    assertEquals(1, nil.get(0).entries.get("manager").live);

    // round-trip through the protobuf wire format
    Messages.NodeInformationProto nodeInformationProto =
        RestTypeMarshalling.marshall(ni);
    Messages.NodeEntryInformationProto entryProto = nodeInformationProto
        .getEntries(0);
    assertNotNull(entryProto);
    assertEquals(1, entryProto.getPriority());
    NodeInformation unmarshalled =
        RestTypeMarshalling.unmarshall(nodeInformationProto);
    assertEquals(ni.hostname, unmarshalled.hostname);
    assertTrue(unmarshalled.entries.keySet().containsAll(ni.entries.keySet()));
  }

  @Test
  public void testBuildRolenames() throws Throwable {
    // TODO(review): empty test body -implement or delete.
  }

  /** Assert that no node can take an instance of {@code role}. */
  public List<NodeInstance> assertNoAvailableNodes(int role) {
    return verifyResultSize(0, nodeMap.findAllNodesForRole(role, ""));
  }

  /** Assert the list has {@code size} entries, dumping it on mismatch. */
  List<NodeInstance> verifyResultSize(int size, List<NodeInstance> list) {
    if (list.size() != size) {
      for (NodeInstance ni : list) {
        log.error(ni.toFullString());
      }
    }
    assertEquals(size, list.size());
    return list;
  }

  NodeMap createNodeMap(List<NodeReport> nodeReports)
      throws BadConfigException {
    NodeMap newNodeMap = new NodeMap(1);
    update(newNodeMap, nodeReports);
    return newNodeMap;
  }

  protected boolean update(NodeMap map, List<NodeReport> nodeReports) {
    return map.buildOrUpdate(nodeReports);
  }

  NodeMap createNodeMap(List<String> hosts, NodeState state,
      String label) throws BadConfigException {
    return createNodeMap(MockFactory.instance.createNodeReports(hosts, state,
        label));
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.slider.server.appmaster.model.history;

import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.slider.api.ResourceKeys;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockContainer;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockNodeId;
import org.apache.slider.server.appmaster.state.ContainerOutcome;
import org.apache.slider.server.appmaster.state.ContainerPriority;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.NodeMap;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleInstance;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Test container events at the role history level -one below
 * the App State: request, assignment, start, failure and release
 * all update the per-node, per-role entries as expected.
 */
public class TestRoleHistoryContainerEvents extends BaseMockAppStateTest {

  @Override
  public String getTestName() {
    return "TestRoleHistoryContainerEvents";
  }

  NodeInstance age1Active4 = nodeInstance(1, 4, 0, 0);
  NodeInstance age2Active2 = nodeInstance(2, 2, 0, 1);
  NodeInstance age3Active0 = nodeInstance(3, 0, 0, 0);
  NodeInstance age4Active1 = nodeInstance(4, 1, 0, 0);
  NodeInstance age2Active0 = nodeInstance(2, 0, 0, 0);
  NodeInstance empty = new NodeInstance("empty", MockFactory.ROLE_COUNT);

  String roleName = "test";

  List<NodeInstance> nodes = Arrays.asList(age2Active2, age2Active0,
      age4Active1, age1Active4, age3Active0);
  RoleHistory roleHistory;

  Resource resource;

  /** Issue a container request for the role; return the AMRM request. */
  AMRMClient.ContainerRequest requestContainer(RoleStatus roleStatus) {
    return roleHistory.requestContainerForRole(roleStatus).getIssuedRequest();
  }

  /** Build a mock container on the given host with the given priority. */
  private MockContainer newContainerOnHost(String hostname, Priority priority) {
    MockContainer container = factory.newContainer();
    container.setNodeId(new MockNodeId(hostname, 0));
    container.setPriority(priority);
    return container;
  }

  @Override
  public void setup() throws Exception {
    super.setup();
    roleHistory = appState.getRoleHistory();
    roleHistory.insert(nodes);
    roleHistory.buildRecentNodeLists();
    resource = Resource.newInstance(ResourceKeys.DEF_YARN_CORES,
        ResourceKeys.DEF_YARN_MEMORY);
  }

  @Test
  public void testFindAndCreate() throws Throwable {
    int role = 0;
    RoleStatus roleStatus = appState.lookupRoleStatus(role);

    AMRMClient.ContainerRequest request = requestContainer(roleStatus);

    // the request should be placed on the one idle node
    List<String> requestNodes = request.getNodes();
    assertNotNull(requestNodes);
    assertEquals(1, requestNodes.size());
    String hostname = requestNodes.get(0);
    assertEquals(age3Active0.hostname, hostname);

    // build a container
    MockContainer container =
        newContainerOnHost(hostname, request.getPriority());
    roleHistory.onContainerAssigned(container);

    NodeMap nodemap = roleHistory.cloneNodemap();
    NodeInstance allocated = nodemap.get(hostname);
    NodeEntry roleEntry = allocated.get(role);
    assertEquals(1, roleEntry.getStarting());
    assertFalse(roleEntry.isAvailable());
    RoleInstance ri = new RoleInstance(container);
    // start it
    roleHistory.onContainerStartSubmitted(container, ri);
    // later, declare that it started
    roleHistory.onContainerStarted(container);
    assertEquals(0, roleEntry.getStarting());
    assertFalse(roleEntry.isAvailable());
    assertEquals(1, roleEntry.getActive());
    assertEquals(1, roleEntry.getLive());
  }

  @Test
  public void testCreateAndRelease() throws Throwable {
    int role = 1;
    RoleStatus roleStatus = appState.lookupRoleStatus(role);

    // verify it is empty
    assertTrue(roleHistory.listActiveNodes(role).isEmpty());

    AMRMClient.ContainerRequest request = requestContainer(roleStatus);

    // no placement history for this role: the request is unplaced
    assertNull(request.getNodes());

    // pick an idle host
    String hostname = age3Active0.hostname;

    // build a container
    MockContainer container =
        newContainerOnHost(hostname, request.getPriority());
    roleHistory.onContainerAssigned(container);

    NodeMap nodemap = roleHistory.cloneNodemap();
    NodeInstance allocated = nodemap.get(hostname);
    NodeEntry roleEntry = allocated.get(role);
    assertEquals(1, roleEntry.getStarting());
    assertFalse(roleEntry.isAvailable());
    RoleInstance ri = new RoleInstance(container);
    // start it
    roleHistory.onContainerStartSubmitted(container, ri);
    // later, declare that it started
    roleHistory.onContainerStarted(container);
    assertEquals(0, roleEntry.getStarting());
    assertFalse(roleEntry.isAvailable());
    assertEquals(1, roleEntry.getActive());
    assertEquals(1, roleEntry.getLive());

    // now pick that instance to destroy
    List<NodeInstance> activeNodes = roleHistory.listActiveNodes(role);

    assertEquals(1, activeNodes.size());
    NodeInstance target = activeNodes.get(0);
    assertEquals(allocated, target);
    roleHistory.onContainerReleaseSubmitted(container);
    assertEquals(1, roleEntry.getReleasing());
    assertEquals(1, roleEntry.getLive());
    assertEquals(0, roleEntry.getActive());

    // release completed
    roleHistory.onReleaseCompleted(container);
    assertEquals(0, roleEntry.getReleasing());
    assertEquals(0, roleEntry.getLive());
    assertEquals(0, roleEntry.getActive());

    // verify it is empty
    assertTrue(roleHistory.listActiveNodes(role).isEmpty());

    // ask for a container and expect to get the recently released one
    AMRMClient.ContainerRequest request2 = requestContainer(roleStatus);

    List<String> nodes2 = request2.getNodes();
    assertNotNull(nodes2);
    String hostname2 = nodes2.get(0);
    assertEquals(age3Active0.hostname, hostname2);
  }

  @Test
  public void testStartWithoutWarning() throws Throwable {
    int role = 0;
    // pick an idle host
    String hostname = age3Active0.hostname;
    // build a container that was never reported as assigned
    MockContainer container = newContainerOnHost(hostname,
        ContainerPriority.createPriority(0, false));

    NodeMap nodemap = roleHistory.cloneNodemap();
    NodeInstance allocated = nodemap.get(hostname);
    NodeEntry roleEntry = allocated.get(role);

    // tell RH that it started; entries must still be updated
    roleHistory.onContainerStarted(container);
    assertEquals(0, roleEntry.getStarting());
    assertFalse(roleEntry.isAvailable());
    assertEquals(1, roleEntry.getActive());
    assertEquals(1, roleEntry.getLive());
  }

  @Test
  public void testStartFailed() throws Throwable {
    int role = 0;
    RoleStatus roleStatus = appState.lookupRoleStatus(role);

    AMRMClient.ContainerRequest request = requestContainer(roleStatus);

    String hostname = request.getNodes().get(0);
    assertEquals(age3Active0.hostname, hostname);

    // build a container
    MockContainer container =
        newContainerOnHost(hostname, request.getPriority());
    roleHistory.onContainerAssigned(container);

    NodeMap nodemap = roleHistory.cloneNodemap();
    NodeInstance allocated = nodemap.get(hostname);
    NodeEntry roleEntry = allocated.get(role);
    assertEquals(1, roleEntry.getStarting());
    assertFalse(roleEntry.isAvailable());
    RoleInstance ri = new RoleInstance(container);
    // start it
    roleHistory.onContainerStartSubmitted(container, ri);
    // later, declare that it failed on startup
    assertFalse(roleHistory.onNodeManagerContainerStartFailed(container));
    assertEquals(0, roleEntry.getStarting());
    assertEquals(1, roleEntry.getStartFailed());
    assertEquals(1, roleEntry.getFailed());
    assertTrue(roleEntry.isAvailable());
    assertEquals(0, roleEntry.getActive());
    assertEquals(0, roleEntry.getLive());
  }

  @Test
  public void testStartFailedWithoutWarning() throws Throwable {
    int role = 0;
    RoleStatus roleStatus = appState.lookupRoleStatus(role);

    AMRMClient.ContainerRequest request = requestContainer(roleStatus);

    String hostname = request.getNodes().get(0);
    assertEquals(age3Active0.hostname, hostname);

    // build a container, but never report it as assigned
    MockContainer container =
        newContainerOnHost(hostname, request.getPriority());

    NodeMap nodemap = roleHistory.cloneNodemap();
    NodeInstance allocated = nodemap.get(hostname);
    NodeEntry roleEntry = allocated.get(role);

    assertFalse(roleHistory.onNodeManagerContainerStartFailed(container));
    assertEquals(0, roleEntry.getStarting());
    assertEquals(1, roleEntry.getStartFailed());
    assertEquals(1, roleEntry.getFailed());
    assertTrue(roleEntry.isAvailable());
    assertEquals(0, roleEntry.getActive());
    assertEquals(0, roleEntry.getLive());
  }

  @Test
  public void testContainerFailed() throws Throwable {
    // NOTE(review): this test DOES declare the container as starting;
    // the describe() text looks copy-pasted from the WithoutWarning
    // variant below -confirm intended wording.
    describe("fail a container without declaring it as starting");

    int role = 0;
    RoleStatus roleStatus = appState.lookupRoleStatus(role);

    AMRMClient.ContainerRequest request = requestContainer(roleStatus);

    String hostname = request.getNodes().get(0);
    assertEquals(age3Active0.hostname, hostname);

    // build a container
    MockContainer container =
        newContainerOnHost(hostname, request.getPriority());
    roleHistory.onContainerAssigned(container);

    NodeMap nodemap = roleHistory.cloneNodemap();
    NodeInstance allocated = nodemap.get(hostname);
    NodeEntry roleEntry = allocated.get(role);
    assertEquals(1, roleEntry.getStarting());
    assertFalse(roleEntry.isAvailable());
    RoleInstance ri = new RoleInstance(container);
    // start it
    roleHistory.onContainerStartSubmitted(container, ri);
    roleHistory.onContainerStarted(container);

    // later, declare that it failed
    roleHistory.onFailedContainer(
        container,
        false,
        ContainerOutcome.Failed);
    assertEquals(0, roleEntry.getStarting());
    assertTrue(roleEntry.isAvailable());
    assertEquals(0, roleEntry.getActive());
    assertEquals(0, roleEntry.getLive());
  }

  @Test
  public void testContainerFailedWithoutWarning() throws Throwable {
    describe("fail a container without declaring it as starting");
    int role = 0;
    RoleStatus roleStatus = appState.lookupRoleStatus(role);

    AMRMClient.ContainerRequest request = requestContainer(roleStatus);

    String hostname = request.getNodes().get(0);
    assertEquals(age3Active0.hostname, hostname);

    // build a container, but never report it as assigned or started
    MockContainer container =
        newContainerOnHost(hostname, request.getPriority());

    NodeMap nodemap = roleHistory.cloneNodemap();
    NodeInstance allocated = nodemap.get(hostname);
    NodeEntry roleEntry = allocated.get(role);
    assertTrue(roleEntry.isAvailable());
    roleHistory.onFailedContainer(
        container,
        false,
        ContainerOutcome.Failed);
    assertEquals(0, roleEntry.getStarting());
    assertEquals(1, roleEntry.getFailed());
    assertTrue(roleEntry.isAvailable());
    assertEquals(0, roleEntry.getActive());
    assertEquals(0, roleEntry.getLive());
  }

  @Test
  public void testAllocationListPrep() throws Throwable {
    describe("test prepareAllocationList");
    int role = 0;
    RoleStatus roleStatus = appState.lookupRoleStatus(role);

    AMRMClient.ContainerRequest request = requestContainer(roleStatus);

    String hostname = request.getNodes().get(0);
    assertEquals(age3Active0.hostname, hostname);

    MockContainer container1 =
        newContainerOnHost(hostname, Priority.newInstance(0));
    MockContainer container2 =
        newContainerOnHost(hostname, Priority.newInstance(1));

    // put containers in the list with role == 1 first
    List<Container> containers = Arrays.<Container>asList(container2,
        container1);
    List<Container> sortedContainers = roleHistory.prepareAllocationList(
        containers);

    // verify that the first container has role == 0 after sorting
    MockContainer c1 = (MockContainer) sortedContainers.get(0);
    assertEquals(0, c1.getPriority().getPriority());
    MockContainer c2 = (MockContainer) sortedContainers.get(1);
    assertEquals(1, c2.getPriority().getPriority());
  }

  @Test
  public void testNodeUpdated() throws Throwable {
    describe("fail a node");

    int role = 0;
    RoleStatus roleStatus = appState.lookupRoleStatus(role);

    AMRMClient.ContainerRequest request = requestContainer(roleStatus);

    String hostname = request.getNodes().get(0);
    assertEquals(age3Active0.hostname, hostname);

    // build a container
    MockContainer container =
        newContainerOnHost(hostname, request.getPriority());

    roleHistory.onContainerAssigned(container);

    NodeMap nodemap = roleHistory.cloneNodemap();
    NodeInstance allocated = nodemap.get(hostname);
    NodeEntry roleEntry = allocated.get(role);
    assertEquals(1, roleEntry.getStarting());
    assertFalse(roleEntry.isAvailable());
    RoleInstance ri = new RoleInstance(container);
    // start it
    roleHistory.onContainerStartSubmitted(container, ri);
    roleHistory.onContainerStarted(container);

    int startSize = nodemap.size();

    // now send a list of updated (failed) nodes event
    List<NodeReport> nodesUpdated = new ArrayList<>();
    NodeReport nodeReport = NodeReport.newInstance(
        NodeId.newInstance(hostname, 0),
        NodeState.LOST,
        null, null, null, null, 1, null, 0);
    nodesUpdated.add(nodeReport);
    roleHistory.onNodesUpdated(nodesUpdated);

    nodemap = roleHistory.cloneNodemap();
    int endSize = nodemap.size();
    // as even unused nodes are added to the list, expect the map to grow
    assertTrue(startSize <= endSize);
    assertNotNull(nodemap.get(hostname));
    assertFalse(nodemap.get(hostname).isOnline());

    // add a failure of a node we've never heard of
    String newhost = "newhost";
    nodesUpdated = Arrays.asList(
        NodeReport.newInstance(
            NodeId.newInstance(newhost, 0),
            NodeState.LOST,
            null, null, null, null, 1, null, 0)
    );
    roleHistory.onNodesUpdated(nodesUpdated);

    NodeMap nodemap2 = roleHistory.cloneNodemap();
    assertNotNull(nodemap2.get(newhost));
    assertFalse(nodemap2.get(newhost).isOnline());
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.slider.server.appmaster.model.history;

import org.apache.slider.core.exceptions.BadConfigException;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
import org.apache.slider.server.appmaster.state.ContainerOutcome;
import org.apache.slider.server.appmaster.state.NodeEntry;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.apache.slider.server.appmaster.state.RoleHistory;
import org.apache.slider.server.appmaster.state.RoleStatus;
import org.junit.Before;
import org.junit.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Testing finding nodes for new instances.
 *
 * This stresses the non-AA codepath: recent-node selection, exhaustion
 * of the candidate list, and skipping of nodes over the failure threshold.
 */
public class TestRoleHistoryFindNodesForNewInstances
    extends BaseMockAppStateTest {

  public TestRoleHistoryFindNodesForNewInstances() throws BadConfigException {
  }

  @Override
  public String getTestName() {
    return "TestFindNodesForNewInstances";
  }

  NodeInstance age1Active4 = nodeInstance(1, 4, 0, 0);
  NodeInstance age2Active2 = nodeInstance(2, 2, 0, 1);
  NodeInstance age3Active0 = nodeInstance(3, 0, 0, 0);
  NodeInstance age4Active1 = nodeInstance(4, 1, 0, 0);
  NodeInstance age2Active0 = nodeInstance(2, 0, 0, 0);
  NodeInstance empty = new NodeInstance("empty", MockFactory.ROLE_COUNT);

  List<NodeInstance> nodes = Arrays.asList(age2Active2, age2Active0,
      age4Active1, age1Active4, age3Active0);
  RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);

  String roleName = "test";
  RoleStatus roleStat = new RoleStatus(new ProviderRole(roleName, 0));
  RoleStatus roleStat2 = new RoleStatus(new ProviderRole(roleName, 2));

  @Before
  public void setupNodeMap() {
    roleHistory.insert(nodes);
    roleHistory.buildRecentNodeLists();
  }

  public List<NodeInstance> findNodes(int count) {
    return findNodes(count, roleStat);
  }

  /**
   * Ask the role history for up to {@code count} recent nodes,
   * dropping the nulls returned once candidates are exhausted.
   */
  public List<NodeInstance> findNodes(int count, RoleStatus roleStatus) {
    List<NodeInstance> found = new ArrayList<>();
    for (int i = 0; i < count; i++) {
      NodeInstance f = roleHistory.findRecentNodeForNewInstance(roleStatus);
      if (f != null) {
        found.add(f);
      }
    }
    return found;
  }

  @Test
  public void testFind1NodeR0() throws Throwable {
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    log.info("found: {}", found);
    assertTrue(Arrays.asList(age3Active0).contains(found));
  }

  @Test
  public void testFind2NodeR0() throws Throwable {
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    log.info("found: {}", found);
    assertTrue(Arrays.asList(age2Active0, age3Active0).contains(found));
    NodeInstance found2 = roleHistory.findRecentNodeForNewInstance(roleStat);
    log.info("found: {}", found2);
    assertTrue(Arrays.asList(age2Active0, age3Active0).contains(found2));
    // the same node must not be handed out twice in a row
    assertNotEquals(found, found2);
  }

  @Test
  public void testFind3NodeR0ReturnsNull() throws Throwable {
    // only two idle nodes exist; the third request finds nothing
    assertEquals(2, findNodes(2).size());
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    assertNull(found);
  }

  @Test
  public void testFindNodesOneEntry() throws Throwable {
    // role 2 has no history at all
    List<NodeInstance> foundNodes = findNodes(4, roleStat2);
    assertEquals(0, foundNodes.size());
  }

  @Test
  public void testFindNodesIndependent() throws Throwable {
    // exhausting role 0's candidates must not affect role 2
    assertEquals(2, findNodes(2).size());
    roleHistory.dump();
    assertEquals(0, findNodes(3, roleStat2).size());
  }

  @Test
  public void testFindNodesFallsBackWhenUsed() throws Throwable {
    // mark age2 and age3 as busy for role 0; expect a null back
    age2Active0.get(0).onStartCompleted();
    assertNotEquals(0, age2Active0.getActiveRoleInstances(0));
    age3Active0.get(0).onStartCompleted();
    assertNotEquals(0, age3Active0.getActiveRoleInstances(0));
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    if (found != null) {
      log.info(found.toFullString());
    }
    assertNull(found);
  }

  @Test
  public void testFindNodesSkipsFailingNode() throws Throwable {
    // push age2Active0 over the role's failure threshold

    NodeEntry entry0 = age2Active0.get(0);
    entry0.containerCompleted(
        false,
        ContainerOutcome.Failed);
    assertTrue(entry0.getFailed() > 0);
    assertTrue(entry0.getFailedRecently() > 0);
    entry0.containerCompleted(
        false,
        ContainerOutcome.Failed);
    assertFalse(age2Active0.exceedsFailureThreshold(roleStat));
    // set failure threshold to 1
    roleStat.getProviderRole().nodeFailureThreshold = 1;
    // threshold is now exceeded
    assertTrue(age2Active0.exceedsFailureThreshold(roleStat));

    // get the role & expect age3 to be picked up, even though it is older
    NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
    assertEquals(age3Active0, found);
  }

}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.slider.server.appmaster.model.history;

import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
import org.apache.slider.server.appmaster.model.mock.MockFactory;
import org.apache.slider.server.appmaster.state.NodeInstance;
import org.junit.Before;
import org.junit.Test;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

/**
 * Unit test to verify that {@link NodeInstance}'s comparators
 * ({@code Preferred}, {@code MoreActiveThan}) sort as expected.
 */
public class TestRoleHistoryNIComparators extends BaseMockAppStateTest {

  NodeInstance age1Active4 = nodeInstance(1001, 4, 0, 0);
  NodeInstance age2Active2 = nodeInstance(1002, 2, 0, 0);
  NodeInstance age3Active0 = nodeInstance(1003, 0, 0, 0);
  NodeInstance age4Active1 = nodeInstance(1004, 1, 0, 0);
  NodeInstance empty = new NodeInstance("empty", MockFactory.ROLE_COUNT);
  NodeInstance age6failing = nodeInstance(1006, 0, 0, 0);
  NodeInstance age1failing = nodeInstance(1001, 0, 0, 0);

  List<NodeInstance> nodes = Arrays.asList(age2Active2, age4Active1,
      age1Active4, age3Active0);
  List<NodeInstance> nodesPlusEmpty = Arrays.asList(age2Active2, age4Active1,
      age1Active4, age3Active0, empty);
  List<NodeInstance> allnodes = Arrays.asList(age6failing, age2Active2,
      age4Active1, age1Active4, age3Active0, age1failing);

  // FIX(review): this @Before method was previously named setup(), which
  // overrode the base class's setup() without calling super -silently
  // skipping app-state initialization. Renamed (matching the sibling
  // test's setupNodeMap() convention) so both @Before methods run.
  @Before
  public void setupFailingNodes() {
    age6failing.get(0).setFailedRecently(2);
    age1failing.get(0).setFailedRecently(1);
  }

  @Override
  public String getTestName() {
    return "TestNIComparators";
  }

  @Test
  public void testPreferred() throws Throwable {
    Collections.sort(nodes, new NodeInstance.Preferred(0));
    assertListEquals(nodes, Arrays.asList(age4Active1, age3Active0,
        age2Active2, age1Active4));
  }

  /**
   * The preferred sort still includes failures; it is up to the next
   * phase in the process to handle that.
   * @throws Throwable on any failure
   */
  @Test
  public void testPreferredWithFailures() throws Throwable {
    Collections.sort(allnodes, new NodeInstance.Preferred(0));
    assertEquals(age6failing, allnodes.get(0));
    assertEquals(age4Active1, allnodes.get(1));
  }

  @Test
  public void testPreferredComparatorDowngradesFailures() throws Throwable {
    NodeInstance.Preferred preferred = new NodeInstance.Preferred(0);
    assertEquals(-1, preferred.compare(age6failing, age1failing));
    assertEquals(1, preferred.compare(age1failing, age6failing));
  }

  @Test
  public void testNewerThanNoRole() throws Throwable {
    // a node with no entry for the role sorts last
    Collections.sort(nodesPlusEmpty, new NodeInstance.Preferred(0));
    assertListEquals(nodesPlusEmpty, Arrays.asList(age4Active1, age3Active0,
        age2Active2, age1Active4, empty));
  }

  @Test
  public void testMoreActiveThan() throws Throwable {
    Collections.sort(nodes, new NodeInstance.MoreActiveThan(0));
    assertListEquals(nodes, Arrays.asList(age1Active4, age2Active2,
        age4Active1, age3Active0));
  }

  @Test
  public void testMoreActiveThanEmpty() throws Throwable {
    Collections.sort(nodesPlusEmpty, new NodeInstance.MoreActiveThan(0));
    assertListEquals(nodesPlusEmpty, Arrays.asList(age1Active4, age2Active2,
        age4Active1, age3Active0, empty));
  }

}
/dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java @@ -0,0 +1,374 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.history; + +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; +import org.apache.hadoop.yarn.util.resource.Resources; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.conf.ConfTreeOperations; +import org.apache.slider.providers.PlacementPolicy; +import org.apache.slider.providers.ProviderRole; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockAppState; +import org.apache.slider.server.appmaster.model.mock.MockContainer; +import org.apache.slider.server.appmaster.model.mock.MockNodeId; +import org.apache.slider.server.appmaster.model.mock.MockPriority; +import org.apache.slider.server.appmaster.model.mock.MockResource; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.CancelSingleRequest; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; +import org.apache.slider.server.appmaster.state.ContainerAllocationOutcome; +import org.apache.slider.server.appmaster.state.ContainerAllocationResults; +import org.apache.slider.server.appmaster.state.ContainerPriority; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.OutstandingRequest; +import org.apache.slider.server.appmaster.state.OutstandingRequestTracker; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class TestRoleHistoryOutstandingRequestTracker extends BaseMockAppStateTest { + + public static final String WORKERS_LABEL = "workers"; + NodeInstance host1 = new 
NodeInstance("host1", 3); + NodeInstance host2 = new NodeInstance("host2", 3); + MockResource resource = factory.newResource(48, 1); + + OutstandingRequestTracker tracker = new OutstandingRequestTracker(); + + public static final ProviderRole WORKER = new ProviderRole( + "worker", + 5, + PlacementPolicy.NONE, + 2, + 1, + WORKERS_LABEL); + + @Override + public AppStateBindingInfo buildBindingInfo() { + AppStateBindingInfo bindingInfo = super.buildBindingInfo(); + bindingInfo.roles.add(WORKER); + return bindingInfo; + } + + @Test + public void testAddRetrieveEntry() throws Throwable { + OutstandingRequest request = tracker.newRequest(host1, 0); + assertEquals(tracker.lookupPlacedRequest(0, "host1"), request); + assertEquals(tracker.removePlacedRequest(request), request); + assertNull(tracker.lookupPlacedRequest(0, "host1")); + } + + @Test + public void testAddCompleteEntry() throws Throwable { + OutstandingRequest req1 = tracker.newRequest(host1, 0); + req1.buildContainerRequest(resource, getRole0Status(), 0); + + tracker.newRequest(host2, 0).buildContainerRequest(resource, + getRole0Status(), 0); + tracker.newRequest(host1, 1).buildContainerRequest(resource, + getRole0Status(), 0); + + ContainerAllocationResults allocation = tracker.onContainerAllocated(1, + "host1", null); + assertEquals(allocation.outcome, ContainerAllocationOutcome.Placed); + assertTrue(allocation.operations.get(0) instanceof CancelSingleRequest); + + assertNull(tracker.lookupPlacedRequest(1, "host1")); + assertNotNull(tracker.lookupPlacedRequest(0, "host1")); + } + + @Test + public void testResetOpenRequests() throws Throwable { + OutstandingRequest req1 = tracker.newRequest(null, 0); + assertFalse(req1.isLocated()); + tracker.newRequest(host1, 0); + List openRequests = tracker.listOpenRequests(); + assertEquals(1, openRequests.size()); + tracker.resetOutstandingRequests(0); + assertTrue(tracker.listOpenRequests().isEmpty()); + assertTrue(tracker.listPlacedRequests().isEmpty()); + } + + @Test + 
public void testRemoveOpenRequestUnissued() throws Throwable { + OutstandingRequest req1 = tracker.newRequest(null, 0); + req1.buildContainerRequest(resource, getRole0Status(), 0); + assertEquals(1, tracker.listOpenRequests().size()); + MockContainer c1 = factory.newContainer(null, new MockPriority(0)); + c1.setResource(resource); + + ContainerAllocationResults allocation = + tracker.onContainerAllocated(0, "host1", c1); + ContainerAllocationOutcome outcome = allocation.outcome; + assertEquals(outcome, ContainerAllocationOutcome.Unallocated); + assertTrue(allocation.operations.isEmpty()); + assertEquals(1, tracker.listOpenRequests().size()); + } + + @Test + public void testIssuedOpenRequest() throws Throwable { + OutstandingRequest req1 = tracker.newRequest(null, 0); + req1.buildContainerRequest(resource, getRole0Status(), 0); + assertEquals(1, tracker.listOpenRequests().size()); + + int pri = ContainerPriority.buildPriority(0, false); + assertTrue(pri > 0); + MockNodeId nodeId = factory.newNodeId("hostname-1"); + MockContainer c1 = factory.newContainer(nodeId, new MockPriority(pri)); + + c1.setResource(resource); + + ContainerRequest issued = req1.getIssuedRequest(); + assertEquals(issued.getCapability(), resource); + assertEquals(issued.getPriority().getPriority(), c1.getPriority() + .getPriority()); + assertTrue(req1.resourceRequirementsMatch(resource)); + + ContainerAllocationResults allocation = + tracker.onContainerAllocated(0, nodeId.getHost(), c1); + assertEquals(0, tracker.listOpenRequests().size()); + assertTrue(allocation.operations.get(0) instanceof CancelSingleRequest); + + assertEquals(allocation.outcome, ContainerAllocationOutcome.Open); + assertEquals(allocation.origin, req1); + } + + @Test + public void testResetEntries() throws Throwable { + tracker.newRequest(host1, 0); + tracker.newRequest(host2, 0); + tracker.newRequest(host1, 1); + List canceled = tracker.resetOutstandingRequests(0); + assertEquals(2, canceled.size()); + 
assertTrue(canceled.contains(host1)); + assertTrue(canceled.contains(host2)); + assertNotNull(tracker.lookupPlacedRequest(1, "host1")); + assertNull(tracker.lookupPlacedRequest(0, "host1")); + canceled = tracker.resetOutstandingRequests(0); + assertEquals(0, canceled.size()); + assertEquals(1, tracker.resetOutstandingRequests(1).size()); + } + + @Test + public void testEscalation() throws Throwable { + + // first request: default placement + assertEquals(getRole0Status().getPlacementPolicy(), PlacementPolicy.DEFAULT); + Resource res0 = newResource(getRole0Status()); + OutstandingRequest outstanding0 = tracker.newRequest(host1, + getRole0Status().getKey()); + ContainerRequest initialRequest = + outstanding0.buildContainerRequest(res0, getRole0Status + (), 0); + assertNotNull(outstanding0.getIssuedRequest()); + assertTrue(outstanding0.isLocated()); + assertFalse(outstanding0.isEscalated()); + assertFalse(initialRequest.getRelaxLocality()); + assertEquals(1, tracker.listPlacedRequests().size()); + + // second. This one doesn't get launched. This is to verify that the escalation + // process skips entries which are in the list but have not been issued. + // ...which can be a race condition between request issuance & escalation. 
+ // (not one observed outside test authoring, but retained for completeness) + Resource res2 = newResource(getRole2Status()); + OutstandingRequest outstanding2 = tracker.newRequest(host1, + getRole2Status().getKey()); + + // simulate some time escalation of role 1 MUST now be triggered + long interval = getRole0Status().getPlacementTimeoutSeconds() * 1000 + 500; + long now = interval; + final List escalations = tracker + .escalateOutstandingRequests(now); + + assertTrue(outstanding0.isEscalated()); + assertFalse(outstanding2.isEscalated()); + + // two entries + assertEquals(2, escalations.size()); + AbstractRMOperation e1 = escalations.get(0); + assertTrue(e1 instanceof CancelSingleRequest); + final CancelSingleRequest cancel = (CancelSingleRequest) e1; + assertEquals(initialRequest, cancel.getRequest()); + AbstractRMOperation e2 = escalations.get(1); + assertTrue(e2 instanceof ContainerRequestOperation); + ContainerRequestOperation escRequest = (ContainerRequestOperation) e2; + assertTrue(escRequest.getRequest().getRelaxLocality()); + + // build that second request from an anti-affine entry + // these get placed as well + now += interval; + ContainerRequest containerReq2 = + outstanding2.buildContainerRequest(res2, getRole2Status(), now); + // escalate a little bit more + final List escalations2 = tracker + .escalateOutstandingRequests(now); + // and expect no new entries + assertEquals(0, escalations2.size()); + + // go past the role2 timeout + now += getRole2Status().getPlacementTimeoutSeconds() * 1000 + 500; + // escalate a little bit more + final List escalations3 = tracker + .escalateOutstandingRequests(now); + // and expect another escalation + assertEquals(2, escalations3.size()); + assertTrue(outstanding2.isEscalated()); + + // finally add a strict entry to the mix + Resource res3 = newResource(getRole1Status()); + OutstandingRequest outstanding3 = tracker.newRequest(host1, + getRole1Status().getKey()); + + final ProviderRole providerRole1 = 
getRole1Status().getProviderRole(); + assertEquals(providerRole1.placementPolicy, PlacementPolicy.STRICT); + now += interval; + assertFalse(outstanding3.mayEscalate()); + final List escalations4 = tracker + .escalateOutstandingRequests(now); + assertTrue(escalations4.isEmpty()); + + } + + /** + * If the placement does include a label, the initial request must + * not include it. + * The escalation request will contain the label, while + * leaving out the node list. + * retains the node list, but sets relaxLocality==true + * @throws Throwable + */ + @Test + public void testRequestLabelledPlacement() throws Throwable { + NodeInstance ni = new NodeInstance("host1", 0); + OutstandingRequest req1 = tracker.newRequest(ni, 0); + Resource resource = factory.newResource(48, 1); + + RoleStatus workerRole = lookupRole(WORKER.name); + // initial request + ContainerRequest yarnRequest = + req1.buildContainerRequest(resource, workerRole, 0); + assertEquals(req1.label, WORKERS_LABEL); + + assertNull(yarnRequest.getNodeLabelExpression()); + assertFalse(yarnRequest.getRelaxLocality()); + // escalation + ContainerRequest yarnRequest2 = req1.escalate(); + assertNull(yarnRequest2.getNodes()); + assertTrue(yarnRequest2.getRelaxLocality()); + assertEquals(yarnRequest2.getNodeLabelExpression(), WORKERS_LABEL); + } + + /** + * If the placement doesnt include a label, then the escalation request + * retains the node list, but sets relaxLocality==true + * @throws Throwable + */ + @Test + public void testRequestUnlabelledPlacement() throws Throwable { + NodeInstance ni = new NodeInstance("host1", 0); + OutstandingRequest req1 = tracker.newRequest(ni, 0); + Resource resource = factory.newResource(48, 1); + + // initial request + ContainerRequest yarnRequest = req1.buildContainerRequest(resource, + getRole0Status(), 0); + assertNotNull(yarnRequest.getNodes()); + assertTrue(SliderUtils.isUnset(yarnRequest.getNodeLabelExpression())); + assertFalse(yarnRequest.getRelaxLocality()); + 
ContainerRequest yarnRequest2 = req1.escalate(); + assertNotNull(yarnRequest2.getNodes()); + assertTrue(yarnRequest2.getRelaxLocality()); + } + + @Test(expected = IllegalArgumentException.class) + public void testAARequestNoNodes() throws Throwable { + tracker.newAARequest(getRole0Status().getKey(), new ArrayList<>(), ""); + } + + @Test + public void testAARequest() throws Throwable { + int role0 = getRole0Status().getKey(); + OutstandingRequest request = tracker.newAARequest(role0, Arrays + .asList(host1), ""); + assertEquals(host1.hostname, request.hostname); + assertFalse(request.isLocated()); + } + + @Test + public void testAARequestPair() throws Throwable { + int role0 = getRole0Status().getKey(); + OutstandingRequest request = tracker.newAARequest(role0, Arrays.asList(host1, + host2), ""); + assertEquals(host1.hostname, request.hostname); + assertFalse(request.isLocated()); + ContainerRequest yarnRequest = request.buildContainerRequest( + getRole0Status().copyResourceRequirements(new MockResource(0, 0)), + getRole0Status(), + 0); + assertFalse(yarnRequest.getRelaxLocality()); + assertFalse(request.mayEscalate()); + + assertEquals(2, yarnRequest.getNodes().size()); + } + + @Test + public void testBuildResourceRequirements() throws Throwable { + // Store original values + ConfTreeOperations resources = appState.getResourcesSnapshot(); + String origMem = resources.getComponentOpt(getRole0Status().getGroup(), + ResourceKeys.YARN_MEMORY, null); + String origVcores = resources.getComponentOpt(getRole0Status().getGroup(), + ResourceKeys.YARN_CORES, null); + + // Resource values to be used for this test + int testMem = 32768; + int testVcores = 2; + resources.setComponentOpt(getRole0Status().getGroup(), ResourceKeys.YARN_MEMORY, + Integer.toString(testMem)); + resources.setComponentOpt(getRole0Status().getGroup(), ResourceKeys.YARN_CORES, + Integer.toString(testVcores)); + + MockResource requestedRes = new MockResource(testMem, testVcores); + MockResource expectedRes 
= new MockResource(MockAppState.RM_MAX_RAM, testVcores); + log.info("Resource requested: " + requestedRes); + Resource resFinal = appState.buildResourceRequirements(getRole0Status(), + new MockResource(0, 0)); + log.info("Resource actual: " + resFinal); + assertTrue(Resources.equals(expectedRes, resFinal)); + + // revert resource configuration to original value + resources.setComponentOpt(getRole0Status().getGroup(), ResourceKeys.YARN_MEMORY, + origMem); + resources.setComponentOpt(getRole0Status().getGroup(), ResourceKeys.YARN_CORES, + origVcores); + } + + public Resource newResource(RoleStatus r) { + final Resource res2 = new MockResource(0, 0); + appState.buildResourceRequirements(r, res2); + return res2; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java new file mode 100644 index 0000000..c9510d3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java @@ -0,0 +1,351 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.history; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.providers.PlacementPolicy; +import org.apache.slider.providers.ProviderRole; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockFactory; +import org.apache.slider.server.appmaster.model.mock.MockRoleHistory; +import org.apache.slider.server.appmaster.state.NodeEntry; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.RoleHistory; +import org.apache.slider.server.avro.LoadedRoleHistory; +import org.apache.slider.server.avro.RoleHistoryWriter; +import org.junit.Test; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class TestRoleHistoryRW extends BaseMockAppStateTest { + + static long time = System.currentTimeMillis(); + public static final String HISTORY_V1_6_ROLE = + "org/apache/slider/server/avro/history-v01-6-role.json"; + public static final String HISTORY_V1_3_ROLE = + "org/apache/slider/server/avro/history-v01-3-role.json"; + public static final String HISTORY_V1b_1_ROLE = + "org/apache/slider/server/avro/history_v01b_1_role.json"; + + + static final ProviderRole PROVIDER_ROLE3 = new ProviderRole( + "role3", + 3, + PlacementPolicy.STRICT, + 3, + 3, + ResourceKeys.DEF_YARN_LABEL_EXPRESSION); + + @Override + 
public String getTestName() { + return "TestHistoryRW"; + } + + @Test + public void testWriteReadEmpty() throws Throwable { + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + roleHistory.onStart(fs, historyPath); + Path history = roleHistory.saveHistory(time++); + assertTrue(fs.isFile(history)); + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + historyWriter.read(fs, history); + } + + @Test + public void testWriteReadData() throws Throwable { + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + assertFalse(roleHistory.onStart(fs, historyPath)); + String addr = "localhost"; + NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr); + NodeEntry ne1 = instance.getOrCreate(0); + ne1.setLastUsed(0xf00d); + + Path history = roleHistory.saveHistory(time++); + assertTrue(fs.isFile(history)); + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES); + + + LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history); + assertTrue(0 < loadedRoleHistory.size()); + rh2.rebuild(loadedRoleHistory); + NodeInstance ni2 = rh2.getExistingNodeInstance(addr); + assertNotNull(ni2); + NodeEntry ne2 = ni2.get(0); + assertNotNull(ne2); + assertEquals(ne2.getLastUsed(), ne1.getLastUsed()); + } + + @Test + public void testWriteReadActiveData() throws Throwable { + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + roleHistory.onStart(fs, historyPath); + String addr = "localhost"; + String addr2 = "rack1server5"; + NodeInstance localhost = roleHistory.getOrCreateNodeInstance(addr); + NodeEntry orig1 = localhost.getOrCreate(0); + orig1.setLastUsed(0x10); + NodeInstance rack1server5 = roleHistory.getOrCreateNodeInstance(addr2); + NodeEntry orig2 = rack1server5.getOrCreate(1); + orig2.setLive(3); + assertFalse(orig2.isAvailable()); + NodeEntry orig3 = localhost.getOrCreate(1); + orig3.setLastUsed(0x20); + orig3.setLive(1); + 
assertFalse(orig3.isAvailable()); + orig3.release(); + assertTrue(orig3.isAvailable()); + roleHistory.dump(); + + long savetime = 0x0001000; + Path history = roleHistory.saveHistory(savetime); + assertTrue(fs.isFile(history)); + describe("Loaded"); + log.info("testWriteReadActiveData in {}", history); + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES); + LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history); + assertEquals(3, loadedRoleHistory.size()); + rh2.rebuild(loadedRoleHistory); + rh2.dump(); + + assertEquals(2, rh2.getClusterSize()); + NodeInstance ni2 = rh2.getExistingNodeInstance(addr); + assertNotNull(ni2); + NodeEntry loadedNE = ni2.get(0); + assertEquals(loadedNE.getLastUsed(), orig1.getLastUsed()); + NodeInstance ni2b = rh2.getExistingNodeInstance(addr2); + assertNotNull(ni2b); + NodeEntry loadedNE2 = ni2b.get(1); + assertNotNull(loadedNE2); + assertEquals(loadedNE2.getLastUsed(), savetime); + assertEquals(rh2.getThawedDataTime(), savetime); + + // now start it + rh2.buildRecentNodeLists(); + describe("starting"); + rh2.dump(); + List available0 = rh2.cloneRecentNodeList(0); + assertEquals(1, available0.size()); + + NodeInstance entry = available0.get(0); + assertEquals(entry.hostname, "localhost"); + assertEquals(entry, localhost); + List available1 = rh2.cloneRecentNodeList(1); + assertEquals(2, available1.size()); + //and verify that even if last used was set, the save time is picked up + assertEquals(entry.get(1).getLastUsed(), roleHistory.getSaveTime()); + + } + + @Test + public void testWriteThaw() throws Throwable { + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + assertFalse(roleHistory.onStart(fs, historyPath)); + String addr = "localhost"; + NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr); + NodeEntry ne1 = instance.getOrCreate(0); + ne1.setLastUsed(0xf00d); + + Path history = roleHistory.saveHistory(time++); + long 
savetime =roleHistory.getSaveTime(); + assertTrue(fs.isFile(history)); + RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES); + assertTrue(rh2.onStart(fs, historyPath)); + NodeInstance ni2 = rh2.getExistingNodeInstance(addr); + assertNotNull(ni2); + NodeEntry ne2 = ni2.get(0); + assertNotNull(ne2); + assertEquals(ne2.getLastUsed(), ne1.getLastUsed()); + assertEquals(rh2.getThawedDataTime(), savetime); + } + + + @Test + public void testPurgeOlderEntries() throws Throwable { + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + time = 1; + Path file1 = touch(historyWriter, time++); + Path file2 = touch(historyWriter, time++); + Path file3 = touch(historyWriter, time++); + Path file4 = touch(historyWriter, time++); + Path file5 = touch(historyWriter, time++); + Path file6 = touch(historyWriter, time++); + + assertEquals(0, historyWriter.purgeOlderHistoryEntries(fs, file1)); + assertEquals(1, historyWriter.purgeOlderHistoryEntries(fs, file2)); + assertEquals(0, historyWriter.purgeOlderHistoryEntries(fs, file2)); + assertEquals(3, historyWriter.purgeOlderHistoryEntries(fs, file5)); + assertEquals(1, historyWriter.purgeOlderHistoryEntries(fs, file6)); + try { + // make an impossible assertion that will fail if the method + // actually completes + assertEquals(-1, historyWriter.purgeOlderHistoryEntries(fs, file1)); + } catch (FileNotFoundException ignored) { + // expected + } + + } + + public Path touch(RoleHistoryWriter historyWriter, long time) + throws IOException { + Path path = historyWriter.createHistoryFilename(historyPath, time); + FSDataOutputStream out = fs.create(path); + out.close(); + return path; + } + + @Test + public void testSkipEmptyFileOnRead() throws Throwable { + describe("verify that empty histories are skipped on read; old histories " + + "purged"); + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + roleHistory.onStart(fs, historyPath); + time = 0; + Path oldhistory = roleHistory.saveHistory(time++); + + String addr 
= "localhost"; + NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr); + NodeEntry ne1 = instance.getOrCreate(0); + ne1.setLastUsed(0xf00d); + + Path goodhistory = roleHistory.saveHistory(time++); + + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + Path touched = touch(historyWriter, time++); + + RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES); + assertTrue(rh2.onStart(fs, historyPath)); + NodeInstance ni2 = rh2.getExistingNodeInstance(addr); + assertNotNull(ni2); + + //and assert the older file got purged + assertFalse(fs.exists(oldhistory)); + assertTrue(fs.exists(goodhistory)); + assertTrue(fs.exists(touched)); + } + + @Test + public void testSkipBrokenFileOnRead() throws Throwable { + describe("verify that empty histories are skipped on read; old histories " + + "purged"); + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + roleHistory.onStart(fs, historyPath); + time = 0; + Path oldhistory = roleHistory.saveHistory(time++); + + String addr = "localhost"; + NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr); + NodeEntry ne1 = instance.getOrCreate(0); + ne1.setLastUsed(0xf00d); + + Path goodhistory = roleHistory.saveHistory(time++); + + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + Path badfile = historyWriter.createHistoryFilename(historyPath, time++); + FSDataOutputStream out = fs.create(badfile); + out.writeBytes("{broken:true}"); + out.close(); + + RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES); + describe("IGNORE STACK TRACE BELOW"); + + assertTrue(rh2.onStart(fs, historyPath)); + + describe( "IGNORE STACK TRACE ABOVE"); + NodeInstance ni2 = rh2.getExistingNodeInstance(addr); + assertNotNull(ni2); + + //and assert the older file got purged + assertFalse(fs.exists(oldhistory)); + assertTrue(fs.exists(goodhistory)); + assertTrue(fs.exists(badfile)); + } + + /** + * Test that a v1 JSON file can be read. Here the number of roles + * matches the current state. 
+ * @throws Throwable + */ + @Test + public void testReloadDataV1_3_role() throws Throwable { + String source = HISTORY_V1_3_ROLE; + RoleHistoryWriter writer = new RoleHistoryWriter(); + + LoadedRoleHistory loadedRoleHistory = writer.read(source); + assertEquals(4, loadedRoleHistory.size()); + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + assertEquals(0, roleHistory.rebuild(loadedRoleHistory)); + } + + /** + * Test that a v1 JSON file can be read. Here more roles than expected + * @throws Throwable + */ + @Test + public void testReloadDataV1_6_role() throws Throwable { + String source = HISTORY_V1_6_ROLE; + RoleHistoryWriter writer = new RoleHistoryWriter(); + + LoadedRoleHistory loadedRoleHistory = writer.read(source); + assertEquals(6, loadedRoleHistory.size()); + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + assertEquals(3, roleHistory.rebuild(loadedRoleHistory)); + } + + /** + * Test that a v1 JSON file can be read. Here the number of roles + * is less than the current state. + * @throws Throwable + */ + @Test + public void testReload_less_roles() throws Throwable { + String source = HISTORY_V1_3_ROLE; + RoleHistoryWriter writer = new RoleHistoryWriter(); + + LoadedRoleHistory loadedRoleHistory = writer.read(source); + assertEquals(4, loadedRoleHistory.size()); + List expandedRoles = new ArrayList(MockFactory.ROLES); + expandedRoles.add(PROVIDER_ROLE3); + RoleHistory roleHistory = new MockRoleHistory(expandedRoles); + assertEquals(0, roleHistory.rebuild(loadedRoleHistory)); + } + + /** + * Test that a v1b JSON file can be read. 
Here more roles than expected + * @throws Throwable + */ + @Test + public void testReloadDataV1b_1_role() throws Throwable { + String source = HISTORY_V1b_1_ROLE; + RoleHistoryWriter writer = new RoleHistoryWriter(); + + LoadedRoleHistory loadedRoleHistory = writer.read(source); + assertEquals(1, loadedRoleHistory.size()); + assertEquals(2, loadedRoleHistory.roleMap.size()); + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + assertEquals(0, roleHistory.rebuild(loadedRoleHistory)); + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java new file mode 100644 index 0000000..05eb4bc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.history; + +import org.apache.hadoop.fs.Path; +import org.apache.slider.common.SliderKeys; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockFactory; +import org.apache.slider.server.appmaster.model.mock.MockRoleHistory; +import org.apache.slider.server.appmaster.state.NodeEntry; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.RoleHistory; +import org.apache.slider.server.avro.NewerFilesFirst; +import org.apache.slider.server.avro.RoleHistoryWriter; +import org.junit.Test; + +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public class TestRoleHistoryRWOrdering extends BaseMockAppStateTest { + + List paths = pathlist( + Arrays.asList( + "hdfs://localhost/history-0406c.json", + "hdfs://localhost/history-5fffa.json", + "hdfs://localhost/history-0001a.json", + "hdfs://localhost/history-0001f.json" + ) + ); + Path h_0406c = paths.get(0); + Path h_5fffa = paths.get(1); + Path h_0001a = paths.get(3); + + public TestRoleHistoryRWOrdering() throws URISyntaxException { + } + + + List pathlist(List pathnames) throws URISyntaxException { + List paths = new ArrayList<>(); + for (String p : pathnames) { + paths.add(new Path(new URI(p))); + } + return paths; + } + + @Override + public String getTestName() { + return "TestHistoryRWOrdering"; + } + + /** + * This tests regexp pattern matching. 
It uses the current time so isn't + * repeatable -but it does test a wider range of values in the process + * @throws Throwable + */ + @Test + public void testPatternRoundTrip() throws Throwable { + describe("test pattern matching of names"); + long value=System.currentTimeMillis(); + String name = String.format(SliderKeys.HISTORY_FILENAME_CREATION_PATTERN, + value); + String matchpattern = SliderKeys.HISTORY_FILENAME_MATCH_PATTERN; + Pattern pattern = Pattern.compile(matchpattern); + Matcher matcher = pattern.matcher(name); + if (!matcher.find()) { + throw new Exception("No match for pattern $matchpattern in $name"); + } + } + + @Test + public void testWriteSequenceReadData() throws Throwable { + describe("test that if multiple entries are written, the newest is picked" + + " up"); + long time = System.currentTimeMillis(); + + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + assertFalse(roleHistory.onStart(fs, historyPath)); + String addr = "localhost"; + NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr); + NodeEntry ne1 = instance.getOrCreate(0); + ne1.setLastUsed(0xf00d); + + Path history1 = roleHistory.saveHistory(time++); + Path history2 = roleHistory.saveHistory(time++); + Path history3 = roleHistory.saveHistory(time); + + //inject a later file with a different name + sliderFileSystem.cat(new Path(historyPath, "file.json"), true, "hello," + + " world"); + + + RoleHistoryWriter historyWriter = new RoleHistoryWriter(); + + List entries = historyWriter.findAllHistoryEntries( + fs, + historyPath, + false); + assertEquals(entries.size(), 3); + assertEquals(entries.get(0), history3); + assertEquals(entries.get(1), history2); + assertEquals(entries.get(2), history1); + } + + @Test + public void testPathStructure() throws Throwable { + assertEquals(h_5fffa.getName(), "history-5fffa.json"); + } + + @Test + public void testPathnameComparator() throws Throwable { + + NewerFilesFirst newerName = new NewerFilesFirst(); + + log.info("{} 
name is {}", h_5fffa, h_5fffa.getName()); + log.info("{} name is {}", h_0406c, h_0406c.getName()); + assertEquals( newerName.compare(h_5fffa, h_5fffa), 0); + assertTrue(newerName.compare(h_5fffa, h_0406c) < 0); + assertTrue(newerName.compare(h_5fffa, h_0001a) < 0); + assertTrue(newerName.compare(h_0001a, h_5fffa) > 0); + + } + + @Test + public void testPathSort() throws Throwable { + List paths2 = new ArrayList<>(paths); + RoleHistoryWriter.sortHistoryPaths(paths2); + assertListEquals(paths2, + Arrays.asList( + paths.get(1), + paths.get(0), + paths.get(3), + paths.get(2) + )); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java new file mode 100644 index 0000000..48a758f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java @@ -0,0 +1,286 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.history; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.providers.PlacementPolicy; +import org.apache.slider.providers.ProviderRole; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockContainer; +import org.apache.slider.server.appmaster.model.mock.MockFactory; +import org.apache.slider.server.appmaster.model.mock.MockRoleHistory; +import org.apache.slider.server.appmaster.state.ContainerAllocationOutcome; +import org.apache.slider.server.appmaster.state.NodeEntry; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.OutstandingRequest; +import org.apache.slider.server.appmaster.state.RoleHistory; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.junit.Before; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +/** + * Test the RH availability list and request tracking: that hosts + * get removed and added + */ +public class TestRoleHistoryRequestTracking extends BaseMockAppStateTest { + + String roleName = "test"; + + NodeInstance age1Active4 = nodeInstance(1, 4, 0, 0); + NodeInstance age2Active2 = nodeInstance(2, 2, 0, 1); + NodeInstance age2Active0 = nodeInstance(2, 0, 0, 0); + NodeInstance age3Active0 = nodeInstance(3, 0, 0, 0); + NodeInstance age4Active1 = nodeInstance(4, 1, 0, 0); + NodeInstance empty = new NodeInstance("empty", MockFactory.ROLE_COUNT); + + List nodes = Arrays.asList(age2Active2, age2Active0, + age4Active1, age1Active4, age3Active0); + RoleHistory roleHistory = new 
MockRoleHistory(MockFactory.ROLES); + /** 1MB, 1 vcore*/ + Resource resource = Resource.newInstance(1, 1); + + ProviderRole provRole = new ProviderRole(roleName, 0); + RoleStatus roleStatus = new RoleStatus(provRole); + + public TestRoleHistoryRequestTracking() throws BadConfigException { + } + + AMRMClient.ContainerRequest requestContainer(RoleStatus roleStatus) { + return roleHistory.requestContainerForRole(roleStatus).getIssuedRequest(); + } + + @Override + public String getTestName() { + return "TestRoleHistoryAvailableList"; + } + + @Before + public void setupNodeMap() { + roleHistory.insert(nodes); + roleHistory.buildRecentNodeLists(); + roleStatus.setResourceRequirements(Resource.newInstance(1, 1)); + } + + @Test + public void testAvailableListBuiltForRoles() throws Throwable { + List available0 = roleHistory.cloneRecentNodeList(0); + assertListEquals(Arrays.asList(age3Active0, age2Active0), available0); + } + + @Test + public void testRequestedNodeOffList() throws Throwable { + NodeInstance ni = roleHistory.findRecentNodeForNewInstance(roleStatus); + assertEquals(age3Active0, ni); + assertListEquals(Arrays.asList(age2Active0), + roleHistory.cloneRecentNodeList(0)); + roleHistory.requestInstanceOnNode(ni, + roleStatus, + resource + ); + } + + @Test + public void testRequestedNodeOffListWithFailures() throws Throwable { + assertEquals(0, roleStatus.getKey()); + assertFalse(roleHistory.cloneRecentNodeList(0).isEmpty()); + + NodeEntry age3role0 = recordAsFailed(age3Active0, 0, 4); + assertTrue(age3Active0.isConsideredUnreliable(0, roleStatus + .getNodeFailureThreshold())); + recordAsFailed(age2Active0, 0, 4); + assertTrue(age2Active0.isConsideredUnreliable(0, roleStatus + .getNodeFailureThreshold())); + // expect to get a null node back + NodeInstance ni = roleHistory.findRecentNodeForNewInstance(roleStatus); + assertNull(ni); + + // which is translated to a no-location request + AMRMClient.ContainerRequest req = roleHistory.requestInstanceOnNode(ni, + 
roleStatus, + resource).getIssuedRequest(); + + assertNull(req.getNodes()); + + log.info("resetting failure count"); + age3role0.resetFailedRecently(); + roleHistory.dump(); + assertEquals(0, age3role0.getFailedRecently()); + assertFalse(age3Active0.isConsideredUnreliable(0, roleStatus + .getNodeFailureThreshold())); + assertFalse(roleHistory.cloneRecentNodeList(0).isEmpty()); + // looking for a node should now find one + ni = roleHistory.findRecentNodeForNewInstance(roleStatus); + assertEquals(ni, age3Active0); + req = roleHistory.requestInstanceOnNode(ni, roleStatus, resource) + .getIssuedRequest(); + assertEquals(1, req.getNodes().size()); + } + + /** + * verify that strict placement policies generate requests for nodes irrespective + * of their failed status + * @throws Throwable + */ + @Test + public void testStrictPlacementIgnoresFailures() throws Throwable { + + RoleStatus targetRole = getRole1Status(); + final ProviderRole providerRole1 = targetRole.getProviderRole(); + assertEquals(providerRole1.placementPolicy, PlacementPolicy.STRICT); + int key1 = targetRole.getKey(); + int key0 = getRole0Status().getKey(); + + List nodes = Arrays.asList(age1Active4, age2Active0, + age2Active2, age3Active0, age4Active1); + recordAllFailed(key0, 4, nodes); + recordAllFailed(key1, 4, nodes); + + // trigger a list rebuild + roleHistory.buildRecentNodeLists(); + List recentRole0 = roleHistory.cloneRecentNodeList(key0); + assertTrue(recentRole0.indexOf(age3Active0) < recentRole0.indexOf(age2Active0)); + + // the non-strict role has no suitable nodes + assertNull(roleHistory.findRecentNodeForNewInstance(getRole0Status())); + + + NodeInstance ni = roleHistory.findRecentNodeForNewInstance(targetRole); + assertNotNull(ni); + + NodeInstance ni2 = roleHistory.findRecentNodeForNewInstance(targetRole); + assertNotNull(ni2); + assertNotEquals(ni, ni2); + } + + @Test + public void testFindAndRequestNode() throws Throwable { + AMRMClient.ContainerRequest req = 
requestContainer(roleStatus); + + assertEquals(age3Active0.hostname, req.getNodes().get(0)); + List a2 = roleHistory.cloneRecentNodeList(0); + assertListEquals(Arrays.asList(age2Active0), a2); + } + + @Test + public void testRequestedNodeIntoReqList() throws Throwable { + requestContainer(roleStatus); + List requests = roleHistory.listPlacedRequests(); + assertEquals(1, requests.size()); + assertEquals(age3Active0.hostname, requests.get(0).hostname); + } + + @Test + public void testCompletedRequestDropsNode() throws Throwable { + AMRMClient.ContainerRequest req = requestContainer(roleStatus); + List requests = roleHistory.listPlacedRequests(); + assertEquals(1, requests.size()); + String hostname = requests.get(0).hostname; + assertEquals(age3Active0.hostname, hostname); + assertEquals(hostname, req.getNodes().get(0)); + MockContainer container = factory.newContainer(req, hostname); + assertOnContainerAllocated(container, 2, 1); + assertNoOutstandingPlacedRequests(); + } + + public void assertOnContainerAllocated(Container c1, int p1, int p2) { + assertNotEquals(ContainerAllocationOutcome.Open, roleHistory + .onContainerAllocated(c1, p1, p2).outcome); + } + + public void assertOnContainerAllocationOpen(Container c1, int p1, int p2) { + assertEquals(ContainerAllocationOutcome.Open, roleHistory + .onContainerAllocated(c1, p1, p2).outcome); + } + + void assertNoOutstandingPlacedRequests() { + assertTrue(roleHistory.listPlacedRequests().isEmpty()); + } + + public void assertOutstandingPlacedRequests(int i) { + assertEquals(i, roleHistory.listPlacedRequests().size()); + } + + @Test + public void testTwoRequests() throws Throwable { + AMRMClient.ContainerRequest req = requestContainer(roleStatus); + AMRMClient.ContainerRequest req2 = requestContainer(roleStatus); + List requests = roleHistory.listPlacedRequests(); + assertEquals(2, requests.size()); + MockContainer container = factory.newContainer(req, req.getNodes().get(0)); + assertOnContainerAllocated(container, 2, 
1); + assertOutstandingPlacedRequests(1); + container = factory.newContainer(req2, req2.getNodes().get(0)); + assertOnContainerAllocated(container, 2, 2); + assertNoOutstandingPlacedRequests(); + } + + @Test + public void testThreeRequestsOneUnsatisified() throws Throwable { + AMRMClient.ContainerRequest req = requestContainer(roleStatus); + AMRMClient.ContainerRequest req2 = requestContainer(roleStatus); + AMRMClient.ContainerRequest req3 = requestContainer(roleStatus); + List requests = roleHistory.listPlacedRequests(); + assertEquals(2, requests.size()); + MockContainer container = factory.newContainer(req, req.getNodes().get(0)); + assertOnContainerAllocated(container, 2, 1); + assertOutstandingPlacedRequests(1); + + container = factory.newContainer(req3, "three"); + assertOnContainerAllocationOpen(container, 3, 2); + assertOutstandingPlacedRequests(1); + + // the final allocation will trigger a cleanup + container = factory.newContainer(req2, "four"); + // no node dropped + assertEquals(ContainerAllocationOutcome.Unallocated, + roleHistory.onContainerAllocated(container, 3, 3).outcome); + // yet the list is now empty + assertNoOutstandingPlacedRequests(); + roleHistory.listOpenRequests().isEmpty(); + + // and the remainder goes onto the available list + List a2 = roleHistory.cloneRecentNodeList(0); + assertListEquals(Arrays.asList(age2Active0), a2); + } + + @Test + public void testThreeRequests() throws Throwable { + AMRMClient.ContainerRequest req = requestContainer(roleStatus); + AMRMClient.ContainerRequest req2 = requestContainer(roleStatus); + AMRMClient.ContainerRequest req3 = requestContainer(roleStatus); + assertOutstandingPlacedRequests(2); + assertNull(req3.getNodes()); + MockContainer container = factory.newContainer(req, req.getNodes().get(0)); + assertOnContainerAllocated(container, 3, 1); + assertOutstandingPlacedRequests(1); + container = factory.newContainer(req2, req2.getNodes().get(0)); + assertOnContainerAllocated(container, 3, 2); + 
assertNoOutstandingPlacedRequests(); + container = factory.newContainer(req3, "three"); + assertOnContainerAllocationOpen(container, 3, 3); + assertNoOutstandingPlacedRequests(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryUpdateBlacklist.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryUpdateBlacklist.java new file mode 100644 index 0000000..c18c47b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryUpdateBlacklist.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.history; + +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.server.appmaster.actions.ResetFailureWindow; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockAM; +import org.apache.slider.server.appmaster.model.mock.MockFactory; +import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler; +import org.apache.slider.server.appmaster.model.mock.MockRoleHistory; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.UpdateBlacklistOperation; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.RoleHistory; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.junit.Before; +import org.junit.Test; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +public class TestRoleHistoryUpdateBlacklist extends BaseMockAppStateTest { + RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES); + Collection roleStatuses = Arrays.asList(new RoleStatus(MockFactory + .PROVIDER_ROLE0)); + NodeInstance ni = nodeInstance(1, 0, 0, 0); + List nodes = Arrays.asList(ni); + + public TestRoleHistoryUpdateBlacklist() throws BadConfigException { + } + + @Override + public String getTestName() { + return "TestUpdateBlacklist"; + } + + @Before + public void setupNodeMap() { + roleHistory.insert(nodes); + roleHistory.buildRecentNodeLists(); + appState.setRoleHistory(roleHistory); + } + + @Test + public void testUpdateBlacklist() { + assertFalse(ni.isBlacklisted()); + + // at threshold, blacklist is unmodified + recordAsFailed(ni, 0, MockFactory.NODE_FAILURE_THRESHOLD); + UpdateBlacklistOperation op = roleHistory.updateBlacklist(roleStatuses); + assertNull(op); + assertFalse(ni.isBlacklisted()); + + // threshold is 
reached, node goes on blacklist + recordAsFailed(ni, 0, 1); + op = roleHistory.updateBlacklist(roleStatuses); + assertNotNull(op); + assertTrue(ni.isBlacklisted()); + + // blacklist remains unmodified + op = roleHistory.updateBlacklist(roleStatuses); + assertNull(op); + assertTrue(ni.isBlacklisted()); + + // failure threshold reset, node goes off blacklist + ni.resetFailedRecently(); + op = roleHistory.updateBlacklist(roleStatuses); + assertNotNull(op); + assertFalse(ni.isBlacklisted()); + } + + @Test + public void testBlacklistOperations() + throws Exception { + recordAsFailed(ni, 0, MockFactory.NODE_FAILURE_THRESHOLD + 1); + + List ops = appState.reviewRequestAndReleaseNodes(); + assertListLength(ops, 1); + AbstractRMOperation op = ops.get(0); + assertTrue(op instanceof UpdateBlacklistOperation); + assertTrue(ni.isBlacklisted()); + + MockRMOperationHandler handler = new MockRMOperationHandler(); + assertEquals(0, handler.blacklisted); + handler.execute(ops); + assertEquals(1, handler.blacklisted); + + ResetFailureWindow resetter = new ResetFailureWindow(handler); + resetter.execute(new MockAM(), null, appState); + assertEquals(0, handler.blacklisted); + assertFalse(ni.isBlacklisted()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/Allocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/Allocator.java new file mode 100644 index 0000000..8dd9ac8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/Allocator.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.slider.common.tools.SliderUtils; + +/** + * Provides allocation services to a cluster -both random and placed. + * + * Important: container allocations need an app attempt ID put into the container ID + */ +public class Allocator { + + final MockYarnCluster cluster; + /** + * Rolling index into the cluster used for the + * next "random" assignment + */ + private int rollingIndex = 0; + + Allocator(MockYarnCluster cluster) { + this.cluster = cluster; + } + + /** + * Allocate a node using the list of nodes in the container as the + * hints. 
+ * @param request request + * @return the allocated container -or null for none + */ + MockContainer allocate(AMRMClient.ContainerRequest request) { + MockYarnCluster.MockYarnClusterNode node = null; + MockYarnCluster.MockYarnClusterContainer allocated = null; + if (SliderUtils.isNotEmpty(request.getNodes())) { + for (String host : request.getNodes()) { + node = cluster.lookup(host); + allocated = node.allocate(); + if (allocated != null) { + break; + } + } + } + + if (allocated != null) { + return createContainerRecord(request, allocated, node); + } else { + if (request.getRelaxLocality() || request.getNodes().isEmpty()) { + // fallback to anywhere + return allocateRandom(request); + } else { + //no match and locality can't be requested + return null; + } + } + } + + /** + * Allocate a node without any positioning -use whatever policy this allocator + * chooses. + * @param request request + * @return the allocated container -or null for none + */ + MockContainer allocateRandom(AMRMClient.ContainerRequest request) { + int start = rollingIndex; + MockYarnCluster.MockYarnClusterNode node = cluster.nodeAt(rollingIndex); + MockYarnCluster.MockYarnClusterContainer allocated = node.allocate(); + // if there is no space, try again -but stop when all the nodes + // have failed + while (allocated == null && start != nextIndex()) { + node = cluster.nodeAt(rollingIndex); + allocated = node.allocate(); + } + + //here the allocation is set, so create the response + return createContainerRecord(request, allocated, node); + } + + /** + * Create a container record -if one was allocated + * @param allocated allocation -may be null + * @param node node with the container + * @return a container record, or null if there was no allocation + */ + public MockContainer createContainerRecord( + AMRMClient.ContainerRequest request, + MockYarnCluster.MockYarnClusterContainer allocated, + MockYarnCluster.MockYarnClusterNode node) { + if (allocated == null) { + // no space + return null; + } 
+ MockContainer container = new MockContainer(); + container.id = new MockContainerId(allocated.cid); + container.nodeId = node.nodeId; + container.nodeHttpAddress = node.httpAddress(); + container.priority = request.getPriority(); + container.resource = request.getCapability(); + return container; + } + + public int nextIndex() { + rollingIndex = (rollingIndex + 1) % cluster.clusterSize; + return rollingIndex; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java new file mode 100644 index 0000000..40c4308 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/BaseMockAppStateTest.java @@ -0,0 +1,504 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.mock; + +import com.fasterxml.jackson.core.JsonProcessingException; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.common.tools.SliderFileSystem; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.exceptions.BadClusterStateException; +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.core.exceptions.SliderInternalStateException; +import org.apache.slider.core.exceptions.TriggerClusterTeardownException; +import org.apache.slider.core.main.LauncherExitCodes; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.CancelSingleRequest; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; +import org.apache.slider.server.appmaster.state.ContainerAssignment; +import org.apache.slider.server.appmaster.state.ContainerOutcome; +import org.apache.slider.server.appmaster.state.NodeEntry; +import org.apache.slider.server.appmaster.state.NodeInstance; +import org.apache.slider.server.appmaster.state.NodeMap; +import org.apache.slider.server.appmaster.state.ProviderAppState; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.slider.server.appmaster.state.RoleStatus; +import 
org.apache.slider.server.appmaster.state.StateAccessForProviders; +import org.apache.slider.test.SliderTestBase; + +import java.io.File; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map.Entry; + +public abstract class BaseMockAppStateTest extends SliderTestBase implements MockRoles { + protected static final List EMPTY_ID_LIST = Collections + .emptyList(); + + protected MockFactory factory = MockFactory.instance; + protected MockAppState appState; + protected MockYarnEngine engine; + protected FileSystem fs; + protected SliderFileSystem sliderFileSystem; + protected File historyWorkDir; + protected Path historyPath; + protected MockApplicationId applicationId; + protected MockApplicationAttemptId applicationAttemptId; + protected StateAccessForProviders stateAccess; + + /** + * Override point: called in setup() to create the YARN engine; can + * be changed for different sizes and options + * @return + */ + public MockYarnEngine createYarnEngine() { + return new MockYarnEngine(8, 8); + } + + @Override + public void setup() throws Exception { + super.setup(); + YarnConfiguration conf = SliderUtils.createConfiguration(); + fs = FileSystem.get(new URI("file:///"), conf); + sliderFileSystem = new SliderFileSystem(fs, conf); + engine = createYarnEngine(); + initApp(); + } + + /** + * Initialize the application. + * This uses the binding information supplied by {@link #buildBindingInfo()}. 
+ */ + protected void initApp() + throws IOException, BadConfigException, BadClusterStateException { + String historyDirName = getTestName(); + applicationId = new MockApplicationId(1, 0); + applicationAttemptId = new MockApplicationAttemptId(applicationId, 1); + + historyWorkDir = new File("target/history", historyDirName); + historyPath = new Path(historyWorkDir.toURI()); + fs.delete(historyPath, true); + appState = new MockAppState(buildBindingInfo()); + stateAccess = new ProviderAppState(getTestName(), appState); + } + + /** + * Build the binding info from the default constructor values, + * the roles from {@link #factory}, and an instance definition + * from {@link #buildInstanceDefinition()} + * @return + */ + protected AppStateBindingInfo buildBindingInfo() { + AppStateBindingInfo binding = new AppStateBindingInfo(); + binding.instanceDefinition = buildInstanceDefinition(); + binding.roles = new ArrayList<>(factory.ROLES); + binding.fs = fs; + binding.historyPath = historyPath; + binding.nodeReports = engine.getNodeReports(); + return binding; + } + + /** + * Override point, define the instance definition + * @return the instance definition + */ + public AggregateConf buildInstanceDefinition() { + return factory.newInstanceDefinition(0, 0, 0); + } + + /** + * Get the test name ... 
defaults to method name + * @return the method name + */ + public String getTestName() { + return methodName.getMethodName(); + } + + public RoleStatus getRole0Status() { + return lookupRole(ROLE0); + } + + public RoleStatus lookupRole(String role) { + return appState.lookupRoleStatus(role); + } + + public RoleStatus getRole1Status() { + return lookupRole(ROLE1); + } + + public RoleStatus getRole2Status() { + return lookupRole(ROLE2); + } + + /** + * Build a role instance from a container assignment + * @param assigned + * @return the instance + */ + public RoleInstance roleInstance(ContainerAssignment assigned) { + Container target = assigned.container; + RoleInstance ri = new RoleInstance(target); + ri.roleId = assigned.role.getPriority(); + ri.role = assigned.role.getName(); + return ri; + } + + public NodeInstance nodeInstance(long age, int live0, int live1, int live2) { + NodeInstance ni = new NodeInstance(String.format("age%d-[%d,%d,%d]", age, + live0, live1, live2), MockFactory.ROLE_COUNT); + ni.getOrCreate(0).setLastUsed(age); + ni.getOrCreate(0).setLive(live0); + if (live1 > 0) { + ni.getOrCreate(1).setLive(live1); + } + if (live2 > 0) { + ni.getOrCreate(2).setLive(live2); + } + return ni; + } + + /** + * Create a container status event + * @param c container + * @return a status + */ + ContainerStatus containerStatus(Container c) { + return containerStatus(c.getId()); + } + + /** + * Create a container status instance for the given ID, declaring + * that it was shut down by the application itself + * @param cid container Id + * @return the instance + */ + public ContainerStatus containerStatus(ContainerId cid) { + ContainerStatus status = containerStatus(cid, + LauncherExitCodes.EXIT_CLIENT_INITIATED_SHUTDOWN); + return status; + } + + public ContainerStatus containerStatus(ContainerId cid, int exitCode) { + ContainerStatus status = ContainerStatus.newInstance( + cid, + ContainerState.COMPLETE, + "", + exitCode); + return status; + } + + /** + * Create 
nodes and bring them to the started state + * @return a list of roles + */ + protected List createAndStartNodes() + throws TriggerClusterTeardownException, SliderInternalStateException { + return createStartAndStopNodes(new ArrayList<>()); + } + + /** + * Create, Start and stop nodes + * @param completionResults List filled in with the status on all completed nodes + * @return the nodes + */ + public List createStartAndStopNodes( + List completionResults) + throws TriggerClusterTeardownException, SliderInternalStateException { + List released = new ArrayList<>(); + List instances = createAndSubmitNodes(released); + processSubmissionOperations(instances, completionResults, released); + return instances; + } + + /** + * Process the start/stop operations from + * @param instances + * @param completionResults + * @param released + */ + public void processSubmissionOperations( + List instances, + List completionResults, + List released) { + for (RoleInstance instance : instances) { + log.debug("Started {} on {}", instance.role, instance.id); + assertNotNull(appState.onNodeManagerContainerStarted(instance + .getContainerId())); + } + releaseContainers(completionResults, + released, + ContainerState.COMPLETE, + "released", + 0 + ); + } + + /** + * Release a list of containers, updating the completion results + * @param completionResults + * @param containerIds + * @param containerState + * @param exitText + * @param containerExitCode + * @return + */ + public void releaseContainers( + List completionResults, + List containerIds, + ContainerState containerState, + String exitText, + int containerExitCode) { + for (ContainerId id : containerIds) { + ContainerStatus status = ContainerStatus.newInstance(id, + containerState, + exitText, + containerExitCode); + completionResults.add(appState.onCompletedNode(status)); + } + } + + /** + * Create nodes and submit them + * @return a list of roles + */ + public List createAndSubmitNodes() + throws TriggerClusterTeardownException, 
SliderInternalStateException { + return createAndSubmitNodes(new ArrayList<>()); + } + + /** + * Create nodes and submit them + * @return a list of roles + */ + public List createAndSubmitNodes(List containerIds) + throws TriggerClusterTeardownException, SliderInternalStateException { + return createAndSubmitNodes(containerIds, new ArrayList<>()); + } + + /** + * Create nodes and submit them + * @return a list of roles allocated + */ + public List createAndSubmitNodes( + List containerIds, + List operationsOut) + throws TriggerClusterTeardownException, SliderInternalStateException { + List ops = appState.reviewRequestAndReleaseNodes(); + return submitOperations(ops, containerIds, operationsOut); + } + + public List submitOperations( + List operationsIn, + List released) { + return submitOperations(operationsIn, released, new ArrayList<>()); + } + + /** + * Process the RM operations and send onContainersAllocated + * events to the app state + * @param operationsIn list of incoming ops + * @param released released containers + * @return list of outbound operations + */ + public List submitOperations( + List operationsIn, + List released, + List operationsOut) { + List allocatedContainers = engine.execute(operationsIn, + released); + List assignments = new ArrayList<>(); + appState.onContainersAllocated(allocatedContainers, assignments, + operationsOut); + + List roles = new ArrayList<>(); + for (ContainerAssignment assigned : assignments) { + Container container = assigned.container; + RoleInstance ri = roleInstance(assigned); + //tell the app it arrived + log.debug("Start submitted {} on ${}", ri.role, container.getId()); + appState.containerStartSubmitted(container, ri); + roles.add(ri); + } + return roles; + } + + /** + * Add the AM to the app state + */ + protected void addAppMastertoAppState() { + appState.buildAppMasterNode( + new MockContainerId(applicationAttemptId, 999999L), + "appmaster", + 0, + null); + } + + /** + * Extract the list of container IDs from 
the list of role instances + * @param instances instance list + * @param role role to look up + * @return the list of CIDs + */ + public List extractContainerIds( + List instances, + int role) { + List ids = new ArrayList<>(); + for (RoleInstance ri : instances) { + if (ri.roleId == role) { + ids.add(ri.getContainerId()); + } + } + return ids; + } + + /** + * Record a node as failing + * @param node + * @param id + * @param count + * @return the entry + */ + public NodeEntry recordAsFailed(NodeInstance node, int id, int count) { + NodeEntry entry = node.getOrCreate(id); + for (int i = 1; i <= count; i++) { + entry.containerCompleted( + false, + ContainerOutcome.Failed); + } + return entry; + } + + protected void recordAllFailed(int id, int count, List nodes) { + for (NodeInstance node : nodes) { + recordAsFailed(node, id, count); + } + } + + /** + * Get the container request of an indexed entry. Includes some assertions for better diagnostics + * @param ops operation list + * @param index index in the list + * @return the request. + */ + public AMRMClient.ContainerRequest getRequest(List ops, + int index) { + assertTrue(index < ops.size()); + AbstractRMOperation op = ops.get(index); + assertTrue(op instanceof ContainerRequestOperation); + return ((ContainerRequestOperation) op).getRequest(); + } + + /** + * Get the cancel request of an indexed entry. Includes some assertions for better diagnostics + * @param ops operation list + * @param index index in the list + * @return the request. 
+ */ + public AMRMClient.ContainerRequest getCancel(List ops, + int index) { + assertTrue(index < ops.size()); + AbstractRMOperation op = ops.get(index); + assertTrue(op instanceof CancelSingleRequest); + return ((CancelSingleRequest) op).getRequest(); + } + + /** + * Get the single request of a list of operations; includes the check for the size + * @param ops operations list of size 1 + * @return the request within the first ContainerRequestOperation + */ + public AMRMClient.ContainerRequest getSingleRequest(List ops) { + assertEquals(1, ops.size()); + return getRequest(ops, 0); + } + + /** + * Get the node information as a large JSON String + * @return + */ + protected String nodeInformationSnapshotAsString() + throws UnsupportedEncodingException, JsonProcessingException { + return prettyPrintAsJson(stateAccess.getNodeInformationSnapshot()); + } + + /** + * Scan through all containers and assert that the assignment is AA + * @param index role index + */ + protected void assertAllContainersAA(int index) { + for (Entry nodeMapEntry : cloneNodemap().entrySet()) { + String name = nodeMapEntry.getKey(); + NodeInstance ni = nodeMapEntry.getValue(); + NodeEntry nodeEntry = ni.get(index); + assertTrue("too many instances on node " + name, nodeEntry == null || + nodeEntry.isAntiAffinityConstraintHeld()); + } + } + + List verifyNodeInstanceCount(int size, List list) { + if (list.size() != size) { + for (NodeInstance ni : list) { + log.error(ni.toFullString()); + } + } + assertEquals(size, list.size()); + return list; + } + + /** + * Get the single request of a list of operations; includes the check for the size + * @param ops operations list of size 1 + * @return the request within the first operation + */ + public AMRMClient.ContainerRequest getSingleCancel(List ops) { + assertEquals(1, ops.size()); + return getCancel(ops, 0); + } + + /** + * Get a snapshot of the nodemap of the application state + * @return a cloned nodemap + */ + protected NodeMap cloneNodemap() { + 
return appState.getRoleHistory().cloneNodemap(); + } + + /** + * Issue a nodes updated event + * @param report report to notify + * @return response of AM + */ + protected AppState.NodeUpdatedOutcome updateNodes(NodeReport report) { + return appState.onNodesUpdated(Collections.singletonList(report)); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAM.java new file mode 100644 index 0000000..90874a7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAM.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.slider.server.appmaster.SliderAppMaster; + +public class MockAM extends SliderAppMaster { +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAppState.java new file mode 100644 index 0000000..92d28c4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockAppState.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.slider.core.exceptions.BadClusterStateException; +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.providers.ProviderRole; +import org.apache.slider.server.appmaster.management.MetricsAndMonitoring; +import org.apache.slider.server.appmaster.state.AbstractClusterServices; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.AppStateBindingInfo; + +import java.io.IOException; +import java.util.Map; + +/** + * Extended app state that makes more things public + */ +public class MockAppState extends AppState { + public static final int RM_MAX_RAM = 4096; + public static final int RM_MAX_CORES = 64; + + public long time = -1; + + public MockAppState(AbstractClusterServices recordFactory) { + super(recordFactory, new MetricsAndMonitoring()); + setContainerLimits(1, RM_MAX_RAM, 1, RM_MAX_CORES); + } + + /** + * Instance with a mock record factory + */ + public MockAppState() { + this(new MockClusterServices()); + } + + public MockAppState(AppStateBindingInfo bindingInfo) + throws BadClusterStateException, IOException, BadConfigException { + this(); + buildInstance(bindingInfo); + } + + public Map getRoleMap() { + return super.getRoleMap(); + } + + /** + * Current time. if the time field + * is set, that value is returned + * @return the current time. 
+ */ + protected long now() { + if (time > 0) { + return time; + } + return System.currentTimeMillis(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationAttemptId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationAttemptId.java new file mode 100644 index 0000000..3455afe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationAttemptId.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; + +class MockApplicationAttemptId extends ApplicationAttemptId { + + ApplicationId applicationId; + int attemptId; + + public MockApplicationAttemptId() { + } + + public MockApplicationAttemptId(ApplicationId applicationId, int attemptId) { + this.applicationId = applicationId; + this.attemptId = attemptId; + } + + @Override + public ApplicationId getApplicationId() { + return applicationId; + } + + @Override + public void setApplicationId(ApplicationId applicationId) { + this.applicationId = applicationId; + } + + @Override + public int getAttemptId() { + return attemptId; + } + + @Override + public void setAttemptId(int attemptId) { + this.attemptId = attemptId; + } + + @Override + protected void build() { + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationId.java new file mode 100644 index 0000000..419ac98 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockApplicationId.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.ApplicationId; + +public class MockApplicationId extends ApplicationId { + + private int id; + private long clusterTimestamp; + + public MockApplicationId() { + } + + public MockApplicationId(int id) { + this.id = id; + } + + public MockApplicationId(int id, long clusterTimestamp) { + this.id = id; + this.clusterTimestamp = clusterTimestamp; + } + + @Override + public int getId() { + return id; + } + + @Override + public void setId(int id) { + this.id = id; + } + + @Override + public long getClusterTimestamp() { + return clusterTimestamp; + } + + @Override + public void setClusterTimestamp(long clusterTimestamp) { + this.clusterTimestamp = clusterTimestamp; + } + + @Override + public void build() { + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockClusterServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockClusterServices.java new file mode 100644 index 0000000..70a94c7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockClusterServices.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.slider.server.appmaster.state.AbstractClusterServices; + +public class MockClusterServices extends AbstractClusterServices { + + @Override + public Resource newResource() { + return new MockResource(0, 0); + } + + @Override + public Resource newResource(int memory, int cores) { + return new MockResource(memory, cores); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainer.java new file mode 100644 index 0000000..01b6b16 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainer.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ExecutionType; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.Token; + +public class MockContainer extends Container { + + ContainerId id; + NodeId nodeId; + String nodeHttpAddress; + Resource resource; + Priority priority; + Token containerToken; + + @Override + public int compareTo(Container other) { + if (this.getId().compareTo(other.getId()) == 0) { + if (this.getNodeId().compareTo(other.getNodeId()) == 0) { + return this.getResource().compareTo(other.getResource()); + } else { + return this.getNodeId().compareTo(other.getNodeId()); + } + } else { + return this.getId().compareTo(other.getId()); + } + } + + @Override + public String toString() { + return "MockContainer{ id=" + id + + ", nodeHttpAddress='" + nodeHttpAddress + "'," + + " priority=" + priority + " }"; + } + + @Override + public ContainerId getId() { + return id; + } + + @Override + public void setId(ContainerId id) { + this.id = id; + } + + 
@Override + public NodeId getNodeId() { + return nodeId; + } + + @Override + public void setNodeId(NodeId nodeId) { + this.nodeId = nodeId; + } + + @Override + public String getNodeHttpAddress() { + return nodeHttpAddress; + } + + @Override + public void setNodeHttpAddress(String nodeHttpAddress) { + this.nodeHttpAddress = nodeHttpAddress; + } + + @Override + public Resource getResource() { + return resource; + } + + @Override + public void setResource(Resource resource) { + this.resource = resource; + } + + @Override + public Priority getPriority() { + return priority; + } + + @Override + public void setPriority(Priority priority) { + this.priority = priority; + } + + @Override + public Token getContainerToken() { + return containerToken; + } + + @Override + public void setContainerToken(Token containerToken) { + this.containerToken = containerToken; + } + + @Override + public ExecutionType getExecutionType() { + return null; + } + + @Override + public void setExecutionType(ExecutionType executionType) { + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainerId.java new file mode 100644 index 0000000..e2f5c2d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockContainerId.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ContainerId; + +public class MockContainerId extends ContainerId implements Cloneable { + + private static final MockApplicationAttemptId defaultAppAttemptId = + new MockApplicationAttemptId(new MockApplicationId(1), 1); + + long containerId; + ApplicationAttemptId applicationAttemptId; + + MockContainerId() { + } + + /** + * Sets up a default app Attempt ID + * @param containerId + */ + @Deprecated + MockContainerId(long containerId) { + this.containerId = containerId; + this.applicationAttemptId = defaultAppAttemptId; + } + + public MockContainerId(ApplicationAttemptId applicationAttemptId, + long containerId) { + this.containerId = containerId; + this.applicationAttemptId = applicationAttemptId; + } + + MockContainerId(ContainerId that) { + containerId = that.getContainerId(); + applicationAttemptId = that.getApplicationAttemptId(); + } + + + public int getId() { + return (int) containerId; + } + + // TODO: Temporarily adding it back + void setId(int id) { + containerId = (long) id; + } + + public long getContainerId() { + return this.containerId; + } + + public void setContainerId(long id) { + this.containerId = id; + } + + public ApplicationAttemptId getApplicationAttemptId() { + return applicationAttemptId; + } + 
+ public void setApplicationAttemptId(ApplicationAttemptId + applicationAttemptId) { + this.applicationAttemptId = applicationAttemptId; + } + + @Override + public void build() { + + } + + @Override + public String toString() { + return "mockcontainer_" + containerId; + } + + @Override + protected Object clone() throws CloneNotSupportedException { + return super.clone(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java new file mode 100644 index 0000000..958ebb3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFactory.java @@ -0,0 +1,310 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.impl.pb.NodeReportPBImpl; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.slider.api.ClusterDescription; +import org.apache.slider.api.ResourceKeys; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.conf.ConfTree; +import org.apache.slider.providers.PlacementPolicy; +import org.apache.slider.providers.ProviderRole; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +/** + * Factory for creating things + */ +public class MockFactory implements MockRoles { + + public static final int NODE_FAILURE_THRESHOLD = 2; + + public static MockFactory instance = new MockFactory(); + + /* + Ignore any IDE hints about needless references to the ROLE values; groovyc fails without them. + */ + + /** + * basic role + */ + public static final ProviderRole PROVIDER_ROLE0 = new ProviderRole( + MockRoles.ROLE0, + 0, + PlacementPolicy.DEFAULT, + NODE_FAILURE_THRESHOLD, + 1, + ResourceKeys.DEF_YARN_LABEL_EXPRESSION); + /** + * role 1 is strict. 
timeout should be irrelevant; same as failures + */ + public static final ProviderRole PROVIDER_ROLE1 = new ProviderRole( + MockRoles.ROLE1, + 1, + PlacementPolicy.STRICT, + NODE_FAILURE_THRESHOLD, + 1, + ResourceKeys.DEF_YARN_LABEL_EXPRESSION); + + /** + * role 2: longer delay + */ + public static final ProviderRole PROVIDER_ROLE2 = new ProviderRole( + MockRoles.ROLE2, + 2, + PlacementPolicy.ANYWHERE, + NODE_FAILURE_THRESHOLD, + 2, + ResourceKeys.DEF_YARN_LABEL_EXPRESSION); + + /** + * Patch up a "role2" role to have anti-affinity set + */ + public static final ProviderRole AAROLE_2 = new ProviderRole( + MockRoles.ROLE2, + 2, + PlacementPolicy.ANTI_AFFINITY_REQUIRED, + NODE_FAILURE_THRESHOLD, + 2, + null); + + /** + * Patch up a "role1" role to have anti-affinity set and GPI as the label + */ + public static final ProviderRole AAROLE_1_GPU = new ProviderRole( + MockRoles.ROLE1, + 1, + PlacementPolicy.ANTI_AFFINITY_REQUIRED, + NODE_FAILURE_THRESHOLD, + 1, + MockRoles.LABEL_GPU); + + int appIdCount; + int attemptIdCount; + int containerIdCount; + + ApplicationId appId = newAppId(); + ApplicationAttemptId attemptId = newApplicationAttemptId(appId); + + /** + * List of roles + */ + public static final List ROLES = Arrays.asList( + PROVIDER_ROLE0, + PROVIDER_ROLE1, + PROVIDER_ROLE2 + ); + + public static final int ROLE_COUNT = ROLES.size(); + + MockContainerId newContainerId() { + return newContainerId(attemptId); + } + + MockContainerId newContainerId(ApplicationAttemptId attemptId) { + MockContainerId cid = new MockContainerId(); + cid.containerId = containerIdCount++; + cid.applicationAttemptId = attemptId; + return cid; + } + + MockApplicationAttemptId newApplicationAttemptId(ApplicationId appId) { + MockApplicationAttemptId id = new MockApplicationAttemptId(); + id.attemptId = attemptIdCount++; + id.applicationId = appId; + return id; + } + + MockApplicationId newAppId() { + MockApplicationId id = new MockApplicationId(); + id.setId(appIdCount++); + return id; + } 
+ + public MockNodeId newNodeId(String host) { + return new MockNodeId(host); + } + + MockContainer newContainer(ContainerId cid) { + MockContainer c = new MockContainer(); + c.id = cid; + return c; + } + + public MockContainer newContainer() { + return newContainer(newContainerId()); + } + + public MockContainer newContainer(NodeId nodeId, Priority priority) { + MockContainer container = newContainer(newContainerId()); + container.nodeId = nodeId; + container.priority = priority; + return container; + } + + /** + * Build a new container using the request to suppy priority and resource + * @param req request + * @param host hostname to assign to + * @return the container + */ + public MockContainer newContainer(AMRMClient.ContainerRequest req, String + host) { + MockContainer container = newContainer(newContainerId()); + container.resource = req.getCapability(); + container.priority = req.getPriority(); + container.nodeId = new MockNodeId(host); + return container; + } + + /** + * Create a cluster spec with the given desired role counts + * @param r1 + * @param r2 + * @param r3 + * @return + */ + ClusterDescription newClusterSpec(int r1, int r2, int r3) { + ClusterDescription cd = new ClusterDescription(); + cd.roles = newComponentsSection(r1, r2, r3); + + return cd; + } + + public Map> newComponentsSection( + int r1, + int r2, + int r3) { + Map> map = new HashMap<>(); + map.put(ROLE0, roleMap(r1)); + map.put(ROLE1, roleMap(r2)); + map.put(ROLE2, roleMap(r3)); + return map; + } + + /** + * Create a cluster spec with the given desired role counts + * @param r1 + * @param r2 + * @param r3 + * @return + */ + public ConfTree newConfTree(int r1, int r2, int r3) { + ConfTree cd = new ConfTree(); + + cd.components = newComponentsSection(r1, r2, r3); + + return cd; + } + + /** + * Create a new instance with the given components definined in the + * resources section + * @param r1 + * @param r2 + * @param r3 + * @return + */ + public AggregateConf newInstanceDefinition(int 
r1, int r2, int r3) { + AggregateConf instance = new AggregateConf(); + instance.setResources(newConfTree(r1, r2, r3)); + return instance; + } + + Map roleMap(int count) { + Map map = new HashMap<>(); + map.put(ResourceKeys.COMPONENT_INSTANCES, Integer.toString(count)); + return map; + } + + public MockResource newResource(int memory, int vcores) { + return new MockResource(memory, vcores); + } + + ContainerStatus newContainerStatus() { + return newContainerStatus(null, null, "", 0); + } + + ContainerStatus newContainerStatus(ContainerId containerId, + ContainerState containerState, String diagnostics, int exitStatus) { + return ContainerStatus.newInstance(containerId, containerState, + diagnostics, exitStatus); + } + + /** + * Create a single instance + * @param hostname + * @param nodeState + * @param label + */ + public NodeReport newNodeReport(String hostname, NodeState nodeState, + String label) { + NodeId nodeId = NodeId.newInstance(hostname, 80); + Integer.valueOf(hostname, 16); + return newNodeReport(hostname, nodeId, nodeState, label); + } + + NodeReport newNodeReport( + String hostname, + NodeId nodeId, + NodeState nodeState, + String label) { + NodeReport report = new NodeReportPBImpl(); + HashSet nodeLabels = new HashSet<>(); + nodeLabels.add(label); + report.setNodeId(nodeId); + report.setNodeLabels(nodeLabels); + report.setNodeState(nodeState); + report.setHttpAddress("http$hostname:80"); + return report; + } + + /** + * Create a list of instances -one for each hostname + * @param hostnames hosts + * @return + */ + public List createNodeReports( + List hostnames, NodeState nodeState, String label) { + if (nodeState == null) { + nodeState = NodeState.RUNNING; + } + List reports = new ArrayList<>(); + for (String name : hostnames) { + reports.add(newNodeReport(name, nodeState, label)); + } + return reports; + } + +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFileSystem.java new file mode 100644 index 0000000..72d1665 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockFileSystem.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.fs.FilterFileSystem; +import org.apache.hadoop.fs.Path; + +import java.io.IOException; + +/** + * + */ +class MockFileSystem extends FilterFileSystem{ + @Override + public Path resolvePath(Path p) throws IOException { + return new Path("hdfs://localhost/", p); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockNodeId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockNodeId.java new file mode 100644 index 0000000..c2fbf28 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockNodeId.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.NodeId; + +public class MockNodeId extends NodeId { + String host; + int port; + + public MockNodeId() { + } + + MockNodeId(String host) { + this.host = host; + } + + public MockNodeId(String host, int port) { + this.host = host; + this.port = port; + } + + public String getHost() { + return host; + } + + public void setHost(String host) { + this.host = host; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + @Override + protected void build() { + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockPriority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockPriority.java new file mode 100644 index 0000000..ae14b38 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockPriority.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.Priority; + +public class MockPriority extends Priority { + + private int priority; + + public MockPriority(int priority) { + this.priority = priority; + } + + MockPriority() { + } + + @Override + public int getPriority() { + return priority; + } + + @Override + public void setPriority(int priority) { + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java new file mode 100644 index 0000000..e9b631a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockProviderService.java @@ -0,0 +1,294 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.service.LifecycleEvent; +import org.apache.hadoop.service.ServiceStateChangeListener; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.slider.api.ClusterDescription; +import org.apache.slider.common.tools.SliderFileSystem; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.conf.MapOperations; +import org.apache.slider.core.exceptions.BadCommandArgumentsException; +import org.apache.slider.core.exceptions.SliderException; +import org.apache.slider.core.launch.ContainerLauncher; +import org.apache.slider.providers.MonitorDetail; +import org.apache.slider.providers.ProviderCompleted; +import org.apache.slider.providers.ProviderRole; +import org.apache.slider.providers.ProviderService; +import org.apache.slider.server.appmaster.actions.QueueAccess; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.state.ContainerReleaseSelector; +import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector; +import org.apache.slider.server.appmaster.state.StateAccessForProviders; +import org.apache.slider.server.services.yarnregistry.YarnRegistryViewForProviders; + +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.util.List; +import java.util.Map; + +public class MockProviderService implements ProviderService { + + @Override + public String getHumanName() { + return null; + } + + @Override + public String getName() { + return null; + } + + @Override + public 
List getRoles() { + return null; + } + + @Override + public Configuration getConf() { + return null; + } + + @Override + public void validateInstanceDefinition(AggregateConf instanceDefinition) + throws SliderException { + } + + @Override + public void init(Configuration config) { + } + + @Override + public void start() { + } + + @Override + public void stop() { + } + + @Override + public void close() throws IOException { + } + + @Override + public void registerServiceListener(ServiceStateChangeListener listener) { + } + + @Override + public void unregisterServiceListener(ServiceStateChangeListener listener) { + } + + @Override + public Configuration getConfig() { + return null; + } + + public STATE getServiceState() { + return null; + } + + @Override + public long getStartTime() { + return 0; + } + + @Override + public boolean isInState(STATE state) { + return false; + } + + @Override + public Throwable getFailureCause() { + return null; + } + + @Override + public STATE getFailureState() { + return null; + } + + @Override + public boolean waitForServiceToStop(long timeout) { + return false; + } + + @Override + public List getLifecycleHistory() { + return null; + } + + @Override + public Map getBlockers() { + return null; + } + + @Override + public int getExitCode() { + return 0; + } + + + @Override + public boolean exec( + AggregateConf instanceDefinition, + File confDir, + Map env, + ProviderCompleted execInProgress) throws IOException, SliderException { + return false; + } + + @Override + public boolean isSupportedRole(String role) { + return false; + } + + @Override + public Configuration loadProviderConfigurationInformation(File confDir) + throws BadCommandArgumentsException, IOException { + return null; + } + + @Override + public void initializeApplicationConfiguration(AggregateConf + instanceDefinition, + SliderFileSystem fileSystem, String roleGroup) + throws IOException, SliderException { + } + + @Override + public void validateApplicationConfiguration( + 
AggregateConf instanceDefinition, + File confDir, + boolean secure) throws IOException, SliderException { + } + + + @Override + public Map buildProviderStatus() { + return null; + } + + @Override + public void buildContainerLaunchContext( + ContainerLauncher containerLauncher, + AggregateConf instanceDefinition, + Container container, + ProviderRole role, + SliderFileSystem sliderFileSystem, + Path generatedConfPath, + MapOperations resourceComponent, + MapOperations appComponent, + Path containerTmpDirPath) throws IOException, SliderException { + + } + + @Override + public Map buildMonitorDetails( + ClusterDescription clusterSpec) { + return null; + } + + @Override + public void bind( + StateAccessForProviders stateAccessor, + QueueAccess queueAccess, + List liveContainers) { + + } + + @Override + public void bindToYarnRegistry(YarnRegistryViewForProviders yarnRegistry) { + + } + + @Override + public void buildEndpointDetails(Map details) { + + } + + @Override + public void applyInitialRegistryDefinitions( + URL amWebURI, + ServiceRecord serviceRecord) throws IOException { + + } + + @Override + public void notifyContainerCompleted(ContainerId containerId) { + } + + @Override + public ContainerReleaseSelector createContainerReleaseSelector() { + return new MostRecentContainerReleaseSelector(); + } + + @Override + public void releaseAssignedContainer(ContainerId containerId) { + // no-op + } + + @Override + public void addContainerRequest(AMRMClient.ContainerRequest req) { + // no-op + } + + @Override + public void cancelSingleRequest(AMRMClient.ContainerRequest request) { + + } + + @Override + public void updateBlacklist(List blacklistAdditions, + List blacklistRemovals) { + + } + + @Override + public void execute(List operations) { + + } + + @Override + public int cancelContainerRequests( + Priority priority1, + Priority priority2, + int count) { + return 0; + } + + @Override + public void rebuildContainerDetails(List liveContainers, String + applicationId, Map 
roleProviderMap) { + } + + @Override + public boolean processContainerStatus(ContainerId containerId, + ContainerStatus status) { + return false; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRMOperationHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRMOperationHandler.java new file mode 100644 index 0000000..e21aaa8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRMOperationHandler.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.apache.slider.server.appmaster.operations.RMOperationHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; + +public class MockRMOperationHandler extends RMOperationHandler { + protected static final Logger log = + LoggerFactory.getLogger(MockRMOperationHandler.class); + + public List operations = new ArrayList<>(); + int requests; + public int releases; + // number available to cancel + public int availableToCancel = 0; + // count of cancelled values. 
This must be explicitly set + int cancelled; + // number blacklisted + public int blacklisted = 0; + + @Override + public void releaseAssignedContainer(ContainerId containerId) { + operations.add(new ContainerReleaseOperation(containerId)); + log.info("Releasing container ID " + containerId.getContainerId()); + releases++; + } + + @Override + public void addContainerRequest(AMRMClient.ContainerRequest req) { + operations.add(new ContainerRequestOperation(req)); + log.info("Requesting container role #" + req.getPriority()); + requests++; + } + + @Override + public int cancelContainerRequests( + Priority priority1, + Priority priority2, + int count) { + int releaseable = Math.min(count, availableToCancel); + availableToCancel -= releaseable; + cancelled += releaseable; + return releaseable; + } + + @Override + public void cancelSingleRequest(AMRMClient.ContainerRequest request) { + // here assume that there is a copy of this request in the list + if (availableToCancel > 0) { + availableToCancel--; + cancelled++; + } + } + + @Override + public void updateBlacklist(List blacklistAdditions, List + blacklistRemovals) { + blacklisted += blacklistAdditions.size(); + blacklisted -= blacklistRemovals.size(); + } + + /** + * clear the history + */ + public void clear() { + operations.clear(); + releases = 0; + requests = 0; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRecordFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRecordFactory.java new file mode 100644 index 0000000..86d775a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRecordFactory.java @@ -0,0 +1,27 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +/** + * Node report for testing + */ +class MockRecordFactory { + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRegistryOperations.java new file mode 100644 index 0000000..7a017f7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRegistryOperations.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.fs.PathNotFoundException; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.types.RegistryPathStatus; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.service.AbstractService; + +import java.util.List; + +/** + * Simple stub registry for when one is needed for its API, but the operations + * are not actually required + */ +class MockRegistryOperations extends AbstractService implements RegistryOperations{ + + MockRegistryOperations() { + super("mock"); + } + + @Override + public boolean mknode(String path, boolean createParents) { + return true; + } + + @Override + public void bind(String path, ServiceRecord record, int flags) { + } + + @Override + public ServiceRecord resolve(String path) throws PathNotFoundException { + throw new PathNotFoundException(path); + } + + @Override + public RegistryPathStatus stat(String path) throws PathNotFoundException { + throw new PathNotFoundException(path); + } + + @Override + public boolean exists(String path) { + return false; + } + + @Override + public List list(String path) throws PathNotFoundException { + throw new PathNotFoundException(path); + } + + @Override + public void delete(String path, boolean recursive) { + + } + + @Override + public boolean addWriteAccessor(String id, String pass) { + return true; + } + + @Override + public void clearWriteAccessors() { + + } +} diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockResource.java new file mode 100644 index 0000000..4717ee6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockResource.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.Resource; + +public class MockResource extends Resource { + int memory; + int virtualCores; + + public MockResource(int memory, int vcores) { + this.memory = memory; + this.virtualCores = vcores; + } + + @Override + public int compareTo(Resource other) { + int diff = this.getMemory() - other.getMemory(); + if (diff == 0) { + diff = this.getVirtualCores() - other.getVirtualCores(); + } + return diff; + } + + @Override + public long getMemorySize() { + return memory; + } + + @Override + public void setMemorySize(long memory) { + } + + @Override + public int getVirtualCores() { + return virtualCores; + } + + @Override + public void setVirtualCores(int vCores) { + this.virtualCores = vCores; + } + + @Override + public int getMemory() { + return memory; + } + + @Override + public void setMemory(int memory) { + this.memory = memory; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoleHistory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoleHistory.java new file mode 100644 index 0000000..3d94d4b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoleHistory.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.slider.core.exceptions.BadConfigException; +import org.apache.slider.providers.ProviderRole; +import org.apache.slider.server.appmaster.state.RoleHistory; +import org.apache.slider.server.appmaster.state.RoleStatus; + +import java.util.ArrayList; +import java.util.List; + +/** + * subclass to enable access to some of the protected methods + */ +public class MockRoleHistory extends RoleHistory { + + /** + * Take a list of provider roles and build the history from them, dynamically creating + * the role status entries on the way + * @param providerRoles provider role list + * @throws BadConfigException configuration problem with the role list + */ + public MockRoleHistory(List providerRoles) throws + BadConfigException { + super(convertRoles(providerRoles), new MockClusterServices()); + } + + static List convertRoles(List providerRoles) { + List statuses = new ArrayList<>(); + for (ProviderRole role : providerRoles) { + statuses.add(new RoleStatus(role)); + } + return statuses; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoles.java new file mode 100644 index 0000000..3ed39fb --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockRoles.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.slider.server.appmaster.model.mock; + +public interface MockRoles { + + String ROLE0 = "role0"; + String ROLE1 = "role1"; + String ROLE2 = "role2"; + int ROLE_COUNT = 3; + String LABEL_GPU = "gpu"; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnCluster.java new file mode 100644 index 0000000..bf52528 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnCluster.java @@ -0,0 +1,325 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.NodeState; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +/** + * Models the cluster itself: a set of mock cluster nodes. + * + * nodes retain the slot model with a limit of 2^8 slots/host -this + * lets us use 24 bits of the container ID for hosts, and so simulate + * larger hosts. + * + * upper 32: index into nodes in the cluster + * NodeID hostname is the index in hex format; this is parsed down to the index + * to resolve the host + * + * Important: container IDs will be reused as containers get recycled. This + * is not an attempt to realistically mimic a real YARN cluster, just + * simulate it enough for Slider to explore node re-use and its handling + * of successful and unsuccessful allocations. + * + * There is little or no checking of valid parameters in here -this is for + * test use, not production. 
+ */ +public class MockYarnCluster { + protected static final Logger log = + LoggerFactory.getLogger(MockYarnCluster.class); + + public final int clusterSize; + final int containersPerNode; + MockYarnClusterNode[] nodes; + + MockYarnCluster(int clusterSize, int containersPerNode) { + this.clusterSize = clusterSize; + this.containersPerNode = containersPerNode; + build(); + } + + @Override + public String toString() { + return "MockYarnCluster size=" + clusterSize + ", capacity=" + + totalClusterCapacity()+ ", in use=" + containersInUse(); + } + + /** + * Build the cluster. + */ + private void build() { + nodes = new MockYarnClusterNode[clusterSize]; + for (int i = 0; i < clusterSize; i++) { + nodes[i] = new MockYarnClusterNode(i, containersPerNode); + } + } + + public MockYarnClusterNode nodeAt(int index) { + return nodes[index]; + } + + MockYarnClusterNode lookup(String hostname) { + int index = Integer.valueOf(hostname, 16); + return nodeAt(index); + } + + MockYarnClusterNode lookup(NodeId nodeId) { + return lookup(nodeId.getHost()); + } + + MockYarnClusterNode lookupOwner(ContainerId cid) { + return nodeAt(extractHost(cid.getId())); + } + + /** + * Release a container: return true if it was actually in use + * @param cid container ID + * @return the container released + */ + MockYarnClusterContainer release(ContainerId cid) { + int host = extractHost(cid.getId()); + MockYarnClusterContainer inUse = nodeAt(host).release(cid.getId()); + log.debug("Released {} inuse={}", cid, inUse); + return inUse; + } + + int containersInUse() { + int count = 0; + for (MockYarnClusterNode it : nodes) { + count += it.containersInUse(); + } + return count; + } + + /** + * Containers free + * @return + */ + int containersFree() { + return totalClusterCapacity() - containersInUse(); + } + + int totalClusterCapacity() { + return clusterSize * containersPerNode; + } + + /** + * Reset all the containers + */ + public void reset() { + for (MockYarnClusterNode node : nodes) { + 
node.reset(); + } + } + + /** + * Bulk allocate the specific number of containers on a range of the cluster + * @param startNode start of the range + * @param endNode end of the range + * @param count count + * @return the number actually allocated -it will be less the count supplied + * if the node was full + */ + public int bulkAllocate(int startNode, int endNode, int count) { + int total = 0; + for (int i = startNode; i <= endNode; i++) { + total += nodeAt(i).bulkAllocate(count).size(); + } + return total; + } + + /** + * Get the list of node reports. These are not cloned; updates will persist in the nodemap + * @return current node report list + */ + List getNodeReports() { + List reports = new ArrayList<>(); + + for (MockYarnClusterNode n : nodes) { + reports.add(n.nodeReport); + } + return reports; + } + +/** + * Model cluster nodes on the simpler "slot" model than the YARN-era + * resource allocation model. Why? Easier to implement scheduling. + * Of course, if someone does want to implement the full process... 
+ * + */ + public static class MockYarnClusterNode { + + public final int nodeIndex; + public final String hostname; + public List labels = new ArrayList<>(); + public final MockNodeId nodeId; + public final MockYarnClusterContainer[] containers; + private boolean offline; + public NodeReport nodeReport; + + public MockYarnClusterNode(int index, int size) { + nodeIndex = index; + hostname = String.format(Locale.ENGLISH, "%08x", index); + nodeId = new MockNodeId(hostname, 0); + + containers = new MockYarnClusterContainer[size]; + for (int i = 0; i < size; i++) { + int cid = makeCid(index, i); + MockContainerId mci = new MockContainerId(cid); + containers[i] = new MockYarnClusterContainer(mci); + } + + nodeReport = MockFactory.instance.newNodeReport(hostname, nodeId, + NodeState.RUNNING, ""); + } + + /** + * Look up a container + * @param containerId + * @return + */ + public MockYarnClusterContainer lookup(int containerId) { + return containers[extractContainer(containerId)]; + } + + /** + * Go offline; release all containers + */ + public void goOffline() { + if (!offline) { + offline = true; + reset(); + } + } + + public void goOnline() { + offline = false; + } + + /** + * allocate a container -if one is available + * @return the container or null for none free + * -or the cluster node is offline + */ + public MockYarnClusterContainer allocate() { + if (!offline) { + for (int i = 0; i < containers.length; i++) { + MockYarnClusterContainer c = containers[i]; + if (!c.busy) { + c.busy = true; + return c; + } + } + } + return null; + } + + /** + * Bulk allocate the specific number of containers + * @param count count + * @return the list actually allocated -it will be less the count supplied + * if the node was full + */ + public List bulkAllocate(int count) { + List result = new ArrayList<>(); + for (int i = 0; i < count; i++) { + MockYarnClusterContainer allocation = allocate(); + if (allocation == null) { + break; + } + result.add(allocation); + } + return result; 
+ } + + /** + * Release a container + * @param cid container ID + * @return the container if the container was busy before the release + */ + public MockYarnClusterContainer release(int cid) { + MockYarnClusterContainer container = containers[extractContainer(cid)]; + boolean b = container.busy; + container.busy = false; + return b? container: null; + } + + /** + * HTTP address of this node. Note: must use Java string concatenation; + * the original "http://$hostname/" was a Groovy GString leftover that + * Java would return literally, '$' included. + */ + public String httpAddress() { + return "http://" + hostname + "/"; + } + + /** + * Reset all the containers + */ + public void reset() { + for (MockYarnClusterContainer cont : containers) { + cont.reset(); + } + } + + public int containersInUse() { + int c = 0; + for (MockYarnClusterContainer cont : containers) { + c += cont.busy ? 1 : 0; + } + return c; + } + + public int containersFree() { + return containers.length - containersInUse(); + } + } + + /** + * Cluster container + */ + public static class MockYarnClusterContainer { + MockContainerId cid; + boolean busy; + + MockYarnClusterContainer(MockContainerId cid) { + this.cid = cid; + } + + void reset() { + busy = false; + } + } + + public static int makeCid(int hostIndex, int containerIndex) { + return (hostIndex << 8) | containerIndex & 0xff; + } + + public static final int extractHost(long cid) { + return (int)((cid >>> 8) & 0xffff); + } + + public static final int extractContainer(int cid) { + return (cid & 0xff); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnEngine.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnEngine.java new file mode 100644 index 0000000..7b07511 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/mock/MockYarnEngine.java @@ -0,0 +1,174 @@ +/* + * 
Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.mock; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.NodeReport; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.client.api.AMRMClient; +import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; +import org.apache.slider.server.appmaster.operations.AbstractRMOperation; +import org.apache.slider.server.appmaster.operations.CancelSingleRequest; +import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation; +import org.apache.slider.server.appmaster.operations.ContainerRequestOperation; +import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertNotNull; + +/** + * This is an evolving engine to mock YARN operations + */ +public class MockYarnEngine { + protected static final Logger log = + 
LoggerFactory.getLogger(MockYarnEngine.class); + + public MockYarnCluster cluster; + public Allocator allocator; + List pending = new ArrayList<>(); + + ApplicationId appId = new MockApplicationId(0, 0); + + ApplicationAttemptId attemptId = new MockApplicationAttemptId(appId, 1); + + @Override + public String toString() { + return "MockYarnEngine " + cluster + " + pending=" + pending.size(); + } + + public int containerCount() { + return cluster.containersInUse(); + } + + public MockYarnEngine(int clusterSize, int containersPerNode) { + cluster = new MockYarnCluster(clusterSize, containersPerNode); + allocator = new Allocator(cluster); + } + + /** + * Allocate a container from a request. The containerID will be + * unique, nodeId and other fields chosen internally with + * no such guarantees; resource and priority copied over + * @param request request + * @return container + */ + public Container allocateContainer(AMRMClient.ContainerRequest request) { + MockContainer allocated = allocator.allocate(request); + if (allocated != null) { + MockContainerId id = (MockContainerId)allocated.id; + id.applicationAttemptId = attemptId; + } + return allocated; + } + + MockYarnCluster.MockYarnClusterContainer releaseContainer(ContainerId containerId) { + return cluster.release(containerId); + } + + /** + * Process a list of operations -release containers to be released, + * allocate those for which there is space (but don't rescan the list after + * the scan) + * @param ops + * @return + */ + public List execute(List ops) { + return execute(ops, new ArrayList<>()); + } + + /** + * Process a list of operations -release containers to be released, + * allocate those for which there is space (but don't rescan the list after + * the scan). 
Unsatisifed entries are appended to the "pending" list + * @param ops operations + * @return the list of all satisfied operations + */ + public List execute(List ops, + List released) { + validateRequests(ops); + List allocation = new ArrayList<>(); + for (AbstractRMOperation op : ops) { + if (op instanceof ContainerReleaseOperation) { + ContainerReleaseOperation cro = (ContainerReleaseOperation) op; + ContainerId cid = cro.getContainerId(); + assertNotNull(releaseContainer(cid)); + released.add(cid); + } else if (op instanceof CancelSingleRequest) { + // no-op + } else if (op instanceof ContainerRequestOperation) { + ContainerRequestOperation req = (ContainerRequestOperation) op; + Container container = allocateContainer(req.getRequest()); + if (container != null) { + log.info("allocated container {} for {}", container, req); + allocation.add(container); + } else { + log.debug("Unsatisfied allocation {}", req); + pending.add(req); + } + } else { + log.warn("Unsupported operation {}", op); + } + } + return allocation; + } + + /** + * Try and mimic some of the logic of AMRMClientImpl.checkLocalityRelaxationConflict + * @param ops operations list + */ + void validateRequests(List ops) { + // run through the requests and verify that they are all consistent. 
+ List outstandingRequests = new ArrayList<>(); + for (AbstractRMOperation operation : ops) { + if (operation instanceof ContainerRequestOperation) { + ContainerRequestOperation containerRequest = + (ContainerRequestOperation) operation; + ContainerRequest amRequest = containerRequest.getRequest(); + Priority priority = amRequest.getPriority(); + boolean relax = amRequest.getRelaxLocality(); + + for (ContainerRequestOperation req : outstandingRequests) { + // Priority must be compared by value, not reference; each request + // typically carries its own Priority instance. + if (priority.equals(req.getPriority()) + && req.getRelaxLocality() != relax) { + // mismatch in values + Assert.fail("operation " + operation + " has incompatible request" + + " priority from outstanding request"); + } + } + // Record this request only after the scan completes: adding inside + // the loop above never executed for an initially-empty list (so the + // check was a no-op) and would otherwise mutate the list while + // iterating it, throwing ConcurrentModificationException. + outstandingRequests.add(containerRequest); + + } + } + } + + /** + * Get the list of node reports. These are not cloned; updates will persist in the nodemap + * @return current node report list + */ + List getNodeReports() { + return cluster.getNodeReports(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/monkey/TestMockMonkey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/monkey/TestMockMonkey.java new file mode 100644 index 0000000..00c6aa8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/monkey/TestMockMonkey.java @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.model.monkey; + +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.api.InternalKeys; +import org.apache.slider.server.appmaster.actions.ActionHalt; +import org.apache.slider.server.appmaster.actions.ActionKillContainer; +import org.apache.slider.server.appmaster.actions.AsyncAction; +import org.apache.slider.server.appmaster.actions.QueueService; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler; +import org.apache.slider.server.appmaster.monkey.ChaosKillAM; +import org.apache.slider.server.appmaster.monkey.ChaosKillContainer; +import org.apache.slider.server.appmaster.monkey.ChaosMonkeyService; +import org.apache.slider.server.appmaster.monkey.ChaosTarget; +import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +public class TestMockMonkey extends BaseMockAppStateTest { + + /** + * This queue service is NOT started; tests need to poll the queue + * rather than expect them to execute + */ + QueueService queues; + ChaosMonkeyService monkey; + + @Before + public void init() { + YarnConfiguration configuration = new YarnConfiguration(); + queues = new QueueService(); + queues.init(configuration); + monkey = new ChaosMonkeyService(metrics.getMetrics(), queues); + 
monkey.init(configuration); + } + + @Test + public void testMonkeyStart() throws Throwable { + monkey.start(); + monkey.stop(); + } + + @Test + public void testMonkeyPlay() throws Throwable { + ChaosCounter counter = new ChaosCounter(); + monkey.addTarget("target", counter, InternalKeys.PROBABILITY_PERCENT_100); + assertEquals(1, monkey.getTargetCount()); + monkey.play(); + assertEquals(1, counter.count); + } + + @Test + public void testMonkeySchedule() throws Throwable { + ChaosCounter counter = new ChaosCounter(); + assertEquals(0, monkey.getTargetCount()); + monkey.addTarget("target", counter, InternalKeys.PROBABILITY_PERCENT_100); + assertEquals(1, monkey.getTargetCount()); + assertTrue(monkey.schedule(0, 1, TimeUnit.SECONDS)); + assertEquals(1, queues.scheduledActions.size()); + } + + @Test + public void testMonkeyDoesntAddProb0Actions() throws Throwable { + ChaosCounter counter = new ChaosCounter(); + monkey.addTarget("target", counter, 0); + assertEquals(0, monkey.getTargetCount()); + monkey.play(); + assertEquals(0, counter.count); + } + + + @Test + public void testMonkeyScheduleProb0Actions() throws Throwable { + ChaosCounter counter = new ChaosCounter(); + monkey.addTarget("target", counter, 0); + assertFalse(monkey.schedule(0, 1, TimeUnit.SECONDS)); + assertEquals(0, queues.scheduledActions.size()); + } + + + @Test + public void testMonkeyPlaySometimes() throws Throwable { + ChaosCounter counter = new ChaosCounter(); + ChaosCounter counter2 = new ChaosCounter(); + monkey.addTarget("target1", counter, InternalKeys.PROBABILITY_PERCENT_1 + * 50); + monkey.addTarget("target2", counter2, InternalKeys + .PROBABILITY_PERCENT_1 * 25); + + for (int i = 0; i < 100; i++) { + monkey.play(); + } + log.info("Counter1 = {} counter2 = {}", counter.count, counter2.count); + /* + * Relying on probability here to give approximate answers + */ + assertTrue(counter.count > 25); + assertTrue(counter.count < 75); + assertTrue(counter2.count < counter.count); + } + + @Test + 
public void testAMKiller() throws Throwable { + + ChaosKillAM chaos = new ChaosKillAM(queues, -1); + chaos.chaosAction(); + assertEquals(1, queues.scheduledActions.size()); + AsyncAction action = queues.scheduledActions.take(); + assertTrue(action instanceof ActionHalt); + } + + + @Test + public void testContainerKillerEmptyApp() throws Throwable { + + + ChaosKillContainer chaos = new ChaosKillContainer(appState, + queues, + new MockRMOperationHandler()); + chaos.chaosAction(); + assertEquals(0, queues.scheduledActions.size()); + } + + + @Test + public void testContainerKillerIgnoresAM() throws Throwable { + + addAppMastertoAppState(); + assertEquals(1, appState.getLiveContainers().size()); + + ChaosKillContainer chaos = new ChaosKillContainer(appState, + queues, + new MockRMOperationHandler()); + chaos.chaosAction(); + assertEquals(0, queues.scheduledActions.size()); + } + + + + @Test + public void testContainerKiller() throws Throwable { + MockRMOperationHandler ops = new MockRMOperationHandler(); + getRole0Status().setDesired(1); + List instances = createAndStartNodes(); + assertEquals(1, instances.size()); + RoleInstance instance = instances.get(0); + + ChaosKillContainer chaos = new ChaosKillContainer(appState, queues, ops); + chaos.chaosAction(); + assertEquals(1, queues.scheduledActions.size()); + AsyncAction action = queues.scheduledActions.take(); + ActionKillContainer killer = (ActionKillContainer) action; + assertEquals(killer.getContainerId(), instance.getContainerId()); + killer.execute(null, queues, appState); + assertEquals(1, ops.releases); + + ContainerReleaseOperation operation = (ContainerReleaseOperation) ops + .operations.get(0); + assertEquals(operation.getContainerId(), instance.getContainerId()); + } + + + + /** + * Chaos target that just implement a counter + */ + private static class ChaosCounter implements ChaosTarget { + int count; + + @Override + public void chaosAction() { + count++; + } + + + @Override + public String toString() { + 
final StringBuilder sb = new StringBuilder( + "ChaosCounter{"); + sb.append("count=").append(count); + sb.append('}'); + return sb.toString(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/security/TestSecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/security/TestSecurityConfiguration.java new file mode 100644 index 0000000..76c4343 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/security/TestSecurityConfiguration.java @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.slider.server.appmaster.security; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.slider.common.SliderKeys; +import org.apache.slider.common.SliderXmlConfKeys; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.conf.MapOperations; +import org.apache.slider.core.exceptions.SliderException; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * + */ +public class TestSecurityConfiguration { + + @Test + public void testValidLocalConfiguration() throws Throwable { + Configuration config = new Configuration(); + config.set(CommonConfigurationKeysPublic + .HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL, "test"); + compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH, + "/some/local/path"); + + SecurityConfiguration securityConfiguration = + new SecurityConfiguration(config, aggregateConf, "testCluster"); + } + + @Test + public void testValidDistributedConfiguration() throws Throwable { + Configuration config = new Configuration(); + config.set(CommonConfigurationKeysPublic + .HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL, "test"); + compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab"); + + SecurityConfiguration securityConfiguration = 
+ new SecurityConfiguration(config, aggregateConf, "testCluster"); + } + + @Test + public void testMissingPrincipalNoLoginWithDistributedConfig() throws Throwable { + Configuration config = new Configuration(); + config.set(CommonConfigurationKeysPublic + .HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab"); + + try { + SecurityConfiguration securityConfiguration = + new SecurityConfiguration(config, aggregateConf, "testCluster") { + @Override + protected UserGroupInformation getLoginUser() throws + IOException { + return null; + } + }; + fail("expected SliderException"); + } catch (SliderException e) { + // expected + } + } + + @Test + public void testMissingPrincipalNoLoginWithLocalConfig() throws Throwable { + Configuration config = new Configuration(); + config.set(CommonConfigurationKeysPublic + .HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH, + "/some/local/path"); + + try { + SecurityConfiguration securityConfiguration = + new SecurityConfiguration(config, aggregateConf, "testCluster") { + @Override + protected UserGroupInformation getLoginUser() throws IOException { + return null; + } + }; + fail("expected SliderException"); + } catch (SliderException e) { + // expected + } + } + + @Test + public void testBothKeytabMechanismsConfigured() throws Throwable { + Configuration config = new Configuration(); + config.set(CommonConfigurationKeysPublic + .HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + 
aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_KEYTAB_PRINCIPAL, "test"); + compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH, + "/some/local/path"); + compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab"); + + try { + SecurityConfiguration securityConfiguration = + new SecurityConfiguration(config, aggregateConf, + "testCluster"); + fail("expected SliderException"); + } catch (SliderException e) { + // expected + } + } + + @Test + public void testMissingPrincipalButLoginWithDistributedConfig() throws Throwable { + Configuration config = new Configuration(); + config.set(CommonConfigurationKeysPublic + .HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab"); + + SecurityConfiguration securityConfiguration = + new SecurityConfiguration(config, aggregateConf, "testCluster"); + } + + @Test + public void testMissingPrincipalButLoginWithLocalConfig() throws Throwable { + Configuration config = new Configuration(); + config.set(CommonConfigurationKeysPublic + .HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH, + "/some/local/path"); + + SecurityConfiguration securityConfiguration = + new SecurityConfiguration(config, aggregateConf, "testCluster"); + } + + @Test + public void testKeypathLocationOnceLocalized() throws Throwable { + Configuration config = new Configuration(); + config.set(CommonConfigurationKeysPublic + .HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + 
aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab"); + + SecurityConfiguration securityConfiguration = + new SecurityConfiguration(config, aggregateConf, "testCluster"); + + assertEquals(new File(SliderKeys.KEYTAB_DIR, "some.keytab") + .getAbsolutePath(), + securityConfiguration.getKeytabFile(aggregateConf) + .getAbsolutePath()); + } + + @Test + public void testAMKeytabProvided() throws Throwable { + Configuration config = new Configuration(); + AggregateConf aggregateConf = new AggregateConf(); + MapOperations compOps = + aggregateConf.getAppConfOperations().getOrAddComponent(SliderKeys + .COMPONENT_AM); + compOps.put(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH, " "); + + SecurityConfiguration securityConfiguration = + new SecurityConfiguration(config, aggregateConf, "testCluster"); + assertFalse(securityConfiguration.isKeytabProvided()); + + compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, ""); + assertFalse(securityConfiguration.isKeytabProvided()); + + compOps.put(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME, "some.keytab"); + assertTrue(securityConfiguration.isKeytabProvided()); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryMarshalling.java new file mode 100644 index 0000000..bc3cbbe --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/PathEntryMarshalling.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.web.rest.registry; + +import org.apache.hadoop.registry.client.binding.JsonSerDeser; + +class PathEntryMarshalling + extends JsonSerDeser { + public PathEntryMarshalling() { + super(PathEntryResource.class); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestMarshalling.java new file mode 100644 index 0000000..fc47440 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/registry/TestRegistryRestMarshalling.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.appmaster.web.rest.registry; + +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes; +import org.junit.Test; + +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; + +/** + * This test exists because Jersey appears to behave "oddly" + * when it comes to marshalling JSON, and some of the REST resources + * appear to have trouble. 
+ * + * This test tries to isolate it + */ +public class TestRegistryRestMarshalling { + + @Test + public void testDeser() throws Throwable { + PathEntryMarshalling pem = new PathEntryMarshalling(); + PathEntryResource unmarshalled = pem.fromResource( + "/org/apache/slider/server/appmaster/web/rest/registry/sample.json"); + + ServiceRecord serviceRecord = unmarshalled.service; + assertNotNull(serviceRecord); + assertNotNull(serviceRecord.get(YarnRegistryAttributes.YARN_ID)); + assertNotEquals("", serviceRecord.get(YarnRegistryAttributes.YARN_PERSISTENCE)); + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java new file mode 100644 index 0000000..dc8900e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestClusterSpecificationBlock.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.slider.server.appmaster.web.view; + +import com.google.inject.AbstractModule; +import com.google.inject.Guice; +import com.google.inject.Injector; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.slider.api.ClusterDescription; +import org.apache.slider.providers.ProviderService; +import org.apache.slider.server.appmaster.model.mock.MockAppState; +import org.apache.slider.server.appmaster.model.mock.MockClusterServices; +import org.apache.slider.server.appmaster.model.mock.MockProviderService; +import org.apache.slider.server.appmaster.state.AbstractClusterServices; +import org.apache.slider.server.appmaster.state.AppState; +import org.apache.slider.server.appmaster.state.ProviderAppState; +import org.apache.slider.server.appmaster.web.WebAppApi; +import org.apache.slider.server.appmaster.web.WebAppApiImpl; +import org.junit.Before; +import org.junit.Test; + +import java.io.PrintWriter; +import java.io.StringWriter; + +import static org.junit.Assert.assertEquals; + +public class TestClusterSpecificationBlock { + + private ClusterSpecificationBlock clusterSpecBlock; + + @Before + public void setup() { + AppState appState = new MyAppState(new MockClusterServices()); + ProviderAppState providerAppState = new ProviderAppState( + "undefined", + appState); + ProviderService providerService = new MockProviderService(); + + WebAppApiImpl inst = new WebAppApiImpl( + providerAppState, + providerService, + null, + null, null, null, null); + + Injector injector = Guice.createInjector(new AbstractModule() { + @Override + protected void configure() { + bind(WebAppApi.class).toInstance(inst); + } + }); + + clusterSpecBlock = injector.getInstance(ClusterSpecificationBlock.class); + } + + @Test + public void testJsonGeneration() { + StringWriter sw = new StringWriter(64); + PrintWriter pw = new PrintWriter(sw); + + Hamlet hamlet = 
new Hamlet(pw, 0, false); + + int level = hamlet.nestLevel(); + clusterSpecBlock.doRender(hamlet); + + assertEquals(level, hamlet.nestLevel()); + } + + private static class MyAppState extends MockAppState { + public MyAppState(AbstractClusterServices recordFactory) { + super(recordFactory); + this.setClusterStatus(new MockClusterDescription()); + } + } + + private static class MockClusterDescription extends ClusterDescription { + @Override + public String toJsonString() { + return "{\"foo\": \"bar\"}"; + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java new file mode 100644 index 0000000..ee38189 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestContainerStatsBlock.java @@ -0,0 +1,254 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.slider.server.appmaster.web.view; + +import com.google.inject.AbstractModule; +import com.google.inject.Guice; +import com.google.inject.Injector; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR; +import org.apache.hadoop.yarn.webapp.hamlet.HamletImpl.EImp; +import org.apache.slider.api.ClusterNode; +import org.apache.slider.providers.ProviderService; +import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest; +import org.apache.slider.server.appmaster.model.mock.MockContainer; +import org.apache.slider.server.appmaster.model.mock.MockContainerId; +import org.apache.slider.server.appmaster.model.mock.MockNodeId; +import org.apache.slider.server.appmaster.model.mock.MockProviderService; +import org.apache.slider.server.appmaster.model.mock.MockResource; +import org.apache.slider.server.appmaster.state.ProviderAppState; +import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.slider.server.appmaster.web.WebAppApi; +import org.apache.slider.server.appmaster.web.WebAppApiImpl; +import org.apache.slider.server.appmaster.web.view.ContainerStatsBlock.ClusterNodeNameComparator; +import org.apache.slider.server.appmaster.web.view.ContainerStatsBlock.TableAnchorContent; +import org.apache.slider.server.appmaster.web.view.ContainerStatsBlock.TableContent; +import org.junit.Before; +import org.junit.Test; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class TestContainerStatsBlock extends BaseMockAppStateTest { + + private ContainerStatsBlock statsBlock; 
+ + + private Container cont1, cont2; + + @Before + public void setup() throws Exception { + super.setup(); + ProviderService providerService = new MockProviderService(); + ProviderAppState providerAppState = new ProviderAppState( + "undefined", + appState); + + WebAppApiImpl inst = new WebAppApiImpl( + providerAppState, + providerService, + null, + metrics, null, null, null); + + Injector injector = Guice.createInjector(new WebappModule(inst)); + statsBlock = injector.getInstance(ContainerStatsBlock.class); + + cont1 = new MockContainer(); + + cont1.setId(mockContainerId(0)); + cont1.setNodeId(new MockNodeId()); + cont1.setPriority(Priority.newInstance(1)); + cont1.setResource(new MockResource(0, 0)); + + cont2 = new MockContainer(); + cont2.setId(mockContainerId(1)); + cont2.setNodeId(new MockNodeId()); + cont2.setPriority(Priority.newInstance(1)); + cont2.setResource(new MockResource(0, 0)); + } + + + public static class WebappModule extends AbstractModule { + final WebAppApiImpl instance; + + WebappModule(WebAppApiImpl instance) { + this.instance = instance; + } + + @Override + protected void configure() { + bind(WebAppApi.class).toInstance(instance); + } + } + + + public MockContainerId mockContainerId(int count) { + return new MockContainerId(applicationAttemptId, count); + } + + @Test + public void testGetContainerInstances() { + List roles = Arrays.asList( + new RoleInstance(cont1), + new RoleInstance(cont2) + ); + Map map = statsBlock.getContainerInstances(roles); + + assertEquals(2, map.size()); + + assertTrue(map.containsKey("mockcontainer_0")); + assertEquals(map.get("mockcontainer_0"), roles.get(0)); + + assertTrue(map.containsKey("mockcontainer_1")); + assertEquals(map.get("mockcontainer_1"), roles.get(1)); + } + + @Test + public void testGenerateRoleDetailsWithTwoColumns() { + StringWriter sw = new StringWriter(64); + PrintWriter pw = new PrintWriter(sw); + + Hamlet hamlet = new Hamlet(pw, 0, false); + + // Make a div to put the content into + DIV 
div = hamlet.div(); + + String detailsName = "testing"; + String selector = "selector"; + Map data = new HashMap(); + data.put(new ContainerStatsBlock.TableContent("Foo"), "bar"); + + int levelPrior = hamlet.nestLevel(); + statsBlock.generateRoleDetails(div, selector, detailsName, data.entrySet()); + + // Close out the div we made + // DIV._() will actually invoke the wrong method (creating
<div>
), explicit + // cast to make sure we're closing out the
<div>
+ ((EImp) div)._(); + + assertEquals(levelPrior, hamlet.nestLevel()); + } + + @Test + public void testGenerateRoleDetailsWithOneColumn() { + StringWriter sw = new StringWriter(64); + PrintWriter pw = new PrintWriter(sw); + + Hamlet hamlet = new Hamlet(pw, 0, false); + DIV div = hamlet.div(); + + String detailsName = "testing"; + String selector = "selector"; + Map data = new HashMap(); + data.put(new ContainerStatsBlock.TableContent("Bar"), null); + + int levelPrior = hamlet.nestLevel(); + statsBlock.generateRoleDetails(div, selector, detailsName, data.entrySet()); + + // Close out the div we made + // DIV._() will actually invoke the wrong method (creating
<div>
), explicit + // cast to make sure we're closing out the
<div>
+ ((EImp) div)._(); + + assertEquals(levelPrior, hamlet.nestLevel()); + } + + @Test + public void testGenerateRoleDetailsWithNoData() { + StringWriter sw = new StringWriter(64); + PrintWriter pw = new PrintWriter(sw); + + Hamlet hamlet = new Hamlet(pw, 0, false); + DIV div = hamlet.div(); + + String detailsName = "testing"; + String selector = "selector"; + Map data = new HashMap(); + + int levelPrior = hamlet.nestLevel(); + statsBlock.generateRoleDetails(div, selector, detailsName, data.entrySet()); + + // Close out the div we made + // DIV._() will actually invoke the wrong method (creating
<div>
), explicit + // cast to make sure we're closing out the
<div>
+ ((EImp) div)._(); + + assertEquals(levelPrior, hamlet.nestLevel()); + } + + @Test + public void testClusterNodeNameComparator() { + ClusterNode n1 = new ClusterNode(mockContainerId(1)), + n2 = new ClusterNode(mockContainerId(2)), + n3 = new ClusterNode(mockContainerId(3)); + + List nodes = new ArrayList(); + nodes.add(n2); + nodes.add(n3); + nodes.add(n1); + + Collections.sort(nodes, new ClusterNodeNameComparator()); + + String prevName = ""; + for (ClusterNode node : nodes) { + assertTrue(prevName.compareTo(node.name) <= 0); + prevName = node.name; + } + } + + @Test + public void testTableContent() { + StringWriter sw = new StringWriter(64); + PrintWriter pw = new PrintWriter(sw); + TableContent tc = new TableContent("foo"); + + Hamlet hamlet = new Hamlet(pw, 0, false); + TR> tr = hamlet.table().tr(); + + int prevLevel = hamlet.nestLevel(); + // printCell should not end the tr + tc.printCell(tr); + tr._(); + assertEquals(prevLevel, hamlet.nestLevel()); + } + + @Test + public void testTableAnchorContent() { + StringWriter sw = new StringWriter(64); + PrintWriter pw = new PrintWriter(sw); + TableContent tc = new TableAnchorContent("foo", "http://bar.com"); + + Hamlet hamlet = new Hamlet(pw, 0, false); + TR> tr = hamlet.table().tr(); + + int prevLevel = hamlet.nestLevel(); + // printCell should not end the tr + tc.printCell(tr); + tr._(); + assertEquals(prevLevel, hamlet.nestLevel()); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java new file mode 100644 index 0000000..1ab7e93 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/view/TestIndexBlock.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.slider.server.appmaster.web.view; + +import com.google.inject.AbstractModule; +import com.google.inject.Guice; +import com.google.inject.Injector; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; +import org.apache.slider.providers.ProviderService; +import org.apache.slider.server.appmaster.model.appstate.BaseMockAppStateAATest; +import org.apache.slider.server.appmaster.model.mock.MockContainer; +import org.apache.slider.server.appmaster.model.mock.MockContainerId; +import org.apache.slider.server.appmaster.model.mock.MockNodeId; +import org.apache.slider.server.appmaster.model.mock.MockProviderService; +import org.apache.slider.server.appmaster.model.mock.MockResource; +import org.apache.slider.server.appmaster.state.ContainerOutcome; +import org.apache.slider.server.appmaster.state.OutstandingRequest; +import org.apache.slider.server.appmaster.state.ProviderAppState; +import org.apache.slider.server.appmaster.state.RoleStatus; +import org.apache.slider.server.appmaster.web.WebAppApi; +import org.apache.slider.server.appmaster.web.WebAppApiImpl; +import org.junit.Before; +import org.junit.Test; + +import java.io.PrintWriter; +import java.io.StringWriter; + +public class TestIndexBlock extends BaseMockAppStateAATest { + + private IndexBlock indexBlock; + + private Container cont1, cont2; + + @Before + public void setup() throws Exception { + super.setup(); + assertNotNull(appState); + ProviderService providerService = new MockProviderService(); + ProviderAppState providerAppState = new ProviderAppState( + "undefined", + appState); + + WebAppApiImpl inst = new WebAppApiImpl( + providerAppState, + providerService, + null, + metrics, null, null, null); + + Injector injector = Guice.createInjector(new AbstractModule() { + @Override + protected void configure() { + bind(WebAppApi.class).toInstance(inst); + } + }); + + indexBlock = 
injector.getInstance(IndexBlock.class); + + cont1 = new MockContainer(); + cont1.setId(new MockContainerId(applicationAttemptId, 0)); + cont1.setNodeId(new MockNodeId()); + cont1.setPriority(Priority.newInstance(1)); + cont1.setResource(new MockResource(0, 0)); + + cont2 = new MockContainer(); + cont2.setId(new MockContainerId(applicationAttemptId, 1)); + cont2.setNodeId(new MockNodeId()); + cont2.setPriority(Priority.newInstance(1)); + cont2.setResource(new MockResource(0, 0)); + } + + @Test + public void testIndex() { + RoleStatus role0 = getRole0Status(); + RoleStatus role1 = getRole1Status(); + RoleStatus role2 = getRole2Status(); + + int role0_desired = 8; + + role0.setDesired(role0_desired); + int role0_actual = 5; + int role0_requested = role0_desired - role0_actual; + for (int i = 0; i < role0_actual; i++) { + role0.incActual(); + } + assertEquals(role0.getActual(), role0_actual); + for (int i = 0; i < role0_requested; i++) { + role0.incRequested(); + } + assertEquals(role0.getRequested(), role0_requested); + + int role0_failures = 2; + + role0.noteFailed(false, "", ContainerOutcome.Failed); + role0.noteFailed(true, "", ContainerOutcome.Failed); + + // all aa roles fields are in the + int aarole_desired = 200; + aaRole.setDesired(aarole_desired); + int aarole_actual = 90; + int aarole_active = 1; + int aarole_requested = aarole_desired - aarole_actual; + int aarole_pending = aarole_requested - 1; + int aarole_failures = 0; + for (int i = 0; i < aarole_actual; i++) { + aaRole.incActual(); + } + assertEquals(aaRole.getActual(), aarole_actual); + aaRole.setOutstandingAArequest(new OutstandingRequest(2, "")); + // add a requested + aaRole.incRequested(); + aaRole.setPendingAntiAffineRequests(aarole_pending); + assertEquals(aaRole.getPendingAntiAffineRequests(), aarole_pending); + + assertEquals(aaRole.getActualAndRequested(), aarole_actual + 1); + StringWriter sw = new StringWriter(64); + PrintWriter pw = new PrintWriter(sw); + + Hamlet hamlet = new Hamlet(pw, 
0, false); + + indexBlock.doIndex(hamlet, "accumulo"); + + String body = sw.toString(); + log.info(body); + // verify role data came out + assertTrue(body.contains("role0")); + assertContains(role0_desired, body); + assertContains(role0_actual, body); + assertContains(role0_requested, body); + assertContains(role0_failures, body); + + assertTrue(body.contains("role1")); + assertTrue(body.contains("role2")); + + assertContains(aarole_desired, body); + assertContains(aarole_actual, body); +// assertContains(aarole_requested, body) + assertContains(aarole_failures, body); + assertTrue(body.contains(indexBlock.buildAADetails(true, aarole_pending))); + + // verify that the sorting took place + assertTrue(body.indexOf("role0") < body.indexOf("role1")); + assertTrue(body.indexOf("role1") < body.indexOf("role2")); + + assertFalse(body.contains(IndexBlock.ALL_CONTAINERS_ALLOCATED)); + // role + } + + void assertContains(int ex, String html) { + assertStringContains(Integer.toString(ex), html); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/management/TestGauges.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/management/TestGauges.java new file mode 100644 index 0000000..16151fc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/management/TestGauges.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.server.management; + +import org.apache.slider.server.appmaster.management.LongGauge; +import org.apache.slider.test.SliderTestBase; +import org.junit.Test; + +public class TestGauges extends SliderTestBase { + + @Test + public void testLongGaugeOperations() throws Throwable { + LongGauge gauge = new LongGauge(); + assertEquals(0, gauge.get()); + gauge.inc(); + assertEquals(1, gauge.get()); + gauge.inc(); + assertEquals(2, gauge.get()); + gauge.inc(); + assertEquals(3, gauge.get()); + assertEquals(gauge.getValue().longValue(), gauge.get()); + assertEquals(gauge.getCount().longValue(), gauge.get()); + + gauge.dec(); + assertEquals(2, gauge.get()); + assertEquals(1, gauge.decToFloor(1)); + assertEquals(1, gauge.get()); + assertEquals(0, gauge.decToFloor(1)); + assertEquals(0, gauge.decToFloor(1)); + assertEquals(0, gauge.decToFloor(0)); + + gauge.set(4); + assertEquals(0, gauge.decToFloor(8)); + + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java index a93ec57..458d1bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/servicemonitor/TestPortProbe.java @@ -17,8 +17,8 @@ package org.apache.slider.server.servicemonitor; -import org.junit.Assert; import org.apache.hadoop.conf.Configuration; +import org.junit.Assert; import org.junit.Test; public class TestPortProbe extends Assert { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/KeysForTests.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/KeysForTests.java new file mode 100644 index 0000000..2277339 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/KeysForTests.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.test; + +import org.apache.slider.common.SliderKeys; +import org.apache.slider.common.SliderXMLConfKeysForTesting; + +/** + * Keys shared across tests + */ +public interface KeysForTests extends SliderKeys, SliderXMLConfKeysForTesting { + /** + * Username for all clusters, ZK, etc + */ + String USERNAME = "bigdataborat"; + + int WAIT_TIME = 120; + String WAIT_TIME_ARG = Integer.toString(WAIT_TIME); + + String SLIDER_TEST_XML = "slider-test.xml"; + +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/MicroZKCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/MicroZKCluster.java new file mode 100644 index 0000000..a7f353e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/MicroZKCluster.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.registry.client.api.RegistryOperations; +import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService; +import org.apache.hadoop.registry.server.services.MicroZookeeperService; +import org.apache.slider.common.tools.SliderUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; + +public class MicroZKCluster implements Closeable { + protected static final Logger log = + LoggerFactory.getLogger(MicroZKCluster.class); + + public static final String HOSTS = "127.0.0.1"; + MicroZookeeperService zkService; + String zkBindingString; + final Configuration conf; + public RegistryOperations registryOperations; + + MicroZKCluster() { + this(SliderUtils.createConfiguration()); + } + + MicroZKCluster(Configuration conf) { + this.conf = conf; + } + + void createCluster(String name) { + zkService = new MicroZookeeperService(name); + + zkService.init(conf); + zkService.start(); + zkBindingString = zkService.getConnectionString(); + log.info("Created {}", this); + registryOperations = new RegistryOperationsService( + "registry", + zkService); + registryOperations.init(conf); + registryOperations.start(); + } + + @Override + public void close() throws IOException { + if (registryOperations != null) { + registryOperations.stop(); + } + if (zkService != null) { + zkService.stop(); + } + } + + @Override + public String toString() { + return "Micro ZK cluster as " + zkBindingString; + } + + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/Outcome.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/Outcome.java new file mode 100644 index 0000000..7dd4bef --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/Outcome.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.test; + +/** + * Outcome for probes + */ + +public class Outcome { + + public final String name; + + private Outcome(String name) { + this.name = name; + } + + public static Outcome Success = new Outcome( + "Success"); + public static Outcome Retry = new Outcome("Retry"); + static Outcome Fail = new Outcome("Fail"); + + /** + * build from a bool, where false is mapped to retry + * @param b boolean + * @return an outcome + */ + static Outcome fromBool(boolean b) { + return b ? 
Success : Retry; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/SliderTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/SliderTestBase.java new file mode 100644 index 0000000..bf616dc --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/SliderTestBase.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.test; + +import org.apache.hadoop.fs.FileUtil; +import org.apache.slider.common.SliderXMLConfKeysForTesting; +import org.apache.slider.server.appmaster.management.MetricsAndMonitoring; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.TestName; + +import java.io.File; + + +/** + * Base class for unit tests as well as ones starting mini clusters + * -the foundational code and methods + * + */ + +public abstract class SliderTestBase extends SliderTestUtils { + + /** + * Singleton metric registry + */ + public static final MetricsAndMonitoring metrics = new MetricsAndMonitoring(); + public static final int WEB_STARTUP_TIME = 30000; + + @Rule + public TestName methodName = new TestName(); + + @BeforeClass + public static void nameThread() { + Thread.currentThread().setName("JUnit"); + } + + @Before + public void setup() throws Exception { + sliderClientClassName = DEFAULT_SLIDER_CLIENT; + FileUtil.fullyDelete(new File(SliderXMLConfKeysForTesting + .TEST_SECURITY_DIR)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/SliderTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/SliderTestUtils.java new file mode 100644 index 0000000..ae4096a --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/SliderTestUtils.java @@ -0,0 +1,1469 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.test; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.ObjectWriter; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonParser; +import com.sun.jersey.api.client.Client; +import com.sun.jersey.api.client.config.ClientConfig; +import com.sun.jersey.api.client.config.DefaultClientConfig; +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.client.urlconnection.URLConnectionClientHandler; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.service.ServiceStateException; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.webapp.ForbiddenException; +import org.apache.hadoop.yarn.webapp.NotFoundException; +import org.apache.slider.api.ClusterDescription; +import org.apache.slider.api.ClusterNode; +import 
org.apache.slider.api.RoleKeys; +import org.apache.slider.api.StateValues; +import org.apache.slider.api.StatusKeys; +import org.apache.slider.client.SliderClient; +import org.apache.slider.common.params.Arguments; +import org.apache.slider.common.tools.Duration; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.conf.AggregateConf; +import org.apache.slider.core.exceptions.BadClusterStateException; +import org.apache.slider.core.exceptions.SliderException; +import org.apache.slider.core.exceptions.WaitTimeoutException; +import org.apache.slider.core.main.LauncherExitCodes; +import org.apache.slider.core.main.ServiceLaunchException; +import org.apache.slider.core.main.ServiceLauncher; +import org.apache.slider.core.registry.docstore.PublishedConfigSet; +import org.apache.slider.core.registry.docstore.PublishedConfiguration; +import org.apache.slider.core.restclient.UgiJerseyBinding; +import org.apache.slider.core.restclient.UrlConnectionOperations; +import org.apache.slider.server.services.workflow.ForkedProcessService; +import org.junit.Assert; +import org.junit.Assume; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.UnsupportedEncodingException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.TimeoutException; + +import static org.apache.slider.common.params.Arguments.ARG_OPTION; + +/** + * Static utils for tests in this package and in other test projects. + * + * It is designed to work with mini clusters as well as remote ones + * + * This class is not final and may be extended for test cases. 
+ * + * Some of these methods are derived from the SwiftUtils and SwiftTestUtils + * classes -replicated here so that they are available in Hadoop-2.0 code + */ +public class SliderTestUtils extends Assert { + protected static final Logger log = + LoggerFactory.getLogger(SliderTestUtils.class); + public static final String DEFAULT_SLIDER_CLIENT = SliderClient.class + .getName(); + public static String sliderClientClassName = DEFAULT_SLIDER_CLIENT; + + public static final Map EMPTY_MAP = Collections.emptyMap(); + public static final Map EMPTY_INT_MAP = Collections.emptyMap(); + public static final List EMPTY_LIST = Collections.emptyList(); + + public static final ObjectReader OBJECT_READER; + public static final ObjectWriter OBJECT_WRITER; + + static { + ObjectMapper mapper = new ObjectMapper(); + OBJECT_READER = mapper.reader(Object.class); + OBJECT_WRITER = mapper.writer(); + } + + public static interface Action { + public Object invoke() throws ServiceLaunchException, Exception; + } + + public static interface Probe { + public Outcome invoke(Map args) throws ServiceLaunchException, Exception; + } + + public static void describe(String s) { + log.info(""); + log.info("==============================="); + log.info(s); + log.info("==============================="); + log.info(""); + } + + /** + * Convert a JSON string to something readable + * @param json + * @return a string for printing + */ + public static String prettyPrintJson(String json) { + Gson gson = new GsonBuilder().setPrettyPrinting().create(); + return gson.toJson(new JsonParser().parse(json)); + } + + /** + * Convert an object to something readable + * @param src + * @return a string for printing + */ + public static String prettyPrintAsJson(Object src) + throws JsonProcessingException, UnsupportedEncodingException { + return new String(OBJECT_WRITER.writeValueAsBytes(src), "UTF8"); + } + + /** + * Skip the test with a message + * @param message message logged and thrown + */ + public static void 
skip(String message) { + log.warn("Skipping test: {}", message); + Assume.assumeTrue(message, false); + } + + /** + * Skip the test with a message if condition holds + * @param condition predicate + * @param message message logged and thrown + */ + public static void assume(boolean condition, String message) { + if (!condition) { + skip(message); + } + } + + /** + * Skip a test if not running on Windows + */ + public static void assumeWindows() { + assume(Shell.WINDOWS, "not windows"); + } + + /** + * Skip a test if running on Windows + */ + public static void assumeNotWindows() { + assume(!Shell.WINDOWS, "windows"); + } + + /** + * skip a test on windows + */ + public static void skipOnWindows() { + assumeNotWindows(); + } + + /** + * Equality size for a list + * @param left + * @param right + */ + public static void assertListEquals(List left, List right) { + String lval = collectionToString(left); + String rval = collectionToString(right); + String text = "comparing " + lval + " to " + rval; + assertEquals(text, left.size(), right.size()); + for (int i = 0; i < left.size(); i++) { + assertEquals(text, left.get(i), right.get(i)); + } + } + + /** + * Assert a list has a given length + * @param list list + * @param size size to have + */ + public static void assertListLength(List list, int size) { + String lval = collectionToString(list); + assertEquals(lval, size, list.size()); + } + + /** + * Stringify a collection with [ ] at either end + * @param collection collection + * @return string value + */ + public static String collectionToString(List collection) { + return "[" + SliderUtils.join(collection, ", ", false) + "]"; + } + + /** + * Assume that a string option is set and not equal to "" + * @param conf configuration file + * @param key key to look for + */ + public static void assumeStringOptionSet(Configuration conf, String key) { + if (SliderUtils.isUnset(conf.getTrimmed(key))) { + skip("Configuration key $key not set"); + } + } + + /** + * assert that a 
string option is set and not equal to "" + * @param conf configuration file + * @param key key to look for + */ + public static void assertStringOptionSet(Configuration conf, String key) { + getRequiredConfOption(conf, key); + } + + /** + * Assume that a boolean option is set and true. + * Unset or false triggers a test skip + * @param conf configuration file + * @param key key to look for + */ + public static void assumeBoolOptionTrue(Configuration conf, String key) { + assumeBoolOption(conf, key, false); + } + + /** + * Assume that a boolean option is true. + * False triggers a test skip + * @param conf configuration file + * @param key key to look for + * @param defval default value if the property is not defined + */ + public static void assumeBoolOption( + Configuration conf, String key, boolean defval) { + assume(conf.getBoolean(key, defval), + "Configuration key $key is false"); + } + + /** + * Get a required config option (trimmed, incidentally). + * Test will fail if not set + * @param conf configuration + * @param key key + * @return the string + */ + public static String getRequiredConfOption(Configuration conf, String key) { + String val = conf.getTrimmed(key); + if (SliderUtils.isUnset(val)) { + fail("Missing configuration option $key"); + } + return val; + } + + /** + * Fails a test because required behavior has not been implemented. + */ + public static void failNotImplemented() { + fail("Not implemented"); + } + + /** + * Assert that any needed libraries being present. 
On Unix none are needed; + * on windows they must be present + */ + public static void assertNativeLibrariesPresent() { + String errorText = SliderUtils.checkForRequiredNativeLibraries(); + if (SliderUtils.isSet(errorText)) { + fail(errorText); + } + } + + /** + * Wait for the cluster live; fail if it isn't within the (standard) timeout + * @param sliderClient client + * @return the app report of the live cluster + */ + public static ApplicationReport waitForClusterLive( + SliderClient sliderClient, + int goLiveTime) throws IOException, YarnException { + ApplicationReport report = sliderClient.monitorAppToRunning( + new Duration(goLiveTime)); + assertNotNull( + "Cluster did not go live in the time $goLiveTime", + report); + return report; + } + + protected static String[] toArray(List args) { + String[] converted = new String[args.size()]; + for (int i = 0; i < args.size(); i++) { + Object elt = args.get(i); + assertNotNull(args.get(i)); + converted[i] = elt.toString(); + } + return converted; + } + + public static void waitWhileClusterLive(SliderClient client, int timeout) + throws IOException, YarnException { + Duration duration = new Duration(timeout); + duration.start(); + while (client.actionExists(client.getDeployedClusterName(), true) == + LauncherExitCodes.EXIT_SUCCESS && !duration.getLimitExceeded()) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + } + } + if (duration.getLimitExceeded()) { + fail("Cluster " + client.getDeployedClusterName() + " still live after " + + timeout + " ms"); + } + } + + public static void waitUntilClusterLive(SliderClient client, int timeout) + throws IOException, YarnException { + Duration duration = new Duration(timeout); + duration.start(); + while (LauncherExitCodes.EXIT_SUCCESS != client.actionExists( + client.getDeployedClusterName(), true) && + !duration.getLimitExceeded()) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + } + } + if (duration.getLimitExceeded()) { + 
fail("Cluster " + client.getDeployedClusterName() + " not live after " + + timeout + " ms"); + } + } + + /** + * Spin waiting for the Slider role count to match expected + * @param client client + * @param role role to look for + * @param desiredCount RS count + * @param timeout timeout + */ + public static ClusterDescription waitForRoleCount( + SliderClient client, + String role, + Integer desiredCount, + int timeout) throws IOException, YarnException { + return waitForRoleCount(client, Collections.singletonMap(role, + desiredCount), timeout); + } + + /** + * Spin waiting for the Slider role count to match expected + * @param client client + * @param roles map of roles to look for + * @param timeout timeout + */ + public static ClusterDescription waitForRoleCount( + SliderClient client, + Map roles, + int timeout) throws IOException, YarnException { + String operation = "startup"; + String clustername = client.getDeployedClusterName(); + ClusterDescription status = null; + Duration duration = new Duration(timeout); + duration.start(); + boolean roleCountFound = false; + while (!roleCountFound) { + StringBuilder details = new StringBuilder(); + + boolean timedOut = duration.getLimitExceeded(); + try { + status = client.getClusterDescription(clustername); + roleCountFound = true; + for (Map.Entry entry : roles.entrySet()) { + String role = entry.getKey(); + int desiredCount = entry.getValue(); + List instances = status.instances.get(role); + int instanceCount = instances != null ? 
instances.size() : 0; + if (instanceCount != desiredCount) { + roleCountFound = false; + } + details.append("[").append(role).append("]: desired: ") + .append(desiredCount).append("; actual: ") + .append(instanceCount).append(" "); + + // call out requested count, as this is a cause of problems on + // overloaded functional test clusters + String requested = status.roles.get(role).get(RoleKeys.ROLE_REQUESTED_INSTANCES); + if (requested != "0") { + details.append("requested: ").append(requested).append(" "); + } + } + if (roleCountFound) { + //successful + log.info("{}: role count as desired: {}", operation, details); + break; + } + } catch (BadClusterStateException e) { + // cluster not live yet; ignore or rethrow + if (timedOut) { + throw e; + } + details.append(e.toString()); + } + if (timedOut) { + duration.finish(); + describe(operation + ": role count not met after " + duration + ": " + details); + log.info(prettyPrintJson(status.toJsonString())); + fail(operation + ": role counts not met after " + duration + ": " + + details.toString() + " in \n" + status); + } + log.debug("Waiting: " + details); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + } + } + return status; + } + + /** + * Wait for the hbase master to be live (or past it in the lifecycle) + * @param client slider client + * @param spintime time to wait + * @return true if the cluster came out of the sleep time live + * @throws IOException + * @throws SliderException + */ + public static boolean spinForClusterStartup( + SliderClient client, + long spintime, + String role) + throws WaitTimeoutException, IOException, YarnException { + int state = client.waitForRoleInstanceLive(role, spintime); + return state == StateValues.STATE_LIVE; + } + + public static ClusterDescription dumpClusterStatus( + SliderClient client, + String text) throws IOException, YarnException { + ClusterDescription status = client.getClusterDescription(); + dumpClusterDescription(text, status); + return status; 
+ } + + public static List listNodesInRole( + SliderClient client, + String role) throws IOException, YarnException { + return client.listClusterNodesInRole(role); + } + + public static void dumpClusterDescription( + String text, + ClusterDescription status) throws IOException { + describe(text); + log.info(prettyPrintJson(status.toJsonString())); + } + + + public static void dumpClusterDescription(String text, AggregateConf status) { + describe(text); + log.info(status.toString()); + } + + /** + * Fetch the current site config from the Slider AM, from the + * clientProperties field of the ClusterDescription + * @param client client + * @return the site config + */ + public static Configuration fetchClientSiteConfig(SliderClient client) + throws IOException, YarnException { + ClusterDescription status = client.getClusterDescription(); + Configuration siteConf = new Configuration(false); + for (Entry entry : status.clientProperties.entrySet()) { + siteConf.set(entry.getKey(), entry.getValue(), "slider cluster"); + } + return siteConf; + } + + /** + * Fetch a web page + * @param url URL + * @return the response body + */ + + public static String GET(URL url) throws IOException { + return fetchWebPageRaisedErrorCodes(url.toString()); + } + + public static String GET(URL url, String path) throws IOException { + return GET(url.toString(), path); + } + + public static String GET(String base, String path) throws IOException { + String s = appendToURL(base, path); + return GET(s); + } + + public static String GET(String s) throws IOException { + return fetchWebPageRaisedErrorCodes(s); + } + + public static String appendToURL(String base, String path) { + return SliderUtils.appendToURL(base, path); + } + + public static String appendToURL(String base, String... paths) { + return SliderUtils.appendToURL(base, paths); + } + + /** + * Fetch a web page + * This DOES NOT work with secure connections. 
+ * @param url URL + * @return the response body + */ + + public static String fetchWebPage(String url) + throws IOException { + log.info("GET {}", url); + URL destURL = new URL(url); + HttpURLConnection conn = (HttpURLConnection) destURL.openConnection(); + conn.setRequestMethod("GET"); + conn.setConnectTimeout(10000); + conn.setFollowRedirects(true); + BufferedReader reader = null; + StringBuffer response; + + try { + conn.connect(); + int resultCode = conn.getResponseCode(); + log.info("Result code of {}", resultCode); + reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); + String inputLine; + response = new StringBuffer(); + + while ((inputLine = reader.readLine()) != null) { + response.append(inputLine).append("\n"); + } + } catch (IOException e) { + log.error("Failed on {}: {}", url, e); + throw e; + } finally { + if (reader != null) { + reader.close(); + } + } + + String responseBody = response.toString(); + log.info("Response body:\n{}", responseBody); + return responseBody; + } + + /** + * Fetches a web page asserting that the response code is between 200 and 400. + * This DOES NOT work with secure connections. + *

+ * + * Will error on 400 and 500 series response codes and let 200 and 300 through. + * @param url URL to get as string + * @return body of response + * @throws IOException Network IO problems or exit code >= 400 not specifically handled + * @throws NotFoundException 404 received + * @throws ForbiddenException 401 received + */ + public static String fetchWebPageRaisedErrorCodes(String url) + throws IOException { + assertNotNull(url); + + log.info("Fetching HTTP content at {}", url); + + URL destURL = new URL(url); + assertNotEquals(0, destURL.getPort()); + assertNotNull(destURL.getHost()); + + HttpURLConnection conn = (HttpURLConnection) destURL.openConnection(); + conn.setRequestMethod("GET"); + conn.setConnectTimeout(10000); + conn.setFollowRedirects(true); + BufferedReader reader = null; + StringBuffer response; + int resultCode; + + try { + conn.connect(); + resultCode = conn.getResponseCode(); + log.info("Result code of {}", resultCode); + reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); + String inputLine; + response = new StringBuffer(); + + while ((inputLine = reader.readLine()) != null) { + response.append(inputLine).append("\n"); + } + } catch (IOException e) { + throw NetUtils.wrapException(url, destURL.getPort(), "localhost", 0, e); + } finally { + if (reader != null) { + reader.close(); + } + } + + String body = response.toString(); + uprateFaults("GET", url, resultCode, body); + log.info("Response body:\n{}", body); + return body; + } + + /** + * Generate exceptions from error codes >= 400. Some are converted + * into specific exceptions. 
+ * @param verb HTTP verb + * @param url URL + * @param resultCode result code + * @param body any body + * @throws NotFoundException 404 received + * @throws ForbiddenException 401 received + * @throws IOException any other exit code + */ + public static void uprateFaults( + String verb, + String url, + int resultCode, + String body) throws IOException { + + if (resultCode == 404) { + throw new NotFoundException(url); + } + if (resultCode == 401) { + throw new ForbiddenException(url); + } + if (!(resultCode >= 200 && resultCode < 400)) { + String bodyDetails = (body == null ? + "(no body)" : + "body length " + body.length() + ":\n:" + body); + String message = verb + " to " + url + " failed with exit code " + + resultCode + "; " + bodyDetails; + + log.error(message); + throw new IOException(message); + } + } + + /** + * Execute any operation provided as a closure which returns a string, swallowing exceptions until + * eventually they time out. + * + * @param timeout + * @param operation + * @return + */ + public static String execOperation(int timeout, Action operation) + throws Exception { + Duration duration = new Duration(timeout).start(); + Exception ex = new IOException("limit exceeded before starting"); + while (!duration.getLimitExceeded()) { + try { + Object result = operation.invoke(); + return result.toString(); + } catch (Exception e) { + ex = e; + try { + Thread.sleep(1000); + } catch (InterruptedException e1) { + } + } + } + // timeout + throw ex; + } + + /** + * Static factory for URL connections + */ + static UrlConnectionOperations connectionOperations; + static UgiJerseyBinding jerseyBinding; + + /** + * Static initializer of the connection operations + * @param conf config + */ + public static synchronized void initHttpTestSupport(Configuration conf) { + connectionOperations = new UrlConnectionOperations(conf); + jerseyBinding = new UgiJerseyBinding(connectionOperations); + } + + /** + * Check for the HTTP support being initialized + */ + public 
static synchronized void assertHttpSupportInitialized() { + assertNotNull(connectionOperations); + assertNotNull(jerseyBinding); + } + + /** + * Create Jersey client with UGI integration + * @return + */ + public static Client createUGIJerseyClient() { + assertHttpSupportInitialized(); + ClientConfig clientConfig = createJerseyClientConfig(); + return new Client(jerseyBinding.getHandler(), clientConfig); + } + + /** + * Create Jersey client with URL handling by way + * of the java.net classes. This DOES NOT have any SPNEGO + * integration. If used to query a secure cluster via the + * RM Proxy, it MUST fail. + * @return a basic Jersey client + */ + public static Client createBasicJerseyClient() { + ClientConfig clientConfig = createJerseyClientConfig(); + return new Client(new URLConnectionClientHandler(), + clientConfig); + } + + /** + * Create a jersey client config with the settings needed for tests + * (e.g. POJO mappings) + * @return a client config + */ + public static ClientConfig createJerseyClientConfig() { + ClientConfig clientConfig = new DefaultClientConfig(); + clientConfig.getFeatures().put(JSONConfiguration.FEATURE_POJO_MAPPING, + Boolean.TRUE); + return clientConfig; + } + + /** + * Assert that a service operation succeeded + * @param service service + */ + public static void assertSucceeded(ServiceLauncher service) { + assertEquals(0, service.getServiceExitCode()); + } + + public static void assertContainersLive(ClusterDescription clusterDescription, + String component, int expected) { + log.info("Asserting component {} expected count {}", component, expected); + int actual = extractLiveContainerCount(clusterDescription, component); + if (expected != actual) { + log.warn("{} actual={}, expected {} in \n{}\n", component, actual, + expected, clusterDescription); + } + assertEquals(expected, actual); + } + + /** + * Robust extraction of live container count + * @param clusterDescription status + * @param component component to resolve + * @return the 
number of containers live. + */ + public static int extractLiveContainerCount( + ClusterDescription clusterDescription, + String component) { + Map stats = clusterDescription.statistics.get(component); + int actual = 0; + if (stats != null && stats.containsKey(StatusKeys.STATISTICS_CONTAINERS_LIVE)) { + actual = stats.get(StatusKeys.STATISTICS_CONTAINERS_LIVE); + } + return actual; + } + + /** + * Exec a set of commands, wait a few seconds for it to finish. + * @param status code + * @param commands + * @return the process + */ + public static ForkedProcessService exec(int status, List commands) + throws IOException, TimeoutException { + ForkedProcessService process = exec(commands); + + Integer exitCode = process.getExitCode(); + assertNotNull(exitCode); + assertEquals(status, exitCode.intValue()); + return process; + } + + /** + * Exec a set of commands, wait a few seconds for it to finish. + * @param commands + * @return + */ + public static ForkedProcessService exec(List commands) + throws IOException, TimeoutException { + ForkedProcessService process; + process = new ForkedProcessService( + commands.get(0), + EMPTY_MAP, + commands); + process.init(new Configuration()); + process.start(); + int timeoutMillis = 5000; + if (!process.waitForServiceToStop(timeoutMillis)) { + throw new TimeoutException( + "Process did not stop in " + timeoutMillis + "mS"); + } + return process; + } + + /** + * Does an application exist? Run the commands and if the + * operation fails with a FileNotFoundException, then + * this method returns false. + *

+ * Run something harmless like a -version command, something + * which must return 0 + * + * @param commands + * @return true if the command sequence succeeded + * false if they failed with no file + * @throws Exception on any other failure cause + */ + public static boolean doesAppExist(List commands) + throws IOException, TimeoutException { + try { + exec(0, commands); + return true; + } catch (ServiceStateException e) { + if (!(e.getCause() instanceof FileNotFoundException)) { + throw e; + } + return false; + } + } + + /** + * Locate an executable on the path + * @param exe executable name. If it is an absolute path which + * exists then it will returned direct + * @return the path to an exe or null for no match + */ + public static File locateExecutable(String exe) { + File exeNameAsPath = new File(exe).getAbsoluteFile(); + if (exeNameAsPath.exists()) { + return exeNameAsPath; + } + + File exepath = null; + String path = extractPath(); + String[] dirs = path.split(System.getProperty("path.separator")); + for (String dirname : dirs) { + File dir = new File(dirname); + + File possible = new File(dir, exe); + if (possible.exists()) { + exepath = possible; + } + } + return exepath; + } + + /** + * Lookup the PATH env var + * @return the path or null + */ + public static String extractPath() { + return extractEnvVar("PATH"); + } + + /** + * Find an environment variable. Uses case independent checking for + * the benefit of windows. + * Will fail if the var is not found. + * @param var path variable in upper case + * @return the env var + */ + public static String extractEnvVar(String var) { + String realkey = ""; + + for (String it : System.getenv().keySet()) { + if (it.toUpperCase(Locale.ENGLISH).equals(var)) { + realkey = it; + } + } + + if (SliderUtils.isUnset(realkey)) { + fail("No environment variable " + var + " found"); + } + String val = System.getenv(realkey); + + log.info("{} = {}", realkey, val); + return val; + } + + /** + * Create a temp JSON file. 
After coming up with the name, the file + * is deleted + * @return the filename + */ + public static File createTempJsonFile() throws IOException { + return tmpFile(".json"); + } + + /** + * Create a temp file with the specific name. It's deleted after creation, + * to avoid "file exists exceptions" + * @param suffix suffix, e.g. ".txt" + * @return a path to a file which may be created + */ + public static File tmpFile(String suffix) throws IOException { + File reportFile = File.createTempFile( + "temp", + suffix, + new File("target")); + reportFile.delete(); + return reportFile; + } + + /** + * Execute a closure, assert it fails with a given exit code and text + * @param exitCode exit code + * @param text text (can be "") + * @param action action + * @return + */ + public void assertFailsWithException(int exitCode, + String text, + Action action) throws Exception { + try { + action.invoke(); + fail("Operation was expected to fail —but it succeeded"); + } catch (ServiceLaunchException e) { + assertExceptionDetails(e, exitCode, text); + } + } + + /** + * Execute a closure, assert it fails with a given exit code and text + * @param text text (can be "") + * @param action action + * @return + */ + public void assertFailsWithExceptionClass(Class clazz, + String text, + Action action) throws Exception { + try { + action.invoke(); + fail("Operation was expected to fail —but it succeeded"); + } catch (Exception e) { + assertExceptionDetails(e, clazz, text); + } + } + + public static void assertExceptionDetails( + ServiceLaunchException ex, + int exitCode) { + assertExceptionDetails(ex, exitCode, null); + } + + /** + * Make an assertion about the exit code of an exception + * @param ex exception + * @param exitCode exit code + * @param text error text to look for in the exception + */ + public static void assertExceptionDetails( + ServiceLaunchException ex, + int exitCode, + String text) { + if (exitCode != ex.getExitCode()) { + String message = String.format("Wrong exit 
code, expected %d but" + + " got %d in %s", exitCode, ex.getExitCode(), ex); + log.warn(message, ex); + throw new AssertionError(message, ex); + } + if (SliderUtils.isSet(text)) { + if (!(ex.toString().contains(text))) { + String message = String.format("String match for \"%s\"failed in %s", + text, ex); + log.warn(message, ex); + throw new AssertionError(message, ex); + } + } + } + + /** + * Make an assertion about the class of an exception + * @param ex exception + * @param clazz exit code + * @param text error text to look for in the exception + */ + static void assertExceptionDetails( + Exception ex, + Class clazz, + String text) throws Exception { + if (ex.getClass() != clazz) { + throw ex; + } + if (SliderUtils.isSet(text) && !(ex.toString().contains(text))) { + throw ex; + } + } + + /** + * Launch the slider client with the specific args; no validation + * of return code takes place + * @param conf configuration + * @param args arg list + * @return the launcher + */ + protected static ServiceLauncher execSliderCommand( + Configuration conf, + List args) throws Throwable { + ServiceLauncher serviceLauncher = + new ServiceLauncher(sliderClientClassName); + + log.debug("slider {}", SliderUtils.join(args, " ", false)); + serviceLauncher.launchService(conf, + toArray(args), + false); + return serviceLauncher; + } + + /** + * Launch a slider command to a given exit code. + * Most failures will trigger exceptions; this is for the exit code of the runService() + * call. 
+ * @param exitCode desired exit code + * @param conf configuration + * @param args arg list + * @return the launcher + */ + protected static ServiceLauncher execSliderCommand( + int exitCode, + Configuration conf, + List args) throws Throwable { + ServiceLauncher serviceLauncher = execSliderCommand(conf, + args); + assertEquals(exitCode, serviceLauncher.getServiceExitCode()); + return serviceLauncher; + } + + public static ServiceLauncher launch(Class serviceClass, + Configuration conf, + List args) throws + Throwable { + ServiceLauncher serviceLauncher = + new ServiceLauncher(serviceClass.getName()); + + String joinedArgs = SliderUtils.join(args, " ", false); + log.debug("slider {}", joinedArgs); + + serviceLauncher.launchService(conf, + toArray(args), + false); + return serviceLauncher; + } + + public static Throwable launchExpectingException(Class serviceClass, + Configuration conf, + String expectedText, + List args) + throws Throwable { + try { + ServiceLauncher launch = launch(serviceClass, conf, args); + throw new AssertionError("Expected an exception with text containing $expectedText "+ + " -but the service completed with exit code ${launch.serviceExitCode}"); + } catch (AssertionError error) { + throw error; + } catch (Throwable thrown) { + if (SliderUtils.isSet(expectedText) && !thrown.toString().contains + (expectedText)) { + //not the right exception -rethrow + log.warn("Caught Exception did not contain expected text" + + "\"" + expectedText + "\""); + throw thrown; + } + return thrown; + } + } + + + public static ServiceLauncher launchClientAgainstRM( + String address, + List args, + Configuration conf) throws Throwable { + assertNotNull(address); + log.info("Connecting to rm at {}", address); + if (!args.contains(Arguments.ARG_MANAGER)) { + args.add(Arguments.ARG_MANAGER); + args.add(address); + } + ServiceLauncher launcher = execSliderCommand(conf, args); + return launcher; + } + + /** + * Add a configuration parameter as a cluster configuration 
option + * @param extraArgs extra arguments + * @param conf config + * @param option option + */ + public static void addClusterConfigOption( + List extraArgs, + YarnConfiguration conf, + String option) { + + conf.getTrimmed(option); + extraArgs.add(ARG_OPTION); + extraArgs.add(option); + extraArgs.add(getRequiredConfOption(conf, option)); + } + + /** + * Assert that a path refers to a directory + * @param fs filesystem + * @param path path of the directory + * @throws IOException on File IO problems + */ + public static void assertIsDirectory(FileSystem fs, + Path path) throws IOException { + FileStatus fileStatus = fs.getFileStatus(path); + assertIsDirectory(fileStatus); + } + + /** + * Assert that a path refers to a directory + * @param fileStatus stats to check + */ + public static void assertIsDirectory(FileStatus fileStatus) { + assertTrue("Should be a dir -but isn't: " + fileStatus, + fileStatus.isDirectory()); + } + + /** + * Assert that a path exists -but make no assertions as to the + * type of that entry + * + * @param fileSystem filesystem to examine + * @param message message to include in the assertion failure message + * @param path path in the filesystem + * @throws IOException IO problems + */ + public static void assertPathExists( + FileSystem fileSystem, + String message, + Path path) throws IOException { + if (!fileSystem.exists(path)) { + //failure, report it + fail( + message + ": not found \"" + path + "\" in " + path.getParent() + + "-" + + ls(fileSystem, path.getParent())); + } + } + + /** + * Assert that a path does not exist + * + * @param fileSystem filesystem to examine + * @param message message to include in the assertion failure message + * @param path path in the filesystem + * @throws IOException IO problems + */ + public static void assertPathDoesNotExist( + FileSystem fileSystem, + String message, + Path path) throws IOException { + try { + FileStatus status = fileSystem.getFileStatus(path); + // a status back implies there is a 
file here + fail(message + ": unexpectedly found " + path + " as " + status); + } catch (FileNotFoundException expected) { + //this is expected + + } + } + + /** + * Assert that a FileSystem.listStatus on a dir finds the subdir/child entry + * @param fs filesystem + * @param dir directory to scan + * @param subdir full path to look for + * @throws IOException IO probles + */ + public static void assertListStatusFinds(FileSystem fs, + Path dir, + Path subdir) throws IOException { + FileStatus[] stats = fs.listStatus(dir); + boolean found = false; + StringBuilder builder = new StringBuilder(); + for (FileStatus stat : stats) { + builder.append(stat.toString()).append('\n'); + if (stat.getPath().equals(subdir)) { + found = true; + } + } + assertTrue("Path " + subdir + + " not found in directory " + dir + ":" + builder, + found); + } + + /** + * List a a path to string + * @param fileSystem filesystem + * @param path directory + * @return a listing of the filestatuses of elements in the directory, one + * to a line, precedeed by the full path of the directory + * @throws IOException connectivity problems + */ + public static String ls(FileSystem fileSystem, Path path) + throws + IOException { + if (path == null) { + //surfaces when someone calls getParent() on something at the top of the path + return "/"; + } + FileStatus[] stats; + String pathtext = "ls " + path; + try { + stats = fileSystem.listStatus(path); + } catch (FileNotFoundException e) { + return pathtext + " -file not found"; + } catch (IOException e) { + return pathtext + " -failed: " + e; + } + return pathtext + fileStatsToString(stats, "\n"); + } + + /** + * Take an array of filestats and convert to a string (prefixed w/ a [01] counter + * @param stats array of stats + * @param separator separator after every entry + * @return a stringified set + */ + public static String fileStatsToString(FileStatus[] stats, String separator) { + StringBuilder buf = new StringBuilder(stats.length * 128); + for (int i = 
0; i < stats.length; i++) { + buf.append(String.format("[%02d] %s", i, stats[i])).append(separator); + } + return buf.toString(); + } + + public static void waitWhileClusterLive(SliderClient sliderClient) + throws IOException, YarnException { + waitWhileClusterLive(sliderClient, 30000); + } + + public static void dumpRegistryInstances( + Map instances) { + describe("service registry slider instances"); + for (Entry it : instances.entrySet()) { + log.info(" {} : {}", it.getKey(), it.getValue()); + } + describe("end list service registry slider instances"); + } + + + public static void dumpRegistryInstanceIDs(List instanceIds) { + describe("service registry instance IDs"); + dumpCollection(instanceIds); + } + + public static void dumpRegistryServiceTypes(Collection entries) { + describe("service registry types"); + dumpCollection(entries); + } + + public static void dumpCollection(Collection entries) { + log.info("number of entries: {}", entries.size()); + for (V it : entries) { + log.info(it.toString()); + } + } + + public static void dumpArray(Object[] entries) { + log.info("number of entries: {}", entries.length); + for (Object it : entries) { + log.info(it.toString()); + } + } + + public static void dumpMap(Map map) { + for (Entry it : map.entrySet()) { + log.info("\"{}\": \"{}\"", it.getKey().toString(), it.getValue() + .toString()); + } + } + + /** + * Get a time option in seconds if set, otherwise the default value (also in seconds). 
+ * This operation picks up the time value as a system property if set -that + * value overrides anything in the test file + * @param conf + * @param key + * @param defValMillis + * @return + */ + public static int getTimeOptionMillis( + Configuration conf, + String key, + int defValMillis) { + int val = conf.getInt(key, 0); + val = Integer.getInteger(key, val); + int time = 1000 * val; + if (time == 0) { + time = defValMillis; + } + return time; + } + + public void dumpConfigurationSet(PublishedConfigSet confSet) { + for (String key : confSet.keys()) { + PublishedConfiguration config = confSet.get(key); + log.info("{} -- {}", key, config.description); + } + } + + /** + * Convert a file to a URI suitable for use in an argument + * @param file file + * @return a URI string valid on all platforms + */ + public String toURIArg(File file) { + return file.getAbsoluteFile().toURI().toString(); + } + + /** + * Assert a file exists; fails with a listing of the parent dir + * @param text text for front of message + * @param file file to look for + * @throws FileNotFoundException + */ + public void assertFileExists(String text, File file) + throws FileNotFoundException { + if (!file.exists()) { + File parent = file.getParentFile(); + String[] files = parent.list(); + StringBuilder builder = new StringBuilder(); + builder.append(parent.getAbsolutePath()); + builder.append(":\n"); + for (String name : files) { + builder.append(" "); + builder.append(name); + builder.append("\n"); + } + throw new FileNotFoundException(text + ": " + file + " not found in " + + builder); + } + } + + /** + * Repeat a probe until it succeeds, if it does not execute a failure + * closure then raise an exception with the supplied message + * @param probe probe + * @param timeout time in millis before giving up + * @param sleepDur sleep between failing attempts + * @param args map of arguments to the probe + * @param failIfUnsuccessful if the probe fails after all the attempts + * —should it raise an 
exception + * @param failureMessage message to include in exception raised + * @param failureHandler closure to invoke prior to the failure being raised + */ + protected void repeatUntilSuccess( + String action, + Probe probe, + int timeout, + int sleepDur, + Map args, + boolean failIfUnsuccessful, + String failureMessage, + Action failureHandler) throws Exception { + log.debug("Probe {} timelimit {}", action, timeout); + if (timeout < 1000) { + fail("Timeout " + timeout + " too low: milliseconds are expected, not " + + "seconds"); + } + int attemptCount = 1; + boolean succeeded = false; + boolean completed = false; + Duration duration = new Duration(timeout); + duration.start(); + while (!completed) { + Outcome outcome = probe.invoke(args); + if (outcome.equals(Outcome.Success)) { + // success + log.debug("Success after {} attempt(s)", attemptCount); + succeeded = true; + completed = true; + } else if (outcome.equals(Outcome.Retry)) { + // failed but retry possible + attemptCount++; + completed = duration.getLimitExceeded(); + if (!completed) { + log.debug("Attempt {} failed", attemptCount); + try { + Thread.sleep(sleepDur); + } catch (InterruptedException e) { + } + } + } else if (outcome.equals(Outcome.Fail)) { + // fast fail + log.debug("Fast fail of probe"); + completed = true; + } + } + if (!succeeded) { + if (duration.getLimitExceeded()) { + log.info("probe timed out after {} and {} attempts", timeout, + attemptCount); + } + if (failureHandler != null) { + failureHandler.invoke(); + } + if (failIfUnsuccessful) { + fail(failureMessage); + } + } + } + + /** + * Get a value from a map; raise an assertion if it is not there + * @param map map to look up + * @param key key + * @return the string value + */ + public String requiredMapValue(Map map, String key) { + assertNotNull(map.get(key)); + return map.get(key).toString(); + } + + public static void assertStringContains(String expected, String text) { + assertNotNull("null text", text); + if 
(!text.contains(expected)) { + String message = String.format("did not find %s in \"%s\"", expected, + text); + log.error(message); + fail(message); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/TestAssertions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/TestAssertions.java new file mode 100644 index 0000000..851a08f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/TestAssertions.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.test; + +import org.apache.slider.api.ClusterDescription; +import org.apache.slider.api.StatusKeys; +import org.junit.Test; + +import java.io.InputStream; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +/** + * Test for some of the command test base operations + */ +public class TestAssertions { + + public static final String CLUSTER_JSON = "json/cluster.json"; + + @Test + public void testNoInstances() throws Throwable { + ClusterDescription clusterDescription = new ClusterDescription(); + clusterDescription.instances = null; + SliderTestUtils.assertContainersLive(clusterDescription, "example", 0); + } + + @Test + public void testEmptyInstances() throws Throwable { + ClusterDescription clusterDescription = new ClusterDescription(); + SliderTestUtils.assertContainersLive(clusterDescription, "example", 0); + } + + @Test + public void testLiveInstances() throws Throwable { + InputStream stream = getClass().getClassLoader().getResourceAsStream( + CLUSTER_JSON); + assertNotNull("could not load " + CLUSTER_JSON, stream); + ClusterDescription liveCD = ClusterDescription.fromStream(stream); + assertNotNull(liveCD); + SliderTestUtils.assertContainersLive(liveCD, "SLEEP_LONG", 4); + assertEquals((Integer) 1, liveCD.statistics.get("SLEEP_LONG").get( + StatusKeys.STATISTICS_CONTAINERS_ANTI_AFFINE_PENDING)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/YarnMiniClusterTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/YarnMiniClusterTestBase.java new file mode 100644 index 0000000..6d0f2d4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/YarnMiniClusterTestBase.java @@ -0,0 
+1,1083 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.slider.test; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.service.ServiceOperations; +import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.MiniYARNCluster; +import org.apache.slider.api.ClusterNode; +import org.apache.slider.client.SliderClient; +import org.apache.slider.common.SliderExitCodes; +import org.apache.slider.common.SliderXmlConfKeys; +import org.apache.slider.common.params.ActionFreezeArgs; +import org.apache.slider.common.params.Arguments; +import org.apache.slider.common.params.SliderActions; +import org.apache.slider.common.tools.Duration; +import 
org.apache.slider.common.tools.SliderFileSystem; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.exceptions.ErrorStrings; +import org.apache.slider.core.exceptions.SliderException; +import org.apache.slider.core.main.ServiceLauncher; +import org.apache.slider.core.main.ServiceLauncherBaseTest; +import org.apache.slider.server.appmaster.SliderAppMaster; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Map.Entry; + +import static org.apache.slider.test.KeysForTests.*; + +/** + * Base class for mini cluster tests -creates a field for the + * mini yarn cluster + */ +public abstract class YarnMiniClusterTestBase extends ServiceLauncherBaseTest { + protected static final Logger log = + LoggerFactory.getLogger(YarnMiniClusterTestBase.class); + + /** + * Mini YARN cluster only + */ + public static final int CLUSTER_GO_LIVE_TIME = 3 * 60 * 1000; + public static final int CLUSTER_STOP_TIME = 1 * 60 * 1000; + public static final int SIGTERM = -15; + public static final int SIGKILL = -9; + public static final int SIGSTOP = -17; + public static + final String NO_ARCHIVE_DEFINED = "Archive configuration option not set: "; + /** + * RAM for the YARN containers: {@value} + */ + public static final String YRAM = "256"; + public static final String FIFO_SCHEDULER = "org.apache.hadoop.yarn.server" + + ".resourcemanager.scheduler.fifo.FifoScheduler"; + public static final YarnConfiguration SLIDER_CONFIG = + SliderUtils.createConfiguration(); + public static boolean 
kill_supported; + + static { + SLIDER_CONFIG.setInt(SliderXmlConfKeys.KEY_AM_RESTART_LIMIT, 1); + SLIDER_CONFIG.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 100); + SLIDER_CONFIG.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false); + SLIDER_CONFIG.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false); + SLIDER_CONFIG + .setBoolean(SliderXmlConfKeys.KEY_SLIDER_AM_DEPENDENCY_CHECKS_DISABLED, + true); + SLIDER_CONFIG + .setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 1); + } + + + public int thawWaitTime = DEFAULT_THAW_WAIT_TIME_SECONDS * 1000; + public int freezeWaitTime = DEFAULT_TEST_FREEZE_WAIT_TIME_SECONDS * 1000; + public int sliderTestTimeout = DEFAULT_TEST_TIMEOUT_SECONDS * 1000; + public boolean teardownKillall = DEFAULT_TEARDOWN_KILLALL; + /** + * This is set in a system property + */ + + @Rule + public Timeout testTimeout = new Timeout( + getTimeOptionMillis(getTestConfiguration(), + KEY_TEST_TIMEOUT, + DEFAULT_TEST_TIMEOUT_SECONDS * 1000) + ); + protected MiniDFSCluster hdfsCluster; + protected MiniYARNCluster miniCluster; + protected boolean switchToImageDeploy = false; + protected boolean imageIsRemote = false; + protected URI remoteImageURI; + protected List clustersToTeardown = new ArrayList<>(); + private int clusterCount = 1; + + /** + * Clent side test: validate system env before launch + */ + @BeforeClass + public static void checkClientEnv() throws IOException, SliderException { + SliderUtils.validateSliderClientEnvironment(null); + } + + /** + * Work out if kill is supported + */ + @BeforeClass + public static void checkKillSupport() { + kill_supported = !Shell.WINDOWS; + } + + /** + * Probe for the disks being healthy in a mini cluster. 
Only the first + * NM is checked + * + * @param miniCluster + */ + public static void assertMiniClusterDisksHealthy( + MiniYARNCluster miniCluster) { + boolean healthy = miniCluster.getNodeManager( + 0).getNodeHealthChecker().getDiskHandler().areDisksHealthy(); + assertTrue("Disks on test cluster unhealthy —may be full", healthy); + } + + /** + * Inner work building the mini dfs cluster + * + * @param name + * @param conf + * @return + */ + public static MiniDFSCluster buildMiniHDFSCluster( + String name, + YarnConfiguration conf) throws IOException { + assertNativeLibrariesPresent(); + + File baseDir = new File("./target/hdfs", name).getAbsoluteFile(); + //use file: to rm it recursively + FileUtil.fullyDelete(baseDir); + conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath()); + MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf); + + MiniDFSCluster cluster = builder.build(); + return cluster; + } + + public static String buildFsDefaultName(MiniDFSCluster miniDFSCluster) { + if (miniDFSCluster != null) { + return String.format("hdfs://localhost:%d/", + miniDFSCluster.getNameNodePort()); + } else { + return "file:///"; + } + } + + /** + * force kill the application after waiting for + * it to shut down cleanly + * + * @param client client to talk to + * @param waitTime time in milliseconds to wait + * @return the final application report + */ + public static ApplicationReport waitForAppToFinish( + SliderClient client, + int waitTime) throws IOException, YarnException { + ApplicationReport report = client.monitorAppToState( + YarnApplicationState.FINISHED, + new Duration(waitTime)); + if (report == null) { + log.info("Forcibly killing application"); + dumpClusterStatus(client, "final application status"); + //list all the nodes' details + List nodes = listNodesInRole(client, ""); + if (SliderUtils.isEmpty(nodes)) { + log.info("No live nodes"); + } + for (ClusterNode node : nodes) { + log.info(node.toString()); + } + 
client.forceKillApplication( + "timed out waiting for application to complete"); + report = client.getApplicationReport(); + } + return report; + } + + /** + * Assert that an operation failed because a cluster is in use + * + * @param e exception + */ + public static void assertFailureClusterInUse(SliderException e) { + assertExceptionDetails(e, + SliderExitCodes.EXIT_APPLICATION_IN_USE, + ErrorStrings.E_CLUSTER_RUNNING); + } + + protected String buildClustername(String clustername) { + if (SliderUtils.isSet(clustername)) { + return clustername; + } else { + return createClusterName(); + } + } + + /** + * Create the cluster name from the method name and an auto-incrementing + * counter. + * + * @return a cluster name + */ + protected String createClusterName() { + String base = methodName.getMethodName().toLowerCase(Locale.ENGLISH); + if (clusterCount++ > 1) { + return String.format("%s-%d", base, clusterCount); + } + return base; + } + + @Override + public void setup() throws Exception { + super.setup(); + Configuration testConf = getTestConfiguration(); + thawWaitTime = getTimeOptionMillis(testConf, + KEY_TEST_THAW_WAIT_TIME, + thawWaitTime); + freezeWaitTime = getTimeOptionMillis(testConf, + KEY_TEST_FREEZE_WAIT_TIME, + freezeWaitTime); + sliderTestTimeout = getTimeOptionMillis(testConf, + KEY_TEST_TIMEOUT, + sliderTestTimeout); + teardownKillall = + testConf.getBoolean(KEY_TEST_TEARDOWN_KILLALL, + teardownKillall); + + } + + @After + public void teardown() { + describe("teardown"); + stopRunningClusters(); + stopMiniCluster(); + } + + protected void addToTeardown(SliderClient client) { + clustersToTeardown.add(client); + } + + protected void addToTeardown(ServiceLauncher launcher) { + if (launcher != null) { + SliderClient sliderClient = launcher.getService(); + if (sliderClient != null) { + addToTeardown(sliderClient); + } + } + } + + /** + * Kill any java process with the given grep pattern + * + * @param grepString string to grep for + */ + public int 
killJavaProcesses(String grepString, int signal) + throws IOException, InterruptedException { + + String[] commandString; + if (!Shell.WINDOWS) { + String killCommand = String.format( + "jps -l| grep %s | awk '{print $1}' | xargs kill %d", grepString, + signal); + log.info("Command command = {}", killCommand); + + commandString = new String[]{"bash", "-c", killCommand}; + } else { + // windows + if (!kill_supported) { + return -1; + } + + /* + "jps -l | grep "String" | awk "{print $1}" | xargs -n 1 taskkill /PID" + */ + String killCommand = String.format( + "jps -l | grep %s | gawk '{print $1}' | xargs -n 1 taskkill /f " + + "/PID", grepString); + commandString = new String[]{"CMD", "/C", killCommand}; + } + + Process command = new ProcessBuilder(commandString).start(); + int exitCode = command.waitFor(); + + logStdOutStdErr(command); + return exitCode; + } + + /** + * Kill all processes which match one of the list of grepstrings + * + * @param greps + * @param signal + */ + public void killJavaProcesses(List greps, int signal) + throws IOException, InterruptedException { + for (String grep : greps) { + killJavaProcesses(grep, signal); + } + } + + protected YarnConfiguration getConfiguration() { + return SLIDER_CONFIG; + } + + /** + * Stop any running cluster that has been added + */ + public void stopRunningClusters() { + for (SliderClient client : clustersToTeardown) { + try { + maybeStopCluster(client, "", "Teardown at end of test case", true); + } catch (Exception e) { + log.warn("While stopping cluster " + e, e); + } + } + } + + public void stopMiniCluster() { + Log commonslog = LogFactory.getLog(YarnMiniClusterTestBase.class); + ServiceOperations.stopQuietly(commonslog, miniCluster); + if (hdfsCluster != null) { + hdfsCluster.shutdown(); + } + } + + /** + * Create and start a minicluster + * + * @param name cluster/test name; if empty one is created from the junit method + * @param conf configuration to use + * @param noOfNodeManagers #of NMs + * @param 
numLocalDirs #of local dirs + * @param numLogDirs #of log dirs + * @param startHDFS create an HDFS mini cluster + * @return the name of the cluster + */ + protected String createMiniCluster(String name, + YarnConfiguration conf, + int noOfNodeManagers, + int numLocalDirs, + int numLogDirs, + boolean startHDFS) throws IOException { + assertNativeLibrariesPresent(); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64); + conf.set(YarnConfiguration.RM_SCHEDULER, FIFO_SCHEDULER); + patchDiskCapacityLimits(conf); + SliderUtils.patchConfiguration(conf); + name = buildClustername(name); + miniCluster = new MiniYARNCluster( + name, + noOfNodeManagers, + numLocalDirs, + numLogDirs); + miniCluster.init(conf); + miniCluster.start(); + // health check + assertMiniClusterDisksHealthy(miniCluster); + if (startHDFS) { + createMiniHDFSCluster(name, conf); + } + return name; + } + + public void patchDiskCapacityLimits(YarnConfiguration conf) { + conf.setFloat( + YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, + 99.0f); + conf.setInt(SliderXmlConfKeys.DFS_NAMENODE_DU_RESERVED_KEY, + 2 * 1024 * 1024); + conf.setBoolean("yarn.nodemanager.disk-health-checker.enable", false); + } + + /** + * Create a mini HDFS cluster and save it to the hdfsClusterField + * + * @param name + * @param conf + */ + public void createMiniHDFSCluster(String name, YarnConfiguration conf) + throws IOException { + hdfsCluster = buildMiniHDFSCluster(name, conf); + } + + /** + * Launch the client with the specific args against the MiniMR cluster + * launcher ie expected to have successfully completed + * + * @param conf configuration + * @param args arg list + * @return the return code + */ + protected ServiceLauncher launchClientAgainstMiniMR( + Configuration conf, + List args) + throws Throwable { + ServiceLauncher launcher = + launchClientNoExitCodeCheck(conf, args); + int exited = launcher.getServiceExitCode(); + if (exited != 0) { + throw new SliderException(exited, "Launch 
failed with exit code " + exited); + } + return launcher; + } + + /** + * Launch the client with the specific args against the MiniMR cluster + * without any checks for exit codes + * + * @param conf configuration + * @param args arg list + * @return the return code + */ + public ServiceLauncher launchClientNoExitCodeCheck( + Configuration conf, + List args) throws Throwable { + assertNotNull(miniCluster); + return launchClientAgainstRM(getRMAddr(), args, conf); + } + + /** + * Kill all Slider Services. + * + * @param signal + */ + public int killAM(int signal) throws IOException, InterruptedException { + return killJavaProcesses(SliderAppMaster.SERVICE_CLASSNAME_SHORT, signal); + } + + public void logStdOutStdErr(Process p) throws IOException { + try (BufferedReader br = new BufferedReader(new InputStreamReader(p + .getInputStream()))) { + String line = br.readLine(); + while (line != null) { + log.info(line); + line = br.readLine(); + } + } + try (BufferedReader br = new BufferedReader(new InputStreamReader(p + .getErrorStream()))) { + String line = br.readLine(); + while (line != null) { + log.error(line); + line = br.readLine(); + } + } + } + + /** + * List any java process + */ + public void lsJavaProcesses() throws InterruptedException, IOException { + Process bash = new ProcessBuilder("jps", "-v").start(); + bash.waitFor(); + logStdOutStdErr(bash); + } + + public YarnConfiguration getTestConfiguration() { + YarnConfiguration conf = getConfiguration(); + conf.addResource(SLIDER_TEST_XML); + return conf; + } + + protected String getRMAddr() { + assertNotNull(miniCluster); + String addr = miniCluster.getConfig().get(YarnConfiguration.RM_ADDRESS); + assertNotNull(addr != null); + assertNotEquals("", addr); + return addr; + } + + /** + * return the default filesystem, which is HDFS if the miniDFS cluster is + * up, file:// if not + * + * @return a filesystem string to pass down + */ + protected String getFsDefaultName() { + return buildFsDefaultName(hdfsCluster); 
+ } + + protected String getWaitTimeArg() { + return WAIT_TIME_ARG; + } + + protected int getWaitTimeMillis(Configuration conf) { + + return WAIT_TIME * 1000; + } + + public ServiceLauncher createCluster( + String clustername, + boolean deleteExistingData, + boolean blockUntilRunning) + throws Throwable { + return createCluster(clustername, EMPTY_INT_MAP, EMPTY_LIST, + deleteExistingData, blockUntilRunning, EMPTY_MAP); + } + + /** + * Create a cluster + * + * @param clustername cluster name + * @param roles map of rolename to count + * @param extraArgs list of extra args to add to the creation command + * @param deleteExistingData should the data of any existing cluster + * of this name be deleted + * @param blockUntilRunning block until the AM is running + * @param clusterOps map of key=value cluster options to set with the --option arg + * @return launcher which will have executed the command. + */ + public ServiceLauncher createCluster( + String clustername, + Map roles, + List extraArgs, + boolean deleteExistingData, + boolean blockUntilRunning, + Map clusterOps) + throws Throwable { + return createOrBuildCluster( + SliderActions.ACTION_CREATE, + clustername, + roles, + extraArgs, + deleteExistingData, + blockUntilRunning, + clusterOps); + } + + /** + * Create or build a cluster (the action is set by the first verb) + * + * @param action operation to invoke: SliderActions.ACTION_CREATE or SliderActions.ACTION_BUILD + * @param clustername cluster name + * @param roles map of rolename to count + * @param extraArgs list of extra args to add to the creation command + * @param deleteExistingData should the data of any existing cluster + * of this name be deleted + * @param blockUntilRunning block until the AM is running + * @param clusterOps map of key=value cluster options to set with the --option arg + * @return launcher which will have executed the command. 
+ */ + public ServiceLauncher createOrBuildCluster(String action, + String clustername, + Map roles, List extraArgs, + boolean deleteExistingData, + boolean blockUntilRunning, Map clusterOps) + throws Throwable { + assertNotNull(clustername); + assertNotEquals("", clustername); + assertNotNull(miniCluster); + // update action should keep existing data + Configuration config = miniCluster.getConfig(); + if (deleteExistingData && !SliderActions.ACTION_UPDATE.equals(action)) { + FileSystem dfs = FileSystem.get(new URI(getFsDefaultName()), config); + + SliderFileSystem sliderFileSystem = new SliderFileSystem(dfs, config); + Path clusterDir = sliderFileSystem.buildClusterDirPath(clustername); + log.info("deleting instance data at {}", clusterDir); + //this is a safety check to stop us doing something stupid like deleting / + assertTrue(clusterDir.toString().contains("/.slider/")); + rigorousDelete(sliderFileSystem, clusterDir, 60000); + } + + + List componentList = new ArrayList<>(); + for (Entry entry : roles.entrySet()) { + String role = entry.getKey(); + Integer val = entry.getValue(); + log.info("Component {} := {}", role, val); + componentList.add(Arguments.ARG_COMPONENT); + componentList.add(role); + componentList.add(Integer.toString(val)); + } + + List argsList = new ArrayList<>(); + argsList.addAll(Arrays.asList( + action, clustername, + Arguments.ARG_MANAGER, getRMAddr(), + Arguments.ARG_FILESYSTEM, getFsDefaultName(), + Arguments.ARG_DEBUG, + Arguments.ARG_CONFDIR, getConfDir() + )); + if (blockUntilRunning) { + argsList.add(Arguments.ARG_WAIT); + argsList.add(WAIT_TIME_ARG); + } + + argsList.addAll(getExtraCLIArgs()); + argsList.addAll(componentList); + argsList.addAll(getImageCommands()); + + //now inject any cluster options + for (Entry entry : clusterOps.entrySet()) { + argsList.add(Arguments.ARG_OPTION); + argsList.add(entry.getKey()); + argsList.add(entry.getValue()); + } + + if (extraArgs != null) { + argsList.addAll(extraArgs); + } + ServiceLauncher 
launcher = launchClientAgainstMiniMR( + //config includes RM binding info + new YarnConfiguration(config), + //varargs list of command line params + argsList + ); + assertEquals(0, launcher.getServiceExitCode()); + SliderClient client = launcher.getService(); + if (blockUntilRunning) { + client.monitorAppToRunning(new Duration(CLUSTER_GO_LIVE_TIME)); + } + return launcher; + } + + /** + * Delete with some pauses and backoff; designed to handle slow delete + * operation in windows + */ + public void rigorousDelete( + SliderFileSystem sliderFileSystem, + Path path, long timeout) throws IOException, SliderException { + + if (path.toUri().getScheme() == "file") { + File dir = new File(path.toUri().getPath()); + rigorousDelete(dir, timeout); + } else { + Duration duration = new Duration(timeout); + duration.start(); + FileSystem dfs = sliderFileSystem.getFileSystem(); + boolean deleted = false; + while (!deleted && !duration.getLimitExceeded()) { + dfs.delete(path, true); + deleted = !dfs.exists(path); + if (!deleted) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + log.info("ignoring interrupted sleep"); + } + } + } + } + sliderFileSystem.verifyDirectoryNonexistent(path); + } + + /** + * Delete with some pauses and backoff; designed to handle slow delete + * operation in windows + * + * @param dir dir to delete + * @param timeout timeout in millis + */ + public void rigorousDelete(File dir, long timeout) throws IOException { + Duration duration = new Duration(timeout); + duration.start(); + boolean deleted = false; + while (!deleted && !duration.getLimitExceeded()) { + FileUtils.deleteQuietly(dir); + deleted = !dir.exists(); + if (!deleted) { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + log.info("ignoring interrupted sleep"); + } + } + } + if (!deleted) { + // noisy delete raises an IOE + FileUtils.deleteDirectory(dir); + } + } + + /** + * Add arguments to launch Slider with. + *

+ * Extra arguments are added after standard arguments and before roles. + * + * @return additional arguments to launch Slider with + */ + protected List getExtraCLIArgs() { + return new ArrayList<>(); + } + + public String getConfDir() throws FileNotFoundException { + return getResourceConfDirURI(); + } + + /** + * Get the key for the application + * + * @return + */ + public String getApplicationHomeKey() { + failNotImplemented(); + return null; + } + + /** + * Get the archive path -which defaults to the local one + * + * @return + */ + public String getArchivePath() { + return getLocalArchive(); + } + + /** + * Get the local archive -the one defined in the test configuration + * + * @return a possibly null/empty string + */ + public final String getLocalArchive() { + return getTestConfiguration().getTrimmed(getArchiveKey()); + } + + /** + * Get the key for archives in tests + * + * @return + */ + public String getArchiveKey() { + failNotImplemented(); + return null; + } + + /** + * Merge a k-v pair into a simple k=v string; simple utility + * + * @param key key + * @param val value + * @return the string to use after a -D option + */ + public String define(String key, String val) { + return String.format("%s=%s", key, val); + } + + public void assumeTestEnabled(boolean flag) { + assume(flag, "test disabled"); + } + + public void assumeArchiveDefined() { + String archive = getArchivePath(); + boolean defined = archive != null && archive != ""; + if (!defined) { + log.warn(NO_ARCHIVE_DEFINED + getArchiveKey()); + } + assume(defined, NO_ARCHIVE_DEFINED + getArchiveKey()); + } + + /** + * Assume that application home is defined. This does not check that the + * path is valid -that is expected to be a failure on tests that require + * application home to be set. 
+ */ + public void assumeApplicationHome() { + String applicationHome = getApplicationHome(); + assume(applicationHome != null && !applicationHome.isEmpty(), + "Application home dir option not set " + getApplicationHomeKey()); + } + + public String getApplicationHome() { + return getTestConfiguration().getTrimmed(getApplicationHomeKey()); + } + + public List getImageCommands() { + if (switchToImageDeploy) { + // its an image that had better be defined + assertNotNull(getArchivePath()); + if (!imageIsRemote) { + // its not remote, so assert it exists + File f = new File(getArchivePath()); + assertTrue(f.exists()); + return Arrays.asList(Arguments.ARG_IMAGE, f.toURI().toString()); + } else { + assertNotNull(remoteImageURI); + + // if it is remote, then its whatever the archivePath property refers to + return Arrays.asList(Arguments.ARG_IMAGE, remoteImageURI.toString()); + } + } else { + assertNotNull(getApplicationHome()); + assertTrue(new File(getApplicationHome()).exists()); + return Arrays.asList(Arguments.ARG_APP_HOME, getApplicationHome()); + } + } + + /** + * Start a cluster that has already been defined + * + * @param clustername cluster name + * @param extraArgs list of extra args to add to the creation command + * @param blockUntilRunning block until the AM is running + * @return launcher which will have executed the command. 
+ */ + public ServiceLauncher thawCluster(String clustername, + List extraArgs, boolean blockUntilRunning) + throws Throwable { + assertNotNull(clustername); + assertNotEquals("", clustername); + assertNotNull(miniCluster); + + List argsList = new ArrayList<>(); + argsList.addAll(Arrays.asList( + SliderActions.ACTION_THAW, clustername, + Arguments.ARG_MANAGER, getRMAddr(), + Arguments.ARG_WAIT, WAIT_TIME_ARG, + Arguments.ARG_FILESYSTEM, getFsDefaultName() + )); + argsList.addAll(getExtraCLIArgs()); + + if (extraArgs != null) { + argsList.addAll(extraArgs); + } + ServiceLauncher launcher = launchClientAgainstMiniMR( + //config includes RM binding info + new YarnConfiguration(miniCluster.getConfig()), + //varargs list of command line params + argsList + ); + assertEquals(0, launcher.getServiceExitCode()); + SliderClient client = launcher.getService(); + if (blockUntilRunning) { + client.monitorAppToRunning(new Duration(CLUSTER_GO_LIVE_TIME)); + } + return launcher; + } + + /** + * Get the resource configuration dir in the source tree + * + * @return + */ + public File getResourceConfDir() throws FileNotFoundException { + File f = new File(getTestConfigurationPath()).getAbsoluteFile(); + if (!f.exists()) { + throw new FileNotFoundException( + "Resource configuration directory " + f + " not found"); + } + return f; + } + + public String getTestConfigurationPath() { + failNotImplemented(); + return null; + } + + /** + * get a URI string to the resource conf dir that is suitable for passing down + * to the AM -and works even when the default FS is hdfs + */ + public String getResourceConfDirURI() throws FileNotFoundException { + ; + return getResourceConfDir().getAbsoluteFile().toURI().toString(); + } + + /** + * Log an application report + * + * @param report + */ + public void logReport(ApplicationReport report) { + log.info(SliderUtils.reportToString(report)); + } + + /** + * Log a list of application reports + * + * @param apps + */ + public void logApplications(List 
apps) { + for (ApplicationReport r : apps) { + logReport(r); + } + } + + /** + * Wait for the cluster live; fail if it isn't within the (standard) timeout + * + * @param client client + * @return the app report of the live cluster + */ + public ApplicationReport waitForClusterLive(SliderClient client) + throws IOException, YarnException { + return waitForClusterLive(client, CLUSTER_GO_LIVE_TIME); + } + + /** + * force kill the application after waiting for + * it to shut down cleanly + * + * @param client client to talk to + * @return the final application report + */ + public ApplicationReport waitForAppToFinish(SliderClient client) + throws IOException, YarnException { + + int waitTime = getWaitTimeMillis(client.getConfig()); + return waitForAppToFinish(client, waitTime); + } + + public int clusterActionFreeze(SliderClient sliderClient, String clustername) + throws IOException, YarnException { + return clusterActionFreeze(sliderClient, clustername, "action stop"); + } + + public int clusterActionFreeze(SliderClient sliderClient, String clustername, + String message) throws IOException, YarnException { + return clusterActionFreeze(sliderClient, clustername, message, false); + } + + /** + * stop the cluster via the stop action -and wait for {@link #CLUSTER_STOP_TIME} + * for the cluster to stop. 
If it doesn't + * + * @param sliderClient client + * @param clustername cluster + * @return the exit code + */ + public int clusterActionFreeze(SliderClient sliderClient, String clustername, + String message, boolean force) + throws IOException, YarnException { + log.info("Stopping cluster {}: {}", clustername, message); + ActionFreezeArgs freezeArgs = new ActionFreezeArgs(); + freezeArgs.setWaittime(CLUSTER_STOP_TIME); + freezeArgs.message = message; + freezeArgs.force = force; + int exitCode = sliderClient.actionFreeze(clustername, + freezeArgs); + if (exitCode != 0) { + log.warn("Cluster stop failed with error code {}", exitCode); + } + return exitCode; + } + + /** + * Teardown-time cluster termination; will stop the cluster iff the client + * is not null + * + * @param sliderClient client + * @param clustername name of cluster to teardown + * @return + */ + public int maybeStopCluster( + SliderClient sliderClient, + String clustername, + String message, + boolean force) throws IOException, YarnException { + if (sliderClient != null) { + if (SliderUtils.isUnset(clustername)) { + clustername = sliderClient.getDeployedClusterName(); + } + //only stop a cluster that exists + if (SliderUtils.isSet(clustername)) { + return clusterActionFreeze(sliderClient, clustername, message, force); + } + } + return 0; + } + + public String roleMapToString(Map roles) { + StringBuilder builder = new StringBuilder(); + for (Entry entry : roles.entrySet()) { + builder.append(entry.getKey()); + builder.append("->"); + builder.append(entry.getValue()); + builder.append(" "); + } + return builder.toString(); + } + + /** + * Turn on test runs against a copy of the archive that is + * uploaded to HDFS -this method copies up the + * archive then switches the tests into archive mode + */ + public void enableTestRunAgainstUploadedArchive() throws IOException { + Path remotePath = copyLocalArchiveToHDFS(getLocalArchive()); + // image mode + switchToRemoteImageDeploy(remotePath); + } + + /** + 
* Switch to deploying a remote image + * + * @param remotePath the remote path to use + */ + public void switchToRemoteImageDeploy(Path remotePath) { + switchToImageDeploy = true; + imageIsRemote = true; + remoteImageURI = remotePath.toUri(); + } + + /** + * Copy a local archive to HDFS + * + * @param localArchive local archive + * @return the path of the uploaded image + */ + public Path copyLocalArchiveToHDFS(String localArchive) throws IOException { + assertNotNull(localArchive); + File localArchiveFile = new File(localArchive); + assertTrue(localArchiveFile.exists()); + assertNotNull(hdfsCluster); + Path remoteUnresolvedArchive = new Path(localArchiveFile.getName()); + assertTrue(FileUtil.copy( + localArchiveFile, + hdfsCluster.getFileSystem(), + remoteUnresolvedArchive, + false, + getTestConfiguration())); + Path remotePath = hdfsCluster.getFileSystem().resolvePath( + remoteUnresolvedArchive); + return remotePath; + } + + /** + * Create a SliderFileSystem instance bonded to the running FS. 
+ * The YARN cluster must be up and running already + * + * @return + */ + public SliderFileSystem createSliderFileSystem() + throws URISyntaxException, IOException { + FileSystem dfs = + FileSystem.get(new URI(getFsDefaultName()), getConfiguration()); + SliderFileSystem hfs = new SliderFileSystem(dfs, getConfiguration()); + return hfs; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/YarnZKMiniClusterTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/YarnZKMiniClusterTestBase.java new file mode 100644 index 0000000..c449d6f3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/YarnZKMiniClusterTestBase.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.slider.test; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.common.tools.SliderUtils; +import org.apache.slider.core.zk.BlockingZKWatcher; +import org.apache.slider.core.zk.ZKIntegration; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.apache.slider.test.KeysForTests.USERNAME; + +/** + * Base class for mini cluster tests that use Zookeeper + */ +public abstract class YarnZKMiniClusterTestBase extends YarnMiniClusterTestBase { + + protected MicroZKCluster microZKCluster; + + public void stopMiniCluster() { + super.stopMiniCluster(); + IOUtils.closeStream(microZKCluster); + } + + public ZKIntegration createZKIntegrationInstance(String zkQuorum, + String clusterName, + boolean createClusterPath, + boolean canBeReadOnly, + int timeout) throws IOException, InterruptedException { + int sessionTimeout = ZKIntegration.SESSION_TIMEOUT; + + BlockingZKWatcher watcher = new BlockingZKWatcher(); + ZKIntegration zki = ZKIntegration.newInstance(zkQuorum, + USERNAME, + clusterName, + createClusterPath, + canBeReadOnly, + watcher, + sessionTimeout); + boolean fromCache = zki.init(); + //here the callback may or may not have occurred. 
+ //optionally wait for it + if (timeout > 0 && !fromCache) { + watcher.waitForZKConnection(timeout); + } + //if we get here, the binding worked + log.info("Connected: {}", zki); + return zki; + } + + /** + * Wait for a flag to go true + * @param connectedFlag + */ + public void waitForZKConnection(AtomicBoolean connectedFlag, int timeout) + throws InterruptedException { + synchronized (connectedFlag) { + if (!connectedFlag.get()) { + log.info("waiting for ZK event"); + //wait a bit + connectedFlag.wait(timeout); + } + } + assertTrue(connectedFlag.get()); + } + + /** + * Create and start a minicluster with ZK + * @param name cluster/test name + * @param conf configuration to use + * @param noOfNodeManagers #of NMs + * @param numLocalDirs #of local dirs + * @param numLogDirs #of log dirs + * @param startZK create a ZK micro cluster *THIS IS IGNORED* + * @param startHDFS create an HDFS mini cluster + */ + protected String createMiniCluster(String name, + YarnConfiguration conf, + int noOfNodeManagers, + int numLocalDirs, + int numLogDirs, + boolean startZK, + boolean startHDFS) throws IOException { + if (SliderUtils.isUnset(name)) { + name = methodName.getMethodName(); + } + createMicroZKCluster("-" + name, conf); + conf.setBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, true); + conf.set(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, getZKBinding()); + //now create the cluster + name = super.createMiniCluster(name, conf, noOfNodeManagers, numLocalDirs, numLogDirs, + startHDFS); + + return name; + } + + /** + * Create and start a minicluster + * @param name cluster/test name + * @param conf configuration to use + * @param noOfNodeManagers #of NMs + * @param startZK create a ZK micro cluster + */ + protected String createMiniCluster(String name, + YarnConfiguration conf, + int noOfNodeManagers, + boolean startZK) throws IOException { + return createMiniCluster(name, conf, noOfNodeManagers, 1, 1, startZK, + false); + } + + /** + * Create and start a minicluster with the 
name from the test method + * @param conf configuration to use + * @param noOfNodeManagers #of NMs + * @param startZK create a ZK micro cluster + */ + protected String createMiniCluster(YarnConfiguration conf, + int noOfNodeManagers, + boolean startZK) throws IOException { + return createMiniCluster("", conf, noOfNodeManagers, 1, 1, startZK, + false); + } + + public void createMicroZKCluster(String name, Configuration conf) { + microZKCluster = new MicroZKCluster(new Configuration(conf)); + microZKCluster.createCluster(name); + } + + public void assertHasZKCluster() { + assertNotNull(microZKCluster); + } + + public String getZKBinding() { + if (microZKCluster == null) { + return "localhost:1"; + } else { + return microZKCluster.zkBindingString; + } + } + + /** + * CLI args include all the ZK bindings needed + * @return + */ + protected List getExtraCLIArgs() { + return Arrays.asList( + "-D", define(RegistryConstants.KEY_REGISTRY_ZK_QUORUM, getZKBinding()) + ); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/json/cluster.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/json/cluster.json new file mode 100644 index 0000000..3251b42 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/json/cluster.json @@ -0,0 +1,266 @@ +{ + "version": "1.0", + "name": "test-aa-sleep", + "type": "agent", + "state": 3, + "createTime": 1455829237388, + "updateTime": 1455829240086, + "originConfigurationPath": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/snapshot", + "generatedConfigurationPath": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/generated", + "dataPath": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/database", + "options": { + "internal.provider.name": "agent", 
+ "internal.container.failure.shortlife": "60000", + "internal.container.failure.threshold": "5", + "site.global.security_enabled": "true", + "zookeeper.quorum": "slider-3.cluster:2181,slider-2.cluster:2181,slider-1.cluster:2181", + "slider.cluster.directory.permissions": "0770", + "internal.generated.conf.path": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/generated", + "env.MALLOC_ARENA_MAX": "4", + "internal.tmp.dir": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/tmp", + "slider.data.directory.permissions": "0770", + "internal.am.tmp.dir": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/tmp/appmaster", + "internal.snapshot.conf.path": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/snapshot", + "application.def": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/appdef/appPkg.zip", + "zookeeper.hosts": "slider-3.cluster,slider-2.cluster,slider-1.cluster", + "zookeeper.path": "/services/slider/users/qe/test-aa-sleep", + "internal.data.dir.path": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/database", + "internal.addons.dir.path": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/tmp/addons", + "internal.application.image.path": null, + "internal.appdef.dir.path": "hdfs://slider-3.cluster:8020/user/qe/.slider/cluster/test-aa-sleep/tmp/appdef", + "site.fs.defaultFS": "hdfs://slider-3.cluster:8020", + "internal.application.home": null, + "site.dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM", + "site.fs.default.name": "hdfs://slider-3.cluster:8020", + "application.name": "test-aa-sleep" + }, + "info": { + "yarn.vcores": "1", + "info.am.app.id": "application_1455824487784_0020", + "yarn.memory": "10240", + "info.am.web.url": "http://slider-3.cluster:1025/", + "info.am.rpc.port": "1024", + "info.am.hostname": "slider-3.cluster", + "info.am.web.port": "1025", + "info.am.container.id": 
"container_e01_1455824487784_0020_01_000001", + "info.am.attempt.id": "appattempt_1455824487784_0020_000001", + "live.time": "18 Feb 2016 21:00:37 GMT", + "live.time.millis": "1455829237388", + "create.time": "18 Feb 2016 21:00:37 GMT", + "create.time.millis": "1455829237388", + "containers.at.am-restart": "0", + "status.time": "18 Feb 2016 21:02:38 GMT", + "status.time.millis": "1455829358648", + "info.am.agent.status.url": "https://slider-3.cluster:46914/", + "info.am.agent.ops.url": "https://slider-3.cluster:39678/", + "info.am.agent.ops.port": "39678", + "info.am.agent.status.port": "46914" + }, + "statistics": { + "SLEEP_100": { + "containers.failed.preempted": 0, + "containers.completed": 0, + "containers.failed": 0, + "containers.requested": 1, + "containers.desired": 1, + "containers.start.failed": 0, + "containers.failed.node": 0, + "containers.start.started": 0, + "containers.active.requests": 0, + "containers.failed.recently": 0, + "containers.live": 1, + "containers.anti-affine.pending": 0 + }, + "SLEEP_LONG": { + "containers.failed.preempted": 0, + "containers.completed": 0, + "containers.failed": 0, + "containers.requested": 4, + "containers.desired": 5, + "containers.start.failed": 0, + "containers.failed.node": 0, + "containers.start.started": 0, + "containers.active.requests": 0, + "containers.failed.recently": 0, + "containers.live": 4, + "containers.anti-affine.pending": 1 + }, + "slider-appmaster": { + "containers.completed": 0, + "containers.failed": 0, + "containers.unknown.completed": 0, + "containers.surplus": 0, + "containers.start.failed": 0, + "containers.start.started": 5, + "containers.live": 6 + } + }, + "instances": { + "SLEEP_100": [ + "container_e01_1455824487784_0020_01_000003" + ], + "SLEEP_LONG": [ + "container_e01_1455824487784_0020_01_000005", + "container_e01_1455824487784_0020_01_000004", + "container_e01_1455824487784_0020_01_000006", + "container_e01_1455824487784_0020_01_000002" + ], + "slider-appmaster": [ + 
"container_e01_1455824487784_0020_01_000001" + ] + }, + "roles": { + "slider-appmaster": { + "yarn.vcores": "1", + "yarn.memory": "256", + "role.releasing.instances": "0", + "role.failed.node.instances": "0", + "role.requested.instances": "0", + "role.actual.instances": "1", + "role.failed.recently.instances": "0", + "role.failed.starting.instances": "0", + "yarn.component.instances": "1", + "slider.keytab.principal.name": "qe@EXAMPLE.COM", + "role.failed.preempted.instances": "0", + "role.failed.instances": "0" + } + }, + "clientProperties": {}, + "status": { + "live": { + "SLEEP_100": { + "container_e01_1455824487784_0020_01_000003": { + "name": "container_e01_1455824487784_0020_01_000003", + "role": "SLEEP_100", + "roleId": 1, + "createTime": 1455829241537, + "startTime": 1455829241657, + "released": false, + "host": "slider-3.cluster", + "hostUrl": "http://slider-3.cluster:8042", + "state": 3, + "exitCode": 0, + "command": "python ./infra/agent/slider-agent/agent/main.py --label container_e01_1455824487784_0020_01_000003___SLEEP_100 --zk-quorum slider-3.cluster:2181,slider-2.cluster:2181,slider-1.cluster:2181 --zk-reg-path /registry/users/qe/services/org-apache-slider/test-aa-sleep > /slider-agent.out 2>&1 ; ", + "environment": [ + "LANGUAGE=\"en_US.UTF-8\"", + "PYTHONPATH=\"./infra/agent/slider-agent/\"", + "AGENT_LOG_ROOT=\"\"", + "SLIDER_PASSPHRASE=\"ucaEi2Qyp3dRIz96wbSwU1SMv2SOesoFCAFYJAtGnQ4XqU32t0\"", + "LC_ALL=\"en_US.UTF-8\"", + "AGENT_WORK_ROOT=\"$PWD\"", + "LANG=\"en_US.UTF-8\"" + ] + } + }, + "SLEEP_LONG": { + "container_e01_1455824487784_0020_01_000006": { + "name": "container_e01_1455824487784_0020_01_000006", + "role": "SLEEP_LONG", + "roleId": 3, + "createTime": 1455829247516, + "startTime": 1455829247567, + "released": false, + "host": "slider-5.cluster", + "hostUrl": "http://slider-5.cluster:8042", + "state": 3, + "exitCode": 0, + "command": "python ./infra/agent/slider-agent/agent/main.py --label 
container_e01_1455824487784_0020_01_000006___SLEEP_LONG --zk-quorum slider-3.cluster:2181,slider-2.cluster:2181,slider-1.cluster:2181 --zk-reg-path /registry/users/qe/services/org-apache-slider/test-aa-sleep > /slider-agent.out 2>&1 ; ", + "environment": [ + "LANGUAGE=\"en_US.UTF-8\"", + "PYTHONPATH=\"./infra/agent/slider-agent/\"", + "AGENT_LOG_ROOT=\"\"", + "SLIDER_PASSPHRASE=\"ucaEi2Qyp3dRIz96wbSwU1SMv2SOesoFCAFYJAtGnQ4XqU32t0\"", + "LC_ALL=\"en_US.UTF-8\"", + "AGENT_WORK_ROOT=\"$PWD\"", + "LANG=\"en_US.UTF-8\"" + ] + }, + "container_e01_1455824487784_0020_01_000004": { + "name": "container_e01_1455824487784_0020_01_000004", + "role": "SLEEP_LONG", + "roleId": 3, + "createTime": 1455829243488, + "startTime": 1455829243538, + "released": false, + "host": "slider-1.cluster", + "hostUrl": "http://slider-1.cluster:8042", + "state": 3, + "exitCode": 0, + "command": "python ./infra/agent/slider-agent/agent/main.py --label container_e01_1455824487784_0020_01_000004___SLEEP_LONG --zk-quorum slider-3.cluster:2181,slider-2.cluster:2181,slider-1.cluster:2181 --zk-reg-path /registry/users/qe/services/org-apache-slider/test-aa-sleep > /slider-agent.out 2>&1 ; ", + "environment": [ + "LANGUAGE=\"en_US.UTF-8\"", + "PYTHONPATH=\"./infra/agent/slider-agent/\"", + "AGENT_LOG_ROOT=\"\"", + "SLIDER_PASSPHRASE=\"ucaEi2Qyp3dRIz96wbSwU1SMv2SOesoFCAFYJAtGnQ4XqU32t0\"", + "LC_ALL=\"en_US.UTF-8\"", + "AGENT_WORK_ROOT=\"$PWD\"", + "LANG=\"en_US.UTF-8\"" + ] + }, + "container_e01_1455824487784_0020_01_000005": { + "name": "container_e01_1455824487784_0020_01_000005", + "role": "SLEEP_LONG", + "roleId": 3, + "createTime": 1455829245506, + "startTime": 1455829245565, + "released": false, + "host": "slider-2.cluster", + "hostUrl": "http://slider-2.cluster:8042", + "state": 3, + "exitCode": 0, + "command": "python ./infra/agent/slider-agent/agent/main.py --label container_e01_1455824487784_0020_01_000005___SLEEP_LONG --zk-quorum slider-3.cluster:2181,slider-2.cluster:2181,slider-1.cluster:2181 
--zk-reg-path /registry/users/qe/services/org-apache-slider/test-aa-sleep > /slider-agent.out 2>&1 ; ", + "environment": [ + "LANGUAGE=\"en_US.UTF-8\"", + "PYTHONPATH=\"./infra/agent/slider-agent/\"", + "AGENT_LOG_ROOT=\"\"", + "SLIDER_PASSPHRASE=\"ucaEi2Qyp3dRIz96wbSwU1SMv2SOesoFCAFYJAtGnQ4XqU32t0\"", + "LC_ALL=\"en_US.UTF-8\"", + "AGENT_WORK_ROOT=\"$PWD\"", + "LANG=\"en_US.UTF-8\"" + ] + }, + "container_e01_1455824487784_0020_01_000002": { + "name": "container_e01_1455824487784_0020_01_000002", + "role": "SLEEP_LONG", + "roleId": 3, + "createTime": 1455829241551, + "startTime": 1455829241817, + "released": false, + "host": "slider-3.cluster", + "hostUrl": "http://slider-3.cluster:8042", + "state": 3, + "exitCode": 0, + "command": "python ./infra/agent/slider-agent/agent/main.py --label container_e01_1455824487784_0020_01_000002___SLEEP_LONG --zk-quorum slider-3.cluster:2181,slider-2.cluster:2181,slider-1.cluster:2181 --zk-reg-path /registry/users/qe/services/org-apache-slider/test-aa-sleep > /slider-agent.out 2>&1 ; ", + "environment": [ + "LANGUAGE=\"en_US.UTF-8\"", + "PYTHONPATH=\"./infra/agent/slider-agent/\"", + "AGENT_LOG_ROOT=\"\"", + "SLIDER_PASSPHRASE=\"ucaEi2Qyp3dRIz96wbSwU1SMv2SOesoFCAFYJAtGnQ4XqU32t0\"", + "LC_ALL=\"en_US.UTF-8\"", + "AGENT_WORK_ROOT=\"$PWD\"", + "LANG=\"en_US.UTF-8\"" + ] + } + }, + "slider-appmaster": { + "container_e01_1455824487784_0020_01_000001": { + "name": "container_e01_1455824487784_0020_01_000001", + "role": "slider-appmaster", + "roleId": 0, + "createTime": 1455829237437, + "startTime": 1455829237437, + "released": false, + "host": "slider-3.cluster", + "hostUrl": "http://slider-3.cluster:1025", + "state": 3, + "exitCode": 0 + } + } + } + }, + "liveness": { + "allRequestsSatisfied": false, + "requestsOutstanding": 1, + "activeRequests": 0 + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties new file mode 100644 index 0000000..3adbaa4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/log4j.properties @@ -0,0 +1,66 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# log4j configuration used during build and unit tests + +log4j.rootLogger=INFO,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n + +log4j.appender.subprocess=org.apache.log4j.ConsoleAppender +log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout +log4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n +#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess + +log4j.logger.org.apache.slider=DEBUG +log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG +log4j.logger.org.apache.hadoop.yarn.registry=DEBUG + +#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG +#log4j.logger.org.apache.hadoop.yarn.service=DEBUG +#log4j.logger.org.apache.hadoop.yarn.client=DEBUG + +#crank back on some noise +log4j.logger.org.apache.hadoop.ipc.CallQueueManager=WARN + +log4j.logger.org.apache.hadoop.util.Shell=ERROR +log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR +log4j.logger.org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager=FATAL +log4j.logger.org.apache.hadoop.security.authentication.server.AuthenticationFilter=WARN +log4j.logger.org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner=WARN +log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement=WARN +log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN +log4j.logger.org.apache.hadoop.hdfs=WARN +log4j.logger.BlockStateChange=WARN + +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN +log4j.logger.org.apache.zookeeper=WARN +log4j.logger.org.apache.zookeeper.ClientCnxn=FATAL + +log4j.logger.org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl=WARN 
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeResourceMonitorImpl=ERROR +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.security=WARN +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher=WARN +log4j.logger.org.apache.hadoop.metrics2=ERROR +log4j.logger.org.apache.hadoop.util.HostsFileReader=WARN +log4j.logger.org.apache.hadoop.yarn.event.AsyncDispatcher=WARN +log4j.logger.org.apache.hadoop.security.token.delegation=WARN +log4j.logger.org.apache.hadoop.yarn.util.AbstractLivelinessMonitor=WARN +log4j.logger.org.apache.hadoop.yarn.server.nodemanager.security=WARN +log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo=WARN diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration-resolved.json new file mode 100644 index 0000000..5b90ba9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration-resolved.json @@ -0,0 +1,42 @@ +{ + "schema": "http://example.org/specification/v2.0.0", + + "global": { + "zookeeper.port": "2181", + "zookeeper.path": "/yarnapps_small_cluster", + "zookeeper.hosts": "zoo1,zoo2,zoo3", + "env.MALLOC_ARENA_MAX": "4", + "site.hbase.master.startup.retainassign": "true", + "site.fs.defaultFS": "hdfs://cluster:8020", + "site.fs.default.name": "hdfs://cluster:8020", + "site.hbase.master.info.port": "0", + "site.hbase.regionserver.info.port": "0" + }, + "components": { + + "worker": { + "zookeeper.port": "2181", + "zookeeper.path": "/yarnapps_small_cluster", + "zookeeper.hosts": "zoo1,zoo2,zoo3", + "env.MALLOC_ARENA_MAX": "4", + 
"site.hbase.master.startup.retainassign": "true", + "site.fs.defaultFS": "hdfs://cluster:8020", + "site.fs.default.name": "hdfs://cluster:8020", + "site.hbase.master.info.port": "0", + "site.hbase.regionserver.info.port": "0", + "jvm.heapsize": "512M" + }, + "master": { + "zookeeper.port": "2181", + "zookeeper.path": "/yarnapps_small_cluster", + "zookeeper.hosts": "zoo1,zoo2,zoo3", + "env.MALLOC_ARENA_MAX": "4", + "site.hbase.master.startup.retainassign": "true", + "site.fs.defaultFS": "hdfs://cluster:8020", + "site.fs.default.name": "hdfs://cluster:8020", + "site.hbase.master.info.port": "0", + "site.hbase.regionserver.info.port": "0", + "jvm.heapsize": "512M" + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration.json new file mode 100644 index 0000000..5690225 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration.json @@ -0,0 +1,27 @@ +{ + "schema": "http://example.org/specification/v2.0.0", + "metadata": { + "description": "org/apache/slider/core/conf/examples/app_configuration.json" + }, + "global": { + + "zookeeper.port": "2181", + "zookeeper.path": "/yarnapps_small_cluster", + "zookeeper.hosts": "zoo1,zoo2,zoo3", + "env.MALLOC_ARENA_MAX": "4", + "site.hbase.master.startup.retainassign": "true", + "site.fs.defaultFS": "hdfs://cluster:8020", + "site.fs.default.name": "hdfs://cluster:8020", + "site.hbase.master.info.port": "0", + "site.hbase.regionserver.info.port": "0" + }, + "components": { + + "worker": { + "jvm.heapsize": "512M" + }, + "master": { + "jvm.heapsize": "512M" + 
} + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration_tokenized.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration_tokenized.json new file mode 100644 index 0000000..b902469 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/app_configuration_tokenized.json @@ -0,0 +1,27 @@ +{ + "schema": "http://example.org/specification/v2.0.0", + + "global": { + + "zookeeper.port": "2181", + "zookeeper.path": "/yarnapps_small_cluster", + "zookeeper.hosts": "zoo1,zoo2,zoo3", + "env.MALLOC_ARENA_MAX": "4", + "site.hbase.master.startup.retainassign": "true", + "site.fs.defaultFS": "hdfs://${CLUSTER_NAME}:8020", + "site.fs.default.name": "hdfs://${CLUSTER_NAME}:8020", + "site.hbase.master.info.port": "0", + "site.hbase.regionserver.info.port": "0", + "site.hbase.user_name": "${USER}", + "site.hbase.another.user": "${USER_NAME}" + }, + "components": { + + "worker": { + "jvm.heapsize": "512M" + }, + "master": { + "jvm.heapsize": "512M" + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/empty.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/empty.json new file mode 100644 index 0000000..73f5180 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/empty.json @@ -0,0 +1,8 @@ +{ + 
"schema": "http://example.org/specification/v2.0.0", + + "global": { + }, + "components": { + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/internal-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/internal-resolved.json new file mode 100644 index 0000000..da53b94 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/internal-resolved.json @@ -0,0 +1,24 @@ +{ + "schema": "http://example.org/specification/v2.0.0", + + "metadata": { + "description": "Internal resolved - org/apache/slider/core/conf/examples/internal-resolved.json" + }, + "global": { + "application.name": "small_cluster", + "application.type": "hbase", + "application": "hdfs://cluster:8020/apps/hbase/v/1.0.0/application.tar" + }, + "components": { + + "diagnostics": { + "application.name": "small_cluster", + "application.type": "hbase", + "application": "hdfs://cluster:8020/apps/hbase/v/1.0.0/application.tar", + "create.hadoop.deployed.info": "(release-2.3.0) @dfe463", + "create.hadoop.build.info": "2.3.0", + "create.time.millis": "1393512091276", + "create.time": "27 Feb 2014 14:41:31 GMT" + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/internal.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/internal.json new file mode 100644 index 0000000..b628d10 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/internal.json @@ -0,0 +1,24 @@ +{ + "schema": "http://example.org/specification/v2.0.0", + + "metadata": { + "description": "Internal unresolved - org/apache/slider/core/conf/examples/internal.json" + }, + "global": { + "application.name": "small_cluster", + "application.type": "hbase", + "application": "hdfs://cluster:8020/apps/hbase/v/1.0.0/application.tar", + "internal.chaos.monkey.probability.amlaunchfailure": "10000", + "internal.chaos.monkey.interval.seconds": "60", + "internal.chaos.monkey.enabled": "true" + }, + "components": { + + "diagnostics": { + "create.hadoop.deployed.info": "(release-2.3.0) @dfe463", + "create.hadoop.build.info": "2.3.0", + "create.time.millis": "1393512091276", + "create.time": "27 Feb 2014 14:41:31 GMT" + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/overridden-resolved.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/overridden-resolved.json new file mode 100644 index 0000000..edd941f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/overridden-resolved.json @@ -0,0 +1,25 @@ +{ + "schema": "http://example.org/specification/v2.0.0", + + "global": { + "g1": "a", + "g2": "b" + }, + "components": { + "simple": { + "g1": "a", + "g2": "b" + }, + "master": { + "name": "m", + "g1": "overridden", + "g2": "b" + }, + "worker": { + "name": "worker", + "g1": "overridden-by-worker", + "g2": "b", + "timeout": "1000" + } + } +} \ No newline at end of file diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/overridden.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/overridden.json new file mode 100644 index 0000000..8237ad4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/overridden.json @@ -0,0 +1,23 @@ +{ + "schema": "http://example.org/specification/v2.0.0", + + "global": { + "g1": "a", + "g2": "b" + }, + "components": { + "simple": { + }, + "master": { + "name": "m", + "g1": "overridden" + + }, + "worker": { + "name": "worker", + "g1": "overridden-by-worker", + "timeout": "1000" + + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/resources.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/resources.json new file mode 100644 index 0000000..83001e1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/core/conf/examples/resources.json @@ -0,0 +1,25 @@ +{ + "schema": "http://example.org/specification/v2.0.0", + + "metadata": { + "description": "example of a resources file: org/apache/slider/core/conf/examples/resources.json" + }, + + "global": { + "yarn.vcores": "1", + "yarn.memory": "512" + }, + + "components": { + "master": { + "yarn.component.instances": "1", + "yarn.vcores": "1", + "yarn.memory": "1024" + }, + "worker": { + "yarn.component.instances":"5", + "yarn.vcores": "1", + "yarn.memory": "512" + } 
+ } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json new file mode 100644 index 0000000..bc6429c --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/appmaster/web/rest/registry/sample.json @@ -0,0 +1,9 @@ +{ + "nodes": ["/users/example/services/org-apache-slider/test-registry-rest-resources/components"], "service": { + "description": "Slider Application Master", + "yarn:id": "application_1411664296263_0001", + "yarn:persistence": 1, + "external": [], + "internal": [] +} +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json new file mode 100644 index 0000000..ceab0a5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-3-role.json @@ -0,0 +1,6 @@ +{"entry":{"org.apache.slider.server.avro.RoleHistoryHeader":{"version":1,"saved":1415296260647,"savedx":"149863b1a27","savedate":"6 Nov 2014 17:51:00 GMT","roles":3}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":false,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":false,"last_used":0}}} 
+{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":0,"active":false,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":2,"active":true,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.RoleHistoryFooter":{"count":4}}} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json new file mode 100644 index 0000000..f1c53d5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history-v01-6-role.json @@ -0,0 +1,8 @@ +{"entry":{"org.apache.slider.server.avro.RoleHistoryHeader":{"version":1,"saved":1415296260647,"savedx":"149863b1a27","savedate":"6 Nov 2014 17:51:00 GMT","roles":6}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":1,"active":false,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":2,"active":false,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.85","role":0,"active":false,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":4,"active":true,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":5,"active":true,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.NodeEntryRecord":{"host":"192.168.1.86","role":6,"active":true,"last_used":0}}} +{"entry":{"org.apache.slider.server.avro.RoleHistoryFooter":{"count":6}}} \ No newline at end of file diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json new file mode 100644 index 0000000..67d644f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/resources/org/apache/slider/server/avro/history_v01b_1_role.json @@ -0,0 +1,38 @@ +{ + "entry": { + "org.apache.slider.server.avro.RoleHistoryHeader": { + "version": 1, + "saved": 1450435691617, + "savedx": "151b4b44461", + "savedate": "18 Dec 2015 10:48:11 GMT", + "roles": 2 + } + } +} +{ + "entry": { + "org.apache.slider.server.avro.RoleHistoryMapping": { + "rolemap": { + "echo": 1, + "slider-appmaster": 0 + } + } + } +} +{ + "entry": { + "org.apache.slider.server.avro.NodeEntryRecord": { + "host": "192.168.56.1", + "role": 1, + "active": true, + "last_used": 0 + } + } +} +{ + "entry": { + "org.apache.slider.server.avro.RoleHistoryFooter": { + "count": 1 + } + } +}