diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
index ee77ecb..f02f7bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
@@ -322,16 +322,38 @@
easymock
3.1
test
+
+
+ org.objenesis
+ objenesis
+
+
org.powermock
powermock-api-easymock
- 1.5
+ 1.6.5
test
+ org.powermock
+ powermock-module-junit4
+ 1.6.5
+
+
+ org.javassist
+ javassist
+
+
+ org.objenesis
+ objenesis
+
+
+
+
+
javax.servlet.jsp
jsp-api
runtime
@@ -354,6 +376,13 @@
swagger-annotations
1.5.4
+
+
+ org.apache.hadoop
+ hadoop-minicluster
+ test
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
index ef96c9b..0c9b52c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/zk/ZKIntegration.java
@@ -337,5 +337,11 @@ public static String mkSliderUserPath(String username) {
return SVC_SLIDER_USERS + "/" + username;
}
-
+ /**
+   * Blocking enumeration of the clusters registered under this user's path
+ * @return an unordered list of clusters under a user
+ */
+ public List getClusters() throws KeeperException, InterruptedException {
+ return zookeeper.getChildren(userPath, null);
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
index 9a89c39..c5394fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/security/SecurityConfiguration.java
@@ -17,6 +17,7 @@
package org.apache.slider.server.appmaster.security;
import com.google.common.base.Preconditions;
+import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import static org.apache.slider.core.main.LauncherExitCodes.EXIT_UNAUTHORIZED;
@@ -126,13 +127,14 @@ public String getPrincipal () throws IOException {
}
public boolean isKeytabProvided() {
- boolean keytabProvided = instanceDefinition.getAppConfOperations()
- .getComponent(SliderKeys.COMPONENT_AM)
- .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH) != null ||
- instanceDefinition.getAppConfOperations()
- .getComponent(SliderKeys.COMPONENT_AM).
- get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME) != null;
- return keytabProvided;
+ String keytabLocalPath = instanceDefinition.getAppConfOperations()
+ .getComponent(SliderKeys.COMPONENT_AM)
+ .get(SliderXmlConfKeys.KEY_AM_KEYTAB_LOCAL_PATH);
+ String keytabName = instanceDefinition.getAppConfOperations()
+ .getComponent(SliderKeys.COMPONENT_AM)
+ .get(SliderXmlConfKeys.KEY_AM_LOGIN_KEYTAB_NAME);
+ return StringUtils.isNotBlank(keytabLocalPath)
+ || StringUtils.isNotBlank(keytabName);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 6f54959..b00a610 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -448,6 +448,11 @@ public RoleHistory getRoleHistory() {
return roleHistory;
}
+ @VisibleForTesting
+ public void setRoleHistory(RoleHistory roleHistory) {
+ this.roleHistory = roleHistory;
+ }
+
/**
* Get the path used for history files
* @return the directory used for history files
@@ -1979,7 +1984,7 @@ private void checkFailureThreshold(RoleStatus role)
role.getName(), failures, threshold);
}
- if (failures > threshold) {
+ if (threshold > 0 && failures > threshold) {
throw new TriggerClusterTeardownException(
SliderExitCodes.EXIT_DEPLOYMENT_FAILED,
FinalApplicationStatus.FAILED, ErrorStrings.E_UNSTABLE_CLUSTER +
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java
new file mode 100644
index 0000000..635ba68
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/api/TestRPCBinding.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.server.appmaster.rpc.RpcBinder;
+import org.apache.slider.server.appmaster.rpc.SliderClusterProtocolPB;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests RPC work
+ */
+public class TestRPCBinding {
+
+ @Test
+ public void testRegistration() throws Throwable {
+ Configuration conf = new Configuration();
+ RpcBinder.registerSliderAPI(conf);
+ assertTrue(RpcBinder.verifyBondedToProtobuf(conf, SliderClusterProtocolPB.class));
+ }
+
+ @Test
+ public void testGetProxy() throws Throwable {
+ Configuration conf = new Configuration();
+ InetSocketAddress saddr= new InetSocketAddress("127.0.0.1",9000);
+ SliderClusterProtocol proxy =
+ RpcBinder.connectToServer(saddr, null, conf, 1000);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java
new file mode 100644
index 0000000..df95ac2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBadArgs.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.UsageException;
+import org.apache.slider.core.main.ServiceLauncherBaseTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+/**
+ * Test the argument parsing/validation logic
+ */
+public class TestClientBadArgs extends ServiceLauncherBaseTest {
+
+ @Test
+ public void testNoAction() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "Usage: slider COMMAND",
+ EMPTY_LIST);
+
+ }
+
+ @Test
+ public void testUnknownAction() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "not-a-known-action",
+ Arrays.asList("not-a-known-action"));
+ }
+
+ @Test
+ public void testActionWithoutOptions() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "Usage: slider build ",
+ Arrays.asList(SliderActions.ACTION_BUILD));
+ }
+
+ @Test
+ public void testActionWithoutEnoughArgs() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ ErrorStrings.ERROR_NOT_ENOUGH_ARGUMENTS,
+ Arrays.asList(SliderActions.ACTION_THAW));
+ }
+
+ @Test
+ public void testActionWithTooManyArgs() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ ErrorStrings.ERROR_TOO_MANY_ARGUMENTS,
+ Arrays.asList(SliderActions.ACTION_HELP,
+ "hello, world"));
+ }
+
+ @Test
+ public void testBadImageArg() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "Unknown option: --image",
+ Arrays.asList(SliderActions.ACTION_HELP,
+ Arguments.ARG_IMAGE));
+ }
+
+ @Test
+ public void testRegistryUsage() throws Throwable {
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "org.apache.slider.core.exceptions.UsageException: Argument --name missing",
+ Arrays.asList(SliderActions.ACTION_REGISTRY));
+ assertTrue(exception instanceof UsageException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testRegistryExportBadUsage1() throws Throwable {
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "Expected a value after parameter --getexp",
+ Arrays.asList(SliderActions.ACTION_REGISTRY,
+ Arguments.ARG_NAME,
+ "cl1",
+ Arguments.ARG_GETEXP));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testRegistryExportBadUsage2() throws Throwable {
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "Expected a value after parameter --getexp",
+ Arrays.asList(SliderActions.ACTION_REGISTRY,
+ Arguments.ARG_NAME,
+ "cl1",
+ Arguments.ARG_LISTEXP,
+ Arguments.ARG_GETEXP));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testRegistryExportBadUsage3() throws Throwable {
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "Usage: registry",
+ Arrays.asList(SliderActions.ACTION_REGISTRY,
+ Arguments.ARG_NAME,
+ "cl1",
+ Arguments.ARG_LISTEXP,
+ Arguments.ARG_GETEXP,
+ "export1"));
+ assertTrue(exception instanceof UsageException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testUpgradeUsage() throws Throwable {
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "org.apache.slider.core.exceptions.BadCommandArgumentsException: Not enough arguments for action: upgrade Expected minimum 1 but got 0",
+ Arrays.asList(SliderActions.ACTION_UPGRADE));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testUpgradeWithTemplateOptionOnly() throws Throwable {
+ String appName = "test_hbase";
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "BadCommandArgumentsException: Option --resources must be specified with option --template",
+ Arrays.asList(SliderActions.ACTION_UPGRADE,
+ appName,
+ Arguments.ARG_TEMPLATE,
+ "/tmp/appConfig.json"
+ ));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ log.info(exception.toString());
+ }
+
+ public Configuration createTestConfig() {
+ Configuration configuration = new Configuration();
+ configuration.set(YarnConfiguration.RM_ADDRESS, "127.0.0.1:8032");
+ return configuration;
+ }
+
+ @Test
+ public void testUpgradeWithResourcesOptionOnly() throws Throwable {
+ String appName = "test_hbase";
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "BadCommandArgumentsException: Option --template must be specified with option --resources",
+ Arrays.asList(SliderActions.ACTION_UPGRADE,
+ appName,
+ Arguments.ARG_RESOURCES,
+ "/tmp/resources.json"
+ ));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testUpgradeWithTemplateResourcesAndContainersOption() throws Throwable {
+ String appName = "test_hbase";
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "BadCommandArgumentsException: Option --containers cannot be "
+ + "specified with --template or --resources",
+ Arrays.asList(SliderActions.ACTION_UPGRADE,
+ appName,
+ Arguments.ARG_TEMPLATE,
+ "/tmp/appConfig.json",
+ Arguments.ARG_RESOURCES,
+ "/tmp/resources.json",
+ Arguments.ARG_CONTAINERS,
+ "container_1"
+ ));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testUpgradeWithTemplateResourcesAndComponentsOption() throws Throwable {
+ String appName = "test_hbase";
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "BadCommandArgumentsException: Option --components cannot be "
+ + "specified with --template or --resources",
+ Arrays.asList(SliderActions.ACTION_UPGRADE,
+ appName,
+ Arguments.ARG_TEMPLATE,
+ "/tmp/appConfig.json",
+ Arguments.ARG_RESOURCES,
+ "/tmp/resources.json",
+ Arguments.ARG_COMPONENTS,
+ "HBASE_MASTER"
+ ));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testCreateAppWithAddonPkgBadArg1() throws Throwable {
+ //add on package without specifying add on package name
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "Expected 2 values after --addon",
+ Arrays.asList(SliderActions.ACTION_CREATE,
+ "cl1",
+ Arguments.ARG_ADDON,
+ "addon1"));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ log.info(exception.toString());
+ }
+
+ @Test
+ public void testNodesMissingFile() throws Throwable {
+ Throwable exception = launchExpectingException(SliderClient.class,
+ createTestConfig(),
+ "after parameter --out",
+ Arrays.asList(SliderActions.ACTION_NODES, Arguments.ARG_OUTPUT));
+ assertTrue(exception instanceof BadCommandArgumentsException);
+ }
+
+ @Test
+ public void testFlexWithNoComponents() throws Throwable {
+ Throwable exception = launchExpectingException(SliderClient.class,
+ new Configuration(),
+ "Usage: slider flex ",
+ Arrays.asList(
+ SliderActions.ACTION_FLEX,
+ "flex1",
+ Arguments.ARG_DEFINE, YarnConfiguration.RM_ADDRESS + "=127.0.0.1:8032"
+ ));
+ assertTrue(exception instanceof UsageException);
+ log.info(exception.toString());
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java
new file mode 100644
index 0000000..400e8d9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestClientBasicArgs.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.ClientArgs;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.core.main.ServiceLauncherBaseTest;
+import org.junit.Test;
+
+import java.net.UnknownHostException;
+import java.util.Arrays;
+
+/**
+ * Test basic client argument handling
+ */
+public class TestClientBasicArgs extends ServiceLauncherBaseTest {
+
+ /**
+ * help should print out help string and then succeed
+ * @throws Throwable
+ */
+ @Test
+ public void testHelp() throws Throwable {
+ ServiceLauncher launcher = launch(SliderClient.class,
+ SliderUtils.createConfiguration(),
+ Arrays.asList(ClientArgs.ACTION_HELP));
+ assertEquals(0, launcher.getServiceExitCode());
+ }
+
+ @Test
+ public void testNoArgs() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ SliderUtils.createConfiguration(),
+ "Usage: slider COMMAND",
+ EMPTY_LIST);
+ }
+
+ @Test
+ public void testListUnknownRM() throws Throwable {
+ try {
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
+ 1000);
+ conf.setLong(YarnConfiguration
+ .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, 1000);
+ ServiceLauncher launcher = launch(SliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_LIST,
+ "cluster",
+ Arguments.ARG_MANAGER,
+ "badhost:8888"));
+ fail("expected an exception, got a launcher with exit code " +
+ launcher.getServiceExitCode());
+ } catch (UnknownHostException expected) {
+ //expected
+ }
+
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java
new file mode 100644
index 0000000..841b010
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestCommonArgParsing.java
@@ -0,0 +1,526 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.RoleKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
+import org.apache.slider.common.params.ActionBuildArgs;
+import org.apache.slider.common.params.ActionCreateArgs;
+import org.apache.slider.common.params.ActionDestroyArgs;
+import org.apache.slider.common.params.ActionExistsArgs;
+import org.apache.slider.common.params.ActionFlexArgs;
+import org.apache.slider.common.params.ActionFreezeArgs;
+import org.apache.slider.common.params.ActionListArgs;
+import org.apache.slider.common.params.ActionStatusArgs;
+import org.apache.slider.common.params.ActionThawArgs;
+import org.apache.slider.common.params.ActionUpdateArgs;
+import org.apache.slider.common.params.ArgOps;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.ClientArgs;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.SliderException;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test handling of common arguments, specifically how things get split up
+ */
+public class TestCommonArgParsing implements SliderActions, Arguments {
+
+
+ public static final String CLUSTERNAME = "clustername";
+
+ @Test
+ public void testCreateActionArgs() throws Throwable {
+ ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_CREATE,
+ "cluster1"));
+ assertEquals("cluster1", clientArgs.getClusterName());
+ }
+
+ @Test
+ public void testCreateFailsNoClustername() throws Throwable {
+ assertParseFails(Arrays.asList(ACTION_CREATE));
+ }
+
+ @Test
+ public void testCreateFailsTwoClusternames() throws Throwable {
+ assertParseFails(Arrays.asList(
+ ACTION_CREATE,
+ "c1",
+ "c2"
+ ));
+ }
+
+ @Test
+ public void testHelp() throws Throwable {
+ ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_HELP));
+ assertNull(clientArgs.getClusterName());
+ }
+
+ @Test
+ public void testSliderBasePath() throws Throwable {
+ ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST,
+ ARG_BASE_PATH, "/projects/slider/clusters"));
+ assertEquals(new Path("/projects/slider/clusters"),
+ clientArgs.getBasePath());
+ }
+
+ @Test
+ public void testNoSliderBasePath() throws Throwable {
+ ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST));
+ assertNull(clientArgs.getBasePath());
+ }
+
+ @Test
+ public void testListNoClusternames() throws Throwable {
+ ClientArgs clientArgs = createClientArgs(Arrays.asList(ACTION_LIST));
+ assertNull(clientArgs.getClusterName());
+ }
+
+ @Test
+ public void testListNoClusternamesDefinition() throws Throwable {
+ ClientArgs clientArgs = createClientArgs(Arrays.asList(
+ ACTION_LIST,
+ ARG_DEFINE,
+ "fs.default.FS=file://localhost"
+ ));
+ assertNull(clientArgs.getClusterName());
+ }
+
+ @Test
+ public void testList1Clustername() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(ACTION_LIST, "cluster1"));
+ assertEquals("cluster1", ca.getClusterName());
+ assertTrue(ca.getCoreAction() instanceof ActionListArgs);
+ }
+
+ @Test
+ public void testListFailsTwoClusternames() throws Throwable {
+ assertParseFails(Arrays.asList(
+ ACTION_LIST,
+ "c1",
+ "c2"
+ ));
+ }
+
+ @Test
+ public void testDefinitions() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_CREATE,
+ CLUSTERNAME,
+ "-D","yarn.resourcemanager.principal=yarn/server@LOCAL",
+ "-D","dfs.datanode.kerberos.principal=hdfs/server@LOCAL"
+ ));
+ Configuration conf = new Configuration(false);
+ ca.applyDefinitions(conf);
+ assertEquals(CLUSTERNAME, ca.getClusterName());
+ assertNull(conf.get(SliderXmlConfKeys.KEY_SLIDER_BASE_PATH));
+ SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
+ SliderUtils.verifyPrincipalSet(
+ conf,
+ SliderXmlConfKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
+
+ }
+
+ @Test
+ public void testDefinitionsSettingBaseSliderDir() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_CREATE,
+ CLUSTERNAME,
+ "--basepath", "/projects/slider/clusters",
+ "-D","yarn.resourcemanager.principal=yarn/server@LOCAL",
+ "-D","dfs.datanode.kerberos.principal=hdfs/server@LOCAL"
+ ));
+ Configuration conf = new Configuration(false);
+ ca.applyDefinitions(conf);
+ assertEquals(CLUSTERNAME, ca.getClusterName());
+ assertEquals("/projects/slider/clusters", conf.get(SliderXmlConfKeys
+ .KEY_SLIDER_BASE_PATH));
+ SliderUtils.verifyPrincipalSet(conf, YarnConfiguration.RM_PRINCIPAL);
+ SliderUtils.verifyPrincipalSet(conf, SliderXmlConfKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
+
+ }
+
+ /**
+ * Test a start command
+ * @throws Throwable
+ */
+ @Test
+ public void testComplexThaw() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_THAW,
+ "--manager", "rhel:8032", "--filesystem", "hdfs://rhel:9090",
+ "-S","java.security.krb5.realm=LOCAL","-S", "java.security.krb5.kdc=rhel",
+ "-D","yarn.resourcemanager.principal=yarn/rhel@LOCAL",
+ "-D","namenode.resourcemanager.principal=hdfs/rhel@LOCAL",
+ "cl1"
+ ));
+ assertEquals("cl1", ca.getClusterName());
+ assertTrue(ca.getCoreAction() instanceof ActionThawArgs);
+ }
+
+ /**
+ * Test a status command where the application ID comes at the end of the line
+ * @throws Throwable
+ *
+ */
+ @Test
+ public void testStatusSplit() throws Throwable {
+
+ String appId = "application_1381252124398_0013";
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_STATUS,
+ "--manager", "rhel:8032",
+ "--filesystem", "hdfs://rhel:9090",
+ "-S","java.security.krb5.realm=LOCAL",
+ "-S", "java.security.krb5.kdc=rhel",
+ "-D","yarn.resourcemanager.principal=yarn/rhel@LOCAL",
+ "-D","namenode.resourcemanager.principal=hdfs/rhel@LOCAL",
+ appId
+ ));
+ assertEquals(appId, ca.getClusterName());
+ }
+
+ @Test
+ public void testFreezeFailsNoArg() throws Throwable {
+ assertParseFails(Arrays.asList(
+ ACTION_FREEZE
+ ));
+ }
+
+ @Test
+ public void testFreezeWorks1Arg() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_FREEZE,
+ CLUSTERNAME
+ ));
+ assertEquals(CLUSTERNAME, ca.getClusterName());
+ assertTrue(ca.getCoreAction() instanceof ActionFreezeArgs);
+ }
+
+ @Test
+ public void testFreezeFails2Arg() throws Throwable {
+ assertParseFails(Arrays.asList(
+ ACTION_FREEZE, "cluster", "cluster2"
+ ));
+ }
+
+ @Test
+ public void testFreezeForceWaitAndMessage() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_FREEZE, CLUSTERNAME,
+ ARG_FORCE,
+ ARG_WAIT, "0",
+ ARG_MESSAGE, "explanation"
+ ));
+ assertEquals(CLUSTERNAME, ca.getClusterName());
+ assertTrue(ca.getCoreAction() instanceof ActionFreezeArgs);
+ ActionFreezeArgs freezeArgs = (ActionFreezeArgs) ca.getCoreAction();
+ assertEquals("explanation", freezeArgs.message);
+ assertTrue(freezeArgs.force);
+ }
+
+ @Test
+ public void testGetStatusWorks1Arg() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_STATUS,
+ CLUSTERNAME
+ ));
+ assertEquals(CLUSTERNAME, ca.getClusterName());
+ assertTrue(ca.getCoreAction() instanceof ActionStatusArgs);
+ }
+
+ @Test
+ public void testExistsWorks1Arg() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_EXISTS,
+ CLUSTERNAME,
+ ARG_LIVE
+ ));
+ assertEquals(CLUSTERNAME, ca.getClusterName());
+ assertTrue(ca.getCoreAction() instanceof ActionExistsArgs);
+ assertTrue(ca.getActionExistsArgs().live);
+ }
+
+ @Test
+ public void testDestroy1Arg() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_DESTROY,
+ CLUSTERNAME
+ ));
+ assertEquals(CLUSTERNAME, ca.getClusterName());
+ assertTrue(ca.getCoreAction() instanceof ActionDestroyArgs);
+ }
+
+ /**
+ * Assert that a parse fails with a BadCommandArgumentsException
+ * @param argsList the argument list expected to fail parsing
+ */
+
+ private void assertParseFails(List argsList) throws SliderException {
+ try {
+ ClientArgs clientArgs = createClientArgs(argsList);
+ Assert.fail("exected an exception, got " + clientArgs);
+ } catch (BadCommandArgumentsException ignored) {
+ //expected
+ }
+ }
+
+ /**
+ * Build and parse client args, after adding the base args list
+ * @param argsList the argument list to parse
+ */
+ public ClientArgs createClientArgs(List argsList)
+ throws SliderException {
+ ClientArgs serviceArgs = new ClientArgs(argsList);
+ serviceArgs.parse();
+ return serviceArgs;
+ }
+
+ public ActionCreateArgs createAction(List argsList)
+ throws SliderException {
+ ClientArgs ca = createClientArgs(argsList);
+ assertEquals(ACTION_CREATE, ca.getAction());
+ ActionCreateArgs args = ca.getActionCreateArgs();
+ assertNotNull(args);
+ return args;
+ }
+
+ @Test
+ public void testCreateWaitTime() throws Throwable {
+ ActionCreateArgs createArgs = createAction(Arrays.asList(
+ ACTION_CREATE, "cluster1",
+ ARG_WAIT, "600"
+ ));
+ assertEquals(600, createArgs.getWaittime());
+ }
+
+
+ @Test
+ public void testSingleRoleArg() throws Throwable {
+ ActionCreateArgs createArgs = createAction(Arrays.asList(
+ ACTION_CREATE, "cluster1",
+ ARG_COMPONENT,"master","5"
+ ));
+ List tuples = createArgs.getComponentTuples();
+ assertEquals(2, tuples.size());
+ Map roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+ assertEquals("5", roleMap.get("master"));
+ }
+
+ @Test
+ public void testNoRoleArg() throws Throwable {
+ ActionCreateArgs createArgs = createAction(Arrays.asList(
+ ACTION_CREATE, "cluster1"
+ ));
+ List tuples = createArgs.getComponentTuples();
+ Map roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+ assertNull(roleMap.get("master"));
+ }
+
+
+ @Test
+ public void testMultiRoleArgBuild() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_BUILD, "cluster1",
+ ARG_COMPONENT, "master", "1",
+ ARG_COMPONENT, "worker", "2"
+ ));
+ assertEquals(ACTION_BUILD, ca.getAction());
+ assertTrue(ca.getCoreAction() instanceof ActionBuildArgs);
+ assertTrue(ca.getBuildingActionArgs() instanceof ActionBuildArgs);
+ AbstractClusterBuildingActionArgs args = ca.getActionBuildArgs();
+ List tuples = args.getComponentTuples();
+ assertEquals(4, tuples.size());
+ Map roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+ assertEquals("1", roleMap.get("master"));
+ assertEquals("2", roleMap.get("worker"));
+ }
+
+ @Test
+ public void testArgUpdate() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_UPDATE, "cluster1",
+ ARG_TEMPLATE, "appConfig.json"
+ ));
+ assertEquals(ACTION_UPDATE, ca.getAction());
+ assertTrue(ca.getCoreAction() instanceof ActionUpdateArgs);
+ assertTrue(ca.getActionUpdateArgs() instanceof ActionUpdateArgs);
+ AbstractClusterBuildingActionArgs args = ca.getActionUpdateArgs();
+ assertNotNull(args.template);
+ }
+
+ @Test
+ public void testFlexArgs() throws Throwable {
+ ClientArgs ca = createClientArgs(Arrays.asList(
+ ACTION_FLEX, "cluster1",
+ ARG_COMPONENT, "master", "1",
+ ARG_COMPONENT, "worker", "2"
+ ));
+ assertTrue(ca.getCoreAction() instanceof ActionFlexArgs);
+ List tuples = ca.getActionFlexArgs().getComponentTuples();
+ assertEquals(4, tuples.size());
+ Map roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+ assertEquals("1", roleMap.get("master"));
+ assertEquals("2", roleMap.get("worker"));
+ }
+
+ @Test
+ public void testDuplicateRole() throws Throwable {
+ ActionCreateArgs createArgs = createAction(Arrays.asList(
+ ACTION_CREATE, "cluster1",
+ ARG_COMPONENT, "master", "1",
+ ARG_COMPONENT, "master", "2"
+ ));
+ List tuples = createArgs.getComponentTuples();
+ assertEquals(4, tuples.size());
+ try {
+ Map roleMap = ArgOps.convertTupleListToMap(
+ "roles",
+ tuples);
+ Assert.fail("got a role map " + roleMap + " not a failure");
+ } catch (BadCommandArgumentsException expected) {
+ assertTrue(expected.getMessage().contains(ErrorStrings.ERROR_DUPLICATE_ENTRY));
+ }
+ }
+
+ @Test
+ public void testOddRoleCount() throws Throwable {
+ ActionCreateArgs createArgs = createAction(Arrays.asList(
+ ACTION_CREATE, "cluster1",
+ ARG_COMPONENT,"master","1",
+ ARG_COMPONENT,"master","2"
+ ));
+ List tuples = createArgs.getComponentTuples();
+ tuples.add("loggers");
+ assertEquals(5, tuples.size());
+ try {
+ Map roleMap = ArgOps.convertTupleListToMap("roles", tuples);
+ Assert.fail("got a role map " + roleMap + " not a failure");
+ } catch (BadCommandArgumentsException expected) {
+ assertTrue(expected.getMessage().contains(ErrorStrings.ERROR_PARSE_FAILURE));
+ }
+ }
+
+ /**
+ * Create some role-opt client args, so that multiple tests can use it
+ * @return the args
+ */
+ public ActionCreateArgs createRoleOptClientArgs() throws SliderException {
+ ActionCreateArgs createArgs = createAction(Arrays.asList(
+ ACTION_CREATE, "cluster1",
+ ARG_COMPONENT, "master", "1",
+ ARG_COMP_OPT, "master", "cheese", "swiss",
+ ARG_COMP_OPT, "master", "env.CHEESE", "cheddar",
+ ARG_COMP_OPT, "master", ResourceKeys.YARN_CORES, "3",
+
+ ARG_COMPONENT, "worker", "2",
+ ARG_COMP_OPT, "worker", ResourceKeys.YARN_CORES, "2",
+ ARG_COMP_OPT, "worker", RoleKeys.JVM_HEAP, "65536",
+ ARG_COMP_OPT, "worker", "env.CHEESE", "stilton"
+ ));
+ return createArgs;
+ }
+
+ @Test
+ public void testRoleOptionParse() throws Throwable {
+ ActionCreateArgs createArgs = createRoleOptClientArgs();
+ Map<String, Map<String, String>> tripleMaps = createArgs.getCompOptionMap();
+ Map<String, String> workerOpts = tripleMaps.get("worker");
+ assertEquals(3, workerOpts.size());
+ assertEquals("2", workerOpts.get(ResourceKeys.YARN_CORES));
+ assertEquals("65536", workerOpts.get(RoleKeys.JVM_HEAP));
+
+ Map<String, String> masterOpts = tripleMaps.get("master");
+ assertEquals(3, masterOpts.size());
+ assertEquals("3", masterOpts.get(ResourceKeys.YARN_CORES));
+
+ }
+
+ @Test
+ public void testRoleOptionsMerge() throws Throwable {
+ ActionCreateArgs createArgs = createRoleOptClientArgs();
+
+ Map<String, Map<String, String>> roleOpts = createArgs.getCompOptionMap();
+
+ Map<String, Map<String, String>> clusterRoleMap = createEnvMap();
+ SliderUtils.applyCommandLineRoleOptsToRoleMap(clusterRoleMap, roleOpts);
+
+ Map masterOpts = clusterRoleMap.get("master");
+ assertEquals("swiss", masterOpts.get("cheese"));
+
+ Map workerOpts = clusterRoleMap.get("worker");
+ assertEquals("stilton", workerOpts.get("env.CHEESE"));
+ }
+
+ @Test
+ public void testEnvVariableApply() throws Throwable {
+ ActionCreateArgs createArgs = createRoleOptClientArgs();
+
+
+ Map<String, Map<String, String>> roleOpts = createArgs.getCompOptionMap();
+
+ Map<String, Map<String, String>> clusterRoleMap = createEnvMap();
+ SliderUtils.applyCommandLineRoleOptsToRoleMap(clusterRoleMap, roleOpts);
+
+ Map workerOpts = clusterRoleMap.get("worker");
+ assertEquals("stilton", workerOpts.get("env.CHEESE"));
+
+ Map envmap = SliderUtils.buildEnvMap(workerOpts);
+ assertEquals("stilton", envmap.get("CHEESE"));
+
+ }
+
+ /**
+ * static compiler complaining about matching LinkedHashMap with Map,
+ * so some explicit creation here
+ * @return a map of maps
+ */
+ public Map<String, Map<String, String>> createEnvMap() {
+
+ Map<String, String> cheese = new HashMap<>();
+ cheese.put("cheese", "french");
+ Map<String, String> envCheese = new HashMap<>();
+ envCheese.put("env.CHEESE", "french");
+ Map<String, Map<String, String>> envMap = new HashMap<>();
+ envMap.put("master", cheese);
+ envMap.put("worker", envCheese);
+ return envMap;
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
new file mode 100644
index 0000000..0267c79
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestKeytabCommandOptions.java
@@ -0,0 +1,391 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.ClientArgs;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.main.ServiceLauncher;
+import org.apache.slider.core.main.ServiceLauncherBaseTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * Test a keytab installation
+ */
+public class TestKeytabCommandOptions extends ServiceLauncherBaseTest {
+
+ private static SliderFileSystem testFileSystem;
+
+ @Before
+ public void setupFilesystem() throws IOException {
+ org.apache.hadoop.fs.FileSystem fileSystem = new RawLocalFileSystem();
+ YarnConfiguration configuration = SliderUtils.createConfiguration();
+ fileSystem.setConf(configuration);
+ testFileSystem = new SliderFileSystem(fileSystem, configuration);
+ File testFolderDir = new File(testFileSystem
+ .buildKeytabInstallationDirPath("").toUri().getPath());
+ FileUtils.deleteDirectory(testFolderDir);
+ }
+
+ @Test
+ public void testInstallKeytab() throws Throwable {
+ // create a mock keytab file
+ File localKeytab =
+ FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+ String contents = UUID.randomUUID().toString();
+ FileUtils.write(localKeytab, contents);
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABINSTALL,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath(),
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ Path installedPath = new Path(testFileSystem
+ .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+ File installedKeytab = new File(installedPath.toUri().getPath());
+ assertTrue(installedKeytab.exists());
+ assertEquals(FileUtils.readFileToString(installedKeytab),
+ FileUtils.readFileToString(localKeytab));
+ }
+
+ @Test
+ public void testInstallThenDeleteKeytab() throws Throwable {
+ // create a mock keytab file
+ File localKeytab =
+ FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+ String contents = UUID.randomUUID().toString();
+ FileUtils.write(localKeytab, contents);
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_INSTALL_KEYTAB,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath(),
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ Path installedPath = new Path(testFileSystem
+ .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+ File installedKeytab = new File(installedPath.toUri().getPath());
+ assertTrue(installedKeytab.exists());
+ assertEquals(FileUtils.readFileToString(installedKeytab),
+ FileUtils.readFileToString(localKeytab));
+
+ launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABDELETE,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getName(),
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+
+ assertFalse(installedKeytab.exists());
+
+ }
+
+ @Test
+ public void testInstallThenListKeytab() throws Throwable {
+ // create a mock keytab file
+ File localKeytab =
+ FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+ String contents = UUID.randomUUID().toString();
+ FileUtils.write(localKeytab, contents);
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_INSTALL_KEYTAB,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath(),
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ Path installedPath = new Path(testFileSystem
+ .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+ File installedKeytab = new File(installedPath.toUri().getPath());
+ assertTrue(installedKeytab.exists());
+ assertEquals(FileUtils.readFileToString(installedKeytab),
+ FileUtils.readFileToString(localKeytab));
+
+ // install an additional copy into another folder to test listing
+ launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_INSTALL_KEYTAB,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath(),
+ Arguments.ARG_FOLDER,
+ "testFolder2"));
+
+ TestAppender testAppender = new TestAppender();
+
+ Logger.getLogger(SliderClient.class).addAppender(testAppender);
+
+ try {
+ launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABLIST)
+ );
+ assertEquals(3, testAppender.events.size());
+ String msg = (String) testAppender.events.get(1).getMessage();
+ assertTrue(msg.contains("/.slider/keytabs/testFolder"));
+ assertTrue(msg.endsWith(installedKeytab.getName()));
+ msg = (String) testAppender.events.get(2).getMessage();
+ assertTrue(msg.contains("/.slider/keytabs/testFolder"));
+ assertTrue(msg.endsWith(installedKeytab.getName()));
+ } finally {
+ Logger.getLogger(SliderClient.class).removeAppender(testAppender);
+ }
+
+ // now listing while specifying the folder name
+ testAppender = new TestAppender();
+
+ Logger.getLogger(SliderClient.class).addAppender(testAppender);
+
+ try {
+ launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABLIST,
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ assertEquals(2, testAppender.events.size());
+ String msg = (String) testAppender.events.get(1).getMessage();
+ assertTrue(msg.contains( "/.slider/keytabs/testFolder/" +
+ installedKeytab.getName()));
+ } finally {
+ Logger.getLogger(SliderClient.class).removeAppender(testAppender);
+ }
+ }
+
+ @Test
+ public void testDeleteNonExistentKeytab() throws Throwable {
+ // create a mock keytab file
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ try {
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABDELETE,
+ ClientArgs.ARG_KEYTAB,
+ "HeyIDontExist.keytab",
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ fail("expected BadCommandArgumentsException from launch");
+ } catch (BadCommandArgumentsException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testInstallKeytabWithNoFolder() throws Throwable {
+ // create a mock keytab file
+ File localKeytab =
+ FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+ String contents = UUID.randomUUID().toString();
+ FileUtils.write(localKeytab, contents);
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ try {
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABINSTALL,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath()));
+ fail("expected BadCommandArgumentsException from launch");
+ } catch (BadCommandArgumentsException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testInstallKeytabWithNoKeytab() throws Throwable {
+ // create a mock keytab file
+ File localKeytab =
+ FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+ String contents = UUID.randomUUID().toString();
+ FileUtils.write(localKeytab, contents);
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ try {
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABINSTALL,
+ ClientArgs.ARG_FOLDER,
+ "testFolder"));
+ fail("expected BadCommandArgumentsException from launch");
+ } catch (BadCommandArgumentsException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testInstallKeytabAllowingOverwrite() throws Throwable {
+ // create a mock keytab file
+ File localKeytab =
+ FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+ String contents = UUID.randomUUID().toString();
+ FileUtils.write(localKeytab, contents);
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABINSTALL,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath(),
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ Path installedPath = new Path(testFileSystem
+ .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+ File installedKeytab = new File(installedPath.toUri().getPath());
+ assertTrue(installedKeytab.exists());
+ assertEquals(FileUtils.readFileToString(installedKeytab), FileUtils
+ .readFileToString(localKeytab));
+ launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABINSTALL,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath(),
+ Arguments.ARG_FOLDER,
+ "testFolder",
+ Arguments.ARG_OVERWRITE)
+ );
+ assertTrue(installedKeytab.exists());
+ assertEquals(FileUtils.readFileToString(installedKeytab),
+ FileUtils.readFileToString(localKeytab));
+ }
+
+ @Test
+ public void testInstallKeytabNotAllowingOverwrite() throws Throwable {
+ // create a mock keytab file
+ File localKeytab =
+ FileUtil.createLocalTempFile(getTempLocation(), "test", true);
+ String contents = UUID.randomUUID().toString();
+ FileUtils.write(localKeytab, contents);
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABINSTALL,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath(),
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ Path installedPath = new Path(testFileSystem
+ .buildKeytabInstallationDirPath("testFolder"), localKeytab.getName());
+ File installedKeytab = new File(installedPath.toUri().getPath());
+ assertTrue(installedKeytab.exists());
+ assertEquals(FileUtils.readFileToString(installedKeytab),
+ FileUtils.readFileToString(localKeytab));
+ try {
+ launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABINSTALL,
+ ClientArgs.ARG_KEYTAB,
+ localKeytab.getAbsolutePath(),
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ fail("expected BadCommandArgumentsException from launch");
+ } catch (BadCommandArgumentsException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testInstallKeytabWithMissingKeytab() throws Throwable {
+ // create a mock keytab file
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ try {
+ ServiceLauncher launcher = launch(TestSliderClient.class,
+ conf,
+ Arrays.asList(
+ ClientArgs.ACTION_KEYTAB,
+ ClientArgs.ARG_KEYTABINSTALL,
+ ClientArgs.ARG_KEYTAB,
+ "HeyIDontExist.keytab",
+ Arguments.ARG_FOLDER,
+ "testFolder"));
+ fail("expected BadCommandArgumentsException from launch");
+ } catch (BadCommandArgumentsException e) {
+ // expected
+ }
+ }
+
+ private File getTempLocation () {
+ return new File(System.getProperty("user.dir") + "/target");
+ }
+
+ public static class TestSliderClient extends SliderClient {
+ public TestSliderClient() {
+ super();
+ }
+
+ @Override
+ protected void initHadoopBinding() throws IOException, SliderException {
+ sliderFileSystem = testFileSystem;
+ }
+
+ }
+
+ public static class TestAppender extends AppenderSkeleton{
+ public List<LoggingEvent> events = new ArrayList<>();
+ public void close() {}
+ public boolean requiresLayout() {return false;}
+ @Override
+ protected void append(LoggingEvent event) {
+ events.add(event);
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestReplaceTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestReplaceTokens.java
new file mode 100644
index 0000000..9f94e9f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestReplaceTokens.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.persist.JsonSerDeser;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test bad argument handling
+ */
+public class TestReplaceTokens extends Assert {
+
+ static final String PACKAGE = "/org/apache/slider/core/conf/examples/";
+ static final String app_configuration = "app_configuration_tokenized.json";
+
+ /**
+ * help should print out help string and then succeed
+ * @throws Throwable
+ */
+ @Test
+ public void testHelp() throws Throwable {
+ JsonSerDeser<ConfTree> confTreeJsonSerDeser =
+ new JsonSerDeser<>(ConfTree.class);
+ ConfTree confTree = confTreeJsonSerDeser.fromResource(PACKAGE +
+ app_configuration);
+ SliderClient.replaceTokens(confTree, "testUser", "testCluster");
+ // cluster name is resolved later now
+ assertEquals("hdfs://${CLUSTER_NAME}:8020",
+ confTree.global.get("site.fs.defaultFS"));
+ assertEquals("hdfs://${CLUSTER_NAME}:8020",
+ confTree.global.get("site.fs.default.name"));
+ assertEquals("testUser", confTree.global.get("site.hbase.user_name"));
+ assertEquals("testUser", confTree.global.get("site.hbase.another.user"));
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java
new file mode 100644
index 0000000..62bf12e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderClientMethods.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.buildutils.InstanceBuilder;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.launch.LaunchedApplication;
+import org.apache.slider.core.main.ServiceLauncherBaseTest;
+import org.apache.slider.core.persist.LockAcquireFailedException;
+import org.apache.slider.server.appmaster.model.mock.MockApplicationId;
+import org.easymock.EasyMock;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.easymock.PowerMock;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(SliderUtils.class)
+public class TestSliderClientMethods extends ServiceLauncherBaseTest {
+ protected static final Logger log =
+ LoggerFactory.getLogger(TestSliderClientMethods.class);
+
+ String AM_ENV = "LD_LIBRARY_PATH";
+ String PLACEHOLDER_KEY = "${distro.version}";
+ String PLACEHOLDER_SYSTEM_KEY = "DISTRO_VERSION";
+ String PLACEHOLDER_VALUE = "1.0.0";
+ String AM_ENV_2 = "PATH";
+ String PLACEHOLDER_KEY_2 = "${native.version}";
+ String PLACEHOLDER_SYSTEM_KEY_2 = "NATIVE_VERSION";
+ String PLACEHOLDER_VALUE_2 = "2.0.0";
+
+ @Test
+ public void testGeneratePlaceholderKeyValueMap() throws Throwable {
+ TestSliderClient testSliderClient = new TestSliderClient();
+
+ PowerMock.mockStatic(System.class);
+ EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
+ .andReturn(PLACEHOLDER_VALUE).anyTimes();
+ PowerMock.replayAll();
+
+ Map placeholders = testSliderClient.generatePlaceholderKeyValueMap(
+ AM_ENV + "=/usr/lib/" + PLACEHOLDER_KEY);
+ Assert.assertTrue(placeholders.containsKey(PLACEHOLDER_KEY));
+ Assert.assertEquals("Should be equal", PLACEHOLDER_VALUE,
+ placeholders.get(PLACEHOLDER_KEY));
+
+ PowerMock.verifyAll();
+ log.info("Placeholders = {}", placeholders);
+ }
+
+ @Test
+ public void testSetAmLaunchEnv() throws Throwable {
+ TestSliderClient testSliderClient = new TestSliderClient();
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ conf.set(SliderXmlConfKeys.KEY_AM_LAUNCH_ENV, AM_ENV + "=/usr/lib/"
+ + PLACEHOLDER_KEY);
+
+ PowerMock.mockStatic(System.class);
+ EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
+ .andReturn(PLACEHOLDER_VALUE);
+ PowerMock.replayAll();
+
+ Map amLaunchEnv = testSliderClient.getAmLaunchEnv(conf);
+ Assert.assertNotNull(amLaunchEnv);
+ Assert.assertNotNull(amLaunchEnv.get(AM_ENV));
+ Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV),
+ (Shell.WINDOWS ? "%" + AM_ENV + "%;" : "$" + AM_ENV + ":") +
+ "/usr/lib/" + PLACEHOLDER_VALUE);
+
+ PowerMock.verifyAll();
+ log.info("amLaunchEnv = {}", amLaunchEnv);
+ }
+
+ @Test
+ public void testSetAmLaunchEnvMulti() throws Throwable {
+ TestSliderClient testSliderClient = new TestSliderClient();
+ YarnConfiguration conf = SliderUtils.createConfiguration();
+ conf.set(SliderXmlConfKeys.KEY_AM_LAUNCH_ENV, AM_ENV + "=/usr/lib/"
+ + PLACEHOLDER_KEY + "," + AM_ENV_2 + "=/usr/bin/" + PLACEHOLDER_KEY_2);
+
+ PowerMock.mockStatic(System.class);
+ EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY))
+ .andReturn(PLACEHOLDER_VALUE);
+ EasyMock.expect(SliderUtils.getSystemEnv(PLACEHOLDER_SYSTEM_KEY_2))
+ .andReturn(PLACEHOLDER_VALUE_2);
+ PowerMock.replayAll();
+
+ Map amLaunchEnv = testSliderClient.getAmLaunchEnv(conf);
+ Assert.assertNotNull(amLaunchEnv);
+ Assert.assertEquals("Should have 2 envs", amLaunchEnv.size(), 2);
+ Assert.assertNotNull(amLaunchEnv.get(AM_ENV));
+ Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV),
+ (Shell.WINDOWS ? "%" + AM_ENV + "%;" : "$" + AM_ENV + ":") +
+ "/usr/lib/" + PLACEHOLDER_VALUE);
+ Assert.assertNotNull(amLaunchEnv.get(AM_ENV_2));
+ Assert.assertEquals("Should be equal", amLaunchEnv.get(AM_ENV_2),
+ (Shell.WINDOWS ? "%" + AM_ENV_2 + "%;" : "$" + AM_ENV_2 + ":") +
+ "/usr/bin/" + PLACEHOLDER_VALUE_2);
+
+ PowerMock.verifyAll();
+ log.info("amLaunchEnv = " + amLaunchEnv);
+ }
+
+ static class TestSliderClient extends SliderClient {
+ @Override
+ protected void persistInstanceDefinition(boolean overwrite,
+ Path appconfdir,
+ InstanceBuilder builder)
+ throws IOException, SliderException, LockAcquireFailedException {
+ super.persistInstanceDefinition(overwrite, appconfdir, builder);
+ }
+
+ @Override
+ public LaunchedApplication launchApplication(String clustername,
+ Path clusterDirectory,
+ AggregateConf instanceDefinition,
+ boolean debugAM,
+ long lifetime)
+ throws YarnException, IOException {
+ return new LaunchedApplication(new MockApplicationId(1), new SliderYarnClientImpl());
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java
new file mode 100644
index 0000000..27f46ce
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/client/TestSliderTokensCommand.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.params.ActionTokensArgs;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.core.exceptions.NotFoundException;
+import org.apache.slider.core.main.ServiceLauncherBaseTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+/**
+ * Test the argument parsing/validation logic
+ */
+public class TestSliderTokensCommand extends ServiceLauncherBaseTest {
+
+ public static YarnConfiguration config = createTestConfig();
+
+ public static YarnConfiguration createTestConfig() {
+ YarnConfiguration configuration = new YarnConfiguration();
+ configuration.set(YarnConfiguration.RM_ADDRESS, "127.0.0.1:8032");
+ return configuration;
+ }
+
+ @Test
+ public void testBadSourceArgs() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ config,
+ ActionTokensArgs.DUPLICATE_ARGS,
+ Arrays.asList(SliderActions.ACTION_TOKENS,
+ Arguments.ARG_SOURCE, "target/tokens.bin",
+ Arguments.ARG_OUTPUT, "target/tokens.bin"
+ ));
+ }
+
+ @Test
+ public void testKTNoPrincipal() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ config,
+ ActionTokensArgs.MISSING_KT_PROVIDER,
+ Arrays.asList(SliderActions.ACTION_TOKENS,
+ Arguments.ARG_KEYTAB, "target/keytab"
+ ));
+ }
+
+ @Test
+ public void testPrincipalNoKT() throws Throwable {
+ launchExpectingException(SliderClient.class,
+ config,
+ ActionTokensArgs.MISSING_KT_PROVIDER,
+ Arrays.asList(SliderActions.ACTION_TOKENS,
+ Arguments.ARG_PRINCIPAL, "bob@REALM"
+ ));
+ }
+
+ /**
+ * A missing keytab is an error
+ * @throws Throwable
+ */
+ @Test
+ public void testMissingKT() throws Throwable {
+ Throwable ex = launchExpectingException(SliderClient.class,
+ config,
+ TokensOperation.E_NO_KEYTAB,
+ Arrays.asList(SliderActions.ACTION_TOKENS,
+ Arguments.ARG_PRINCIPAL, "bob@REALM",
+ Arguments.ARG_KEYTAB, "target/keytab"
+ ));
+ if (!(ex instanceof NotFoundException)) {
+ throw ex;
+ }
+ }
+
+ @Test
+ public void testMissingSourceFile() throws Throwable {
+ Throwable ex = launchExpectingException(SliderClient.class,
+ config,
+ TokensOperation.E_MISSING_SOURCE_FILE,
+ Arrays.asList(SliderActions.ACTION_TOKENS,
+ Arguments.ARG_SOURCE, "target/tokens.bin"
+ ));
+ if (!(ex instanceof NotFoundException)) {
+ throw ex;
+ }
+ }
+
+ @Test
+ public void testListHarmlessWhenInsecure() throws Throwable {
+ execSliderCommand(0, config, Arrays.asList(SliderActions.ACTION_TOKENS));
+ }
+
+ @Test
+ public void testCreateFailsWhenInsecure() throws Throwable {
+ Throwable ex = launchExpectingException(SliderClient.class,
+ config,
+ TokensOperation.E_INSECURE,
+ Arrays.asList(SliderActions.ACTION_TOKENS,
+ Arguments.ARG_OUTPUT, "target/tokens.bin"
+ ));
+ if (!(ex instanceof BadClusterStateException)) {
+ throw ex;
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java
new file mode 100644
index 0000000..9e79169
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestClusterNames.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+public class TestClusterNames {
+
+ void assertValidName(String name) {
+ boolean valid = SliderUtils.isClusternameValid(name);
+ Assert.assertTrue("Clustername '" + name + "' mistakenly declared invalid",
+ valid);
+ }
+
+  void assertInvalidName(String name) {
+    boolean valid = SliderUtils.isClusternameValid(name);
+    // message must concatenate the actual name, not a literal "\" + name + \""
+    Assert.assertFalse("Clustername '" + name + "' mistakenly declared valid",
+        valid);
+
+  // Assert every name in the list is rejected as a cluster name.
+  void assertInvalid(List<String> names) {
+    for (String name : names) {
+      assertInvalidName(name);
+    }
+
+  // Assert every name in the list is accepted as a cluster name.
+  void assertValid(List<String> names) {
+    for (String name : names) {
+      assertValidName(name);
+    }
+
+ @Test
+ public void testEmptyName() throws Throwable {
+ assertInvalidName("");
+ }
+
+ @Test
+ public void testSpaceName() throws Throwable {
+ assertInvalidName(" ");
+ }
+
+
+ @Test
+ public void testLeadingHyphen() throws Throwable {
+ assertInvalidName("-hyphen");
+ }
+
+ @Test
+ public void testTitleLetters() throws Throwable {
+ assertInvalidName("Title");
+ }
+
+ @Test
+ public void testCapitalLetters() throws Throwable {
+ assertInvalidName("UPPER-CASE-CLUSTER");
+ }
+
+ @Test
+ public void testInnerBraced() throws Throwable {
+ assertInvalidName("a[a");
+ }
+
+ @Test
+ public void testLeadingBrace() throws Throwable {
+ assertInvalidName("[");
+ }
+
+ @Test
+ public void testNonalphaLeadingChars() throws Throwable {
+ assertInvalid(Arrays.asList(
+ "[a", "#", "@", "=", "*", "."
+ ));
+ }
+
+ @Test
+ public void testNonalphaInnerChars() throws Throwable {
+ assertInvalid(Arrays.asList(
+ "a[a", "b#", "c@", "d=", "e*", "f.", "g ", "h i"
+ ));
+ }
+
+ @Test
+ public void testClusterValid() throws Throwable {
+ assertValidName("cluster");
+ }
+
+ @Test
+ public void testValidNames() throws Throwable {
+ assertValid(Arrays.asList(
+ "cluster",
+ "cluster1",
+ "very-very-very-long-cluster-name",
+ "c1234567890"
+ ));
+
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java
new file mode 100644
index 0000000..93ba06c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelper.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.test.YarnMiniClusterTestBase;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.util.Map;
+
+public class TestConfigHelper extends YarnMiniClusterTestBase {
+
+
+  @Test
+  public void testConfigLoaderIteration() throws Throwable {
+
+    String xml = "<configuration><property><name>key</name>" +
+        "<value>value</value><source>programatically</source>" +
+        "</property></configuration>";
+    InputStream ins = new ByteArrayInputStream(xml.getBytes("UTF8"));
+    Configuration conf = new Configuration(false);
+    conf.addResource(ins);
+    Configuration conf2 = new Configuration(false);
+    for (Map.Entry<String, String> entry : conf) {
+      conf2.set(entry.getKey(), entry.getValue(), "src");
+    }
+
+  }
+
+ @Test
+ public void testConfigDeprecation() throws Throwable {
+ ConfigHelper.registerDeprecatedConfigItems();
+ Configuration conf = new Configuration(false);
+ conf.set(SliderXmlConfKeys.REGISTRY_PATH, "path");
+ assertEquals("path", conf.get(SliderXmlConfKeys.REGISTRY_PATH));
+ assertEquals("path", conf.get(RegistryConstants.KEY_REGISTRY_ZK_ROOT));
+
+ conf.set(SliderXmlConfKeys.REGISTRY_ZK_QUORUM, "localhost");
+ assertEquals("localhost", conf.get(SliderXmlConfKeys.REGISTRY_ZK_QUORUM));
+ assertEquals("localhost", conf.get(RegistryConstants
+ .KEY_REGISTRY_ZK_QUORUM));
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java
new file mode 100644
index 0000000..4a88fb6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestConfigHelperHDFS.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.test.YarnMiniClusterTestBase;
+import org.junit.Test;
+
+import java.net.URI;
+
+public class TestConfigHelperHDFS extends YarnMiniClusterTestBase {
+
+ @Test
+ public void testConfigHelperHDFS() throws Throwable {
+ YarnConfiguration config = getConfiguration();
+ createMiniHDFSCluster("testConfigHelperHDFS", config);
+
+ Configuration conf = new Configuration(false);
+ conf.set("key","value");
+ URI fsURI = new URI(getFsDefaultName());
+ Path root = new Path(fsURI);
+ Path confPath = new Path(root, "conf.xml");
+ FileSystem dfs = FileSystem.get(fsURI,config);
+ ConfigHelper.saveConfig(dfs,confPath, conf);
+ //load time
+ Configuration loaded = ConfigHelper.loadConfiguration(dfs,confPath);
+ log.info(ConfigHelper.dumpConfigToString(loaded));
+ assertEquals("value", loaded.get("key"));
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java
new file mode 100644
index 0000000..f8c2eab
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestExecutionEnvironment.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.slider.test.SliderTestBase;
+import org.junit.Test;
+
+public class TestExecutionEnvironment extends SliderTestBase {
+
+ @Test
+ public void testClientEnv() throws Throwable {
+ SliderUtils.validateSliderClientEnvironment(log);
+ }
+
+ @Test
+ public void testWinutils() throws Throwable {
+ SliderUtils.maybeVerifyWinUtilsValid();
+ }
+
+ @Test
+ public void testServerEnv() throws Throwable {
+ SliderUtils.validateSliderServerEnvironment(log, true);
+ }
+
+ @Test
+ public void testServerEnvNoDependencies() throws Throwable {
+ SliderUtils.validateSliderServerEnvironment(log, false);
+ }
+
+ @Test
+ public void testopenSSLEnv() throws Throwable {
+ SliderUtils.validateOpenSSLEnv(log);
+ }
+
+ @Test
+ public void testValidatePythonEnv() throws Throwable {
+ SliderUtils.validatePythonEnv(log);
+ }
+
+ @Test
+ public void testNativeLibs() throws Throwable {
+ assertNativeLibrariesPresent();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java
new file mode 100644
index 0000000..976cd39
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestMiscSliderUtils.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.test.SliderTestBase;
+import org.junit.Test;
+
+import java.net.URI;
+
+public class TestMiscSliderUtils extends SliderTestBase {
+
+
+ public static final String CLUSTER1 = "cluster1";
+
+ @Test
+ public void testPurgeTempDir() throws Throwable {
+
+ Configuration configuration = new Configuration();
+ FileSystem fs = FileSystem.get(new URI("file:///"), configuration);
+ SliderFileSystem sliderFileSystem = new SliderFileSystem(fs, configuration);
+ Path inst = sliderFileSystem.createAppInstanceTempPath(CLUSTER1, "001");
+
+ assertTrue(fs.exists(inst));
+ sliderFileSystem.purgeAppInstanceTempFiles(CLUSTER1);
+ assertFalse(fs.exists(inst));
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java
new file mode 100644
index 0000000..deee95b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestPortScan.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.SliderException;
+import org.junit.Test;
+
+import java.net.ServerSocket;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.*;
+
+public class TestPortScan {
+
+ @Test
+ public void testScanPorts() throws Throwable {
+
+ ServerSocket server = new ServerSocket(0);
+
+ try {
+ int serverPort = server.getLocalPort();
+ assertFalse(SliderUtils.isPortAvailable(serverPort));
+ int port = SliderUtils.findFreePort(serverPort, 10);
+ assertTrue(port > 0 && serverPort < port);
+ } finally {
+ server.close();
+ }
+ }
+
+  @Test
+  public void testRequestedPortsLogic() throws Throwable {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("5,6,8-10, 11,14 ,20 - 22");
+    List<Integer> ports = portScanner.getRemainingPortsToCheck();
+    List<Integer> expectedPorts =
+        Arrays.asList(5, 6, 8, 9, 10, 11, 14, 20, 21, 22);
+    assertEquals(expectedPorts, ports);
+  }
+
+  @Test
+  public void testRequestedPortsOutOfOrder() throws Throwable {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("8-10,5,6, 11,20 - 22, 14 ");
+    List<Integer> ports = portScanner.getRemainingPortsToCheck();
+    List<Integer> expectedPorts =
+        Arrays.asList(5, 6, 8, 9, 10, 11, 14, 20, 21, 22);
+    assertEquals(expectedPorts, ports);
+  }
+
+ @Test
+ public void testFindAvailablePortInRange() throws Throwable {
+ ServerSocket server = new ServerSocket(0);
+ try {
+ int serverPort = server.getLocalPort();
+
+ PortScanner portScanner = new PortScanner();
+ portScanner.setPortRange("" + (serverPort-1) + "-" + (serverPort + 3));
+ int port = portScanner.getAvailablePort();
+ assertNotEquals(port, serverPort);
+ assertTrue(port >= serverPort -1 && port <= serverPort + 3);
+ } finally {
+ server.close();
+ }
+ }
+
+ @Test
+ public void testFindAvailablePortInList() throws Throwable {
+ ServerSocket server = new ServerSocket(0);
+ try {
+ int serverPort = server.getLocalPort();
+
+ PortScanner portScanner = new PortScanner();
+ portScanner.setPortRange("" + (serverPort-1) + ", " + (serverPort + 1));
+ int port = portScanner.getAvailablePort();
+ assertNotEquals(port, serverPort);
+ assertTrue(port == serverPort -1 || port == serverPort + 1);
+ } finally {
+ server.close();
+ }
+ }
+
+ @Test
+ public void testNoAvailablePorts() throws Throwable {
+ ServerSocket server1 = new ServerSocket(0);
+ ServerSocket server2 = new ServerSocket(0);
+ try {
+ int serverPort1 = server1.getLocalPort();
+ int serverPort2 = server2.getLocalPort();
+
+ PortScanner portScanner = new PortScanner();
+ portScanner.setPortRange("" + serverPort1+ ", " + serverPort2);
+ try {
+ portScanner.getAvailablePort();
+ fail("expected SliderException");
+ } catch (SliderException e) {
+ // expected
+ }
+ } finally {
+ server1.close();
+ server2.close();
+ }
+ }
+
+ @Test
+ public void testPortRemovedFromRange() throws Throwable {
+ ServerSocket server = new ServerSocket(0);
+ try {
+ int serverPort = server.getLocalPort();
+
+ PortScanner portScanner = new PortScanner();
+ portScanner.setPortRange("" + (serverPort-1) + "-" + (serverPort + 3));
+ int port = portScanner.getAvailablePort();
+ assertNotEquals(port, serverPort);
+ assertTrue(port >= serverPort -1 && port <= serverPort + 3);
+ assertFalse(portScanner.getRemainingPortsToCheck().contains(port));
+ } finally {
+ server.close();
+ }
+ }
+
+ @Test(expected = BadConfigException.class)
+ public void testBadRange() throws BadConfigException {
+ PortScanner portScanner = new PortScanner();
+ // note the em dash
+ portScanner.setPortRange("2000–2010");
+ }
+
+ @Test(expected = BadConfigException.class)
+ public void testEndBeforeStart() throws BadConfigException {
+ PortScanner portScanner = new PortScanner();
+ portScanner.setPortRange("2001-2000");
+ }
+
+ @Test(expected = BadConfigException.class)
+ public void testEmptyRange() throws BadConfigException {
+ PortScanner portScanner = new PortScanner();
+ portScanner.setPortRange("");
+ }
+
+ @Test(expected = BadConfigException.class)
+ public void testBlankRange() throws BadConfigException {
+ PortScanner portScanner = new PortScanner();
+ portScanner.setPortRange(" ");
+ }
+
+  @Test
+  public void testExtraComma() throws BadConfigException {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("2000-2001, ");
+    List<Integer> ports = portScanner.getRemainingPortsToCheck();
+    List<Integer> expectedPorts = Arrays.asList(2000, 2001);
+    assertEquals(expectedPorts, ports);
+  }
+
+  @Test
+  public void testExtraCommas() throws BadConfigException {
+    PortScanner portScanner = new PortScanner();
+    portScanner.setPortRange("2000-2001,, ,2003,");
+    List<Integer> ports = portScanner.getRemainingPortsToCheck();
+    List<Integer> expectedPorts = Arrays.asList(2000, 2001, 2003);
+    assertEquals(expectedPorts, ports);
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java
new file mode 100644
index 0000000..8734613
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderFileSystem.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.test.SliderTestBase;
+import org.junit.Test;
+
+public class TestSliderFileSystem extends SliderTestBase {
+ private static Configuration defaultConfiguration() {
+ return new Configuration();
+ }
+
+ private static Configuration createConfigurationWithKV(String key, String value) {
+ Configuration conf = defaultConfiguration();
+ conf.set(key, value);
+ return conf;
+ }
+
+ @Test
+ public void testSliderBasePathDefaultValue() throws Throwable {
+ Configuration configuration = defaultConfiguration();
+ FileSystem fileSystem = FileSystem.get(configuration);
+
+ SliderFileSystem fs2 = new SliderFileSystem(fileSystem, configuration);
+ assertEquals(fs2.getBaseApplicationPath(), new Path(fileSystem
+ .getHomeDirectory(), ".slider"));
+ }
+
+ @Test
+ public void testSliderBasePathCustomValue() throws Throwable {
+ Configuration configuration = createConfigurationWithKV(SliderXmlConfKeys
+ .KEY_SLIDER_BASE_PATH, "/slider/cluster");
+ FileSystem fileSystem = FileSystem.get(configuration);
+ SliderFileSystem fs2 = new SliderFileSystem(fileSystem, configuration);
+
+ assertEquals(fs2.getBaseApplicationPath(), new Path("/slider/cluster"));
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java
new file mode 100644
index 0000000..0126798
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestSliderTestUtils.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.test.SliderTestUtils;
+import org.junit.Test;
+import org.junit.internal.AssumptionViolatedException;
+
+public class TestSliderTestUtils extends SliderTestUtils {
+
+
+ @Test
+ public void testAssumeTrue() throws Throwable {
+
+ try {
+ assume(true, "true");
+ } catch (AssumptionViolatedException e) {
+ throw new Exception(e);
+ }
+ }
+
+ @Test
+ public void testAssumeFalse() throws Throwable {
+
+ try {
+ assume(false, "false");
+ fail("expected an exception");
+ } catch (AssumptionViolatedException ignored) {
+ //expected
+ }
+ }
+
+ @Test
+ public void testAssumeBoolOptionSetInConf() throws Throwable {
+ Configuration conf = new Configuration(false);
+ conf.set("key", "true");
+ try {
+ assumeBoolOption(conf, "key", false);
+ } catch (AssumptionViolatedException e) {
+ throw new Exception(e);
+ }
+ }
+
+ @Test
+ public void testAssumeBoolOptionUnsetInConf() throws Throwable {
+ Configuration conf = new Configuration(false);
+ try {
+ assumeBoolOption(conf, "key", true);
+ } catch (AssumptionViolatedException e) {
+ throw new Exception(e);
+ }
+ }
+
+
+ @Test
+ public void testAssumeBoolOptionFalseInConf() throws Throwable {
+ Configuration conf = new Configuration(false);
+ conf.set("key", "false");
+ try {
+ assumeBoolOption(conf, "key", true);
+ fail("expected an exception");
+ } catch (AssumptionViolatedException ignored) {
+ //expected
+ }
+ }
+
+ @Test
+ public void testAssumeBoolOptionFalseUnsetInConf() throws Throwable {
+ Configuration conf = new Configuration(false);
+ try {
+ assumeBoolOption(conf, "key", false);
+ fail("expected an exception");
+ } catch (AssumptionViolatedException ignored) {
+ //expected
+ }
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java
new file mode 100644
index 0000000..00b50f0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestWindowsSupport.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumFileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.service.ServiceStateException;
+import org.apache.hadoop.util.Shell;
+import org.apache.slider.test.YarnMiniClusterTestBase;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
+
+public class TestWindowsSupport extends YarnMiniClusterTestBase {
+
+ private static final Pattern hasDriveLetterSpecifier =
+ Pattern.compile("^/?[a-zA-Z]:");
+ public static final String windowsFile =
+ "C:\\Users\\Administrator\\AppData\\Local\\Temp" +
+ "\\junit3180177850133852404\\testpkg\\appdef_1.zip";
+
+
+ private static boolean hasWindowsDrive(String path) {
+ return hasDriveLetterSpecifier.matcher(path).find();
+ }
+
+ private static int startPositionWithoutWindowsDrive(String path) {
+ if (hasWindowsDrive(path)) {
+ return path.charAt(0) == '/' ? 3 : 2;
+ } else {
+ return 0;
+ }
+ }
+
+ @Test
+ public void testHasWindowsDrive() throws Throwable {
+ assertTrue(hasWindowsDrive(windowsFile));
+ }
+
+ @Test
+ public void testStartPosition() throws Throwable {
+ assertEquals(2, startPositionWithoutWindowsDrive(windowsFile));
+ }
+
+  @Test
+  public void testPathHandling() throws Throwable {
+    assumeWindows();
+
+    Path path = new Path(windowsFile);
+    URI uri = path.toUri();
+    // a local windows path URI carries no authority component
+    assertNull(uri.getAuthority());
+
+    Configuration conf = new Configuration();
+
+    FileSystem localfs = FileSystem.get(uri, conf);
+    assertTrue(localfs instanceof ChecksumFileSystem);
+    try {
+      FileStatus stat = localfs.getFileStatus(path);
+      fail("expected an exception, got " + stat);
+    } catch (FileNotFoundException fnfe) {
+      // expected
+    }
+
+    try (FSDataInputStream appStream = localfs.open(path)) {
+      fail("expected an exception, got " + appStream);
+    } catch (FileNotFoundException fnfe) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testExecNonexistentBinary() throws Throwable {
+    assumeWindows();
+    List<String> commands = Arrays.asList("undefined-application", "--version");
+    try {
+      exec(0, commands);
+      fail("expected an exception");
+    } catch (ServiceStateException e) {
+      if (!(e.getCause() instanceof FileNotFoundException)) {
+        throw e;
+      }
+    }
+  }
+ @Test
+ public void testExecNonexistentBinary2() throws Throwable {
+ assumeWindows();
+ assertFalse(doesAppExist(Arrays.asList("undefined-application",
+ "--version")));
+ }
+
+ @Test
+ public void testEmitKillCommand() throws Throwable {
+
+ int result = killJavaProcesses("regionserver", 9);
+ // we know the exit code if there is no supported kill operation
+ assertTrue(kill_supported || result == -1);
+ }
+
+ @Test
+ public void testHadoopHomeDefined() throws Throwable {
+ assumeWindows();
+ String hadoopHome = Shell.getHadoopHome();
+ log.info("HADOOP_HOME={}", hadoopHome);
+ }
+
+ @Test
+ public void testHasWinutils() throws Throwable {
+ assumeWindows();
+ SliderUtils.maybeVerifyWinUtilsValid();
+ }
+
+ @Test
+ public void testExecWinutils() throws Throwable {
+ assumeWindows();
+ String winUtilsPath = Shell.getWinUtilsPath();
+ assertTrue(SliderUtils.isSet(winUtilsPath));
+ File winUtils = new File(winUtilsPath);
+ log.debug("Winutils is at {}", winUtils);
+
+ exec(0, Arrays.asList(winUtilsPath, "systeminfo"));
+ }
+
+ @Test
+ public void testPath() throws Throwable {
+ String path = extractPath();
+ log.info("Path value = {}", path);
+ }
+
+ @Test
+ public void testFindJavac() throws Throwable {
+ String name = Shell.WINDOWS ? "javac.exe" : "javac";
+ assertNotNull(locateExecutable(name));
+ }
+
+ @Test
+ public void testHadoopDLL() throws Throwable {
+ assumeWindows();
+ // split the path
+ File exepath = locateExecutable("HADOOP.DLL");
+ assertNotNull(exepath);
+ log.info("Hadoop DLL at: {}", exepath);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestZKIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestZKIntegration.java
new file mode 100644
index 0000000..994068e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/common/tools/TestZKIntegration.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.registry.server.services.MicroZookeeperServiceKeys;
+import org.apache.slider.client.SliderClient;
+import org.apache.slider.core.zk.ZKIntegration;
+import org.apache.slider.test.KeysForTests;
+import org.apache.slider.test.YarnZKMiniClusterTestBase;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooDefs;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+
+public class TestZKIntegration extends YarnZKMiniClusterTestBase implements
+ KeysForTests {
+ public static final String USER = KeysForTests.USERNAME;
+ public static final int CONNECT_TIMEOUT = 5000;
+ private ZKIntegration zki;
+
+ @Before
+ public void createCluster() {
+ Configuration conf = getConfiguration();
+ String name = methodName.getMethodName();
+ File zkdir = new File("target/zk/" + name); // was "${name}": leftover Groovy GString made one literal shared dir for every test
+ FileUtil.fullyDelete(zkdir);
+ conf.set(MicroZookeeperServiceKeys.KEY_ZKSERVICE_DIR, zkdir.getAbsolutePath());
+ createMicroZKCluster("-"+ name, conf);
+ }
+
+ @After
+ public void closeZKI() throws IOException {
+ if (zki != null) {
+ zki.close();
+ zki = null;
+ }
+ }
+
+ public ZKIntegration initZKI() throws IOException, InterruptedException {
+ zki = createZKIntegrationInstance(
+ getZKBinding(), methodName.getMethodName(), true, false,
+ CONNECT_TIMEOUT);
+ return zki;
+ }
+
+ @Test
+ public void testListUserClustersWithoutAnyClusters() throws Throwable {
+ assertHasZKCluster();
+ initZKI();
+ String userPath = ZKIntegration.mkSliderUserPath(USER);
+ List clusters = this.zki.getClusters();
+ assertTrue(SliderUtils.isEmpty(clusters));
+ }
+
+ @Test
+ public void testListUserClustersWithOneCluster() throws Throwable {
+ assertHasZKCluster();
+
+ initZKI();
+ String userPath = ZKIntegration.mkSliderUserPath(USER);
+ String fullPath = zki.createPath(userPath, "/cluster-",
+ ZooDefs.Ids.OPEN_ACL_UNSAFE,
+ CreateMode.EPHEMERAL_SEQUENTIAL);
+ log.info("Ephemeral path {}", fullPath);
+ List clusters = zki.getClusters();
+ assertEquals(1, clusters.size());
+ assertTrue(fullPath.endsWith(clusters.get(0)));
+ }
+
+ @Test
+ public void testListUserClustersWithTwoCluster() throws Throwable {
+ initZKI();
+ String userPath = ZKIntegration.mkSliderUserPath(USER);
+ String c1 = createEphemeralChild(zki, userPath);
+ log.info("Ephemeral path {}", c1); // was "$c1": Groovy interpolation leftover logged the literal string
+ String c2 = createEphemeralChild(zki, userPath);
+ log.info("Ephemeral path {}", c2); // was "$c2": same Groovy leftover
+ List clusters = zki.getClusters();
+ assertEquals(2, clusters.size());
+ assertTrue((c1.endsWith(clusters.get(0)) && c2.endsWith(clusters.get(1))) ||
+ (c1.endsWith(clusters.get(1)) && c2.endsWith(clusters.get(0))));
+ }
+
+ @Test
+ public void testCreateAndDeleteDefaultZKPath() throws Throwable {
+ MockSliderClient client = new MockSliderClient();
+
+ String path = client.createZookeeperNodeInner("cl1", true);
+ zki = client.getLastZKIntegration();
+
+ String zkPath = ZKIntegration.mkClusterPath(USER, "cl1");
+ assertEquals("zkPath must be as expected", zkPath,
+ "/services/slider/users/" + USER + "/cl1");
+ assertEquals(path, zkPath);
+ assertNull("ZKIntegration should be null.", zki);
+ zki = createZKIntegrationInstance(getZKBinding(), "cl1", true, false, CONNECT_TIMEOUT);
+ assertFalse(zki.exists(zkPath));
+
+ path = client.createZookeeperNodeInner("cl1", false);
+ zki = client.getLastZKIntegration();
+ assertNotNull(zki);
+ assertEquals("zkPath must be as expected", zkPath,
+ "/services/slider/users/" + USER + "/cl1");
+ assertEquals(path, zkPath);
+ assertTrue(zki.exists(zkPath));
+ zki.createPath(zkPath, "/cn", ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+ assertTrue(zki.exists(zkPath + "/cn"));
+ client.deleteZookeeperNode("cl1");
+ assertFalse(zki.exists(zkPath));
+ }
+
+ public String createEphemeralChild(ZKIntegration zki, String userPath)
+ throws KeeperException, InterruptedException {
+ return zki.createPath(userPath, "/cluster-",
+ ZooDefs.Ids.OPEN_ACL_UNSAFE,
+ CreateMode.EPHEMERAL_SEQUENTIAL);
+ }
+
+ public class MockSliderClient extends SliderClient {
+ private ZKIntegration zki;
+
+ @Override
+ public String getUsername() {
+ return USER;
+ }
+
+ @Override
+ protected ZKIntegration getZkClient(String clusterName, String user) {
+ try {
+ zki = createZKIntegrationInstance(getZKBinding(), clusterName, true, false, CONNECT_TIMEOUT);
+ } catch (Exception e) {
+ fail("creating ZKIntegration threw an exception"); // fixed typo "ZKIntergration" in the failure message
+ }
+ return zki;
+ }
+
+ @Override
+ public Configuration getConfig() {
+ return new Configuration();
+ }
+
+ public ZKIntegration getLastZKIntegration() {
+ return zki;
+ }
+
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java
new file mode 100644
index 0000000..06875ee
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/ExampleConfResources.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.conf;
+
+import org.apache.slider.core.persist.JsonSerDeser;
+import org.apache.slider.providers.slideram.SliderAMClientProvider;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/*
+ names of the example configs
+ */
+
+public class ExampleConfResources {
+
+ public static final String overridden = "overridden.json";
+ public static final String overriddenRes = "overridden-resolved.json";
+ public static final String internal = "internal.json";
+ public static final String internalRes = "internal-resolved.json";
+ public static final String app_configuration = "app_configuration.json";
+ public static final String app_configurationRes = "app_configuration-resolved.json";
+ public static final String resources = "resources.json";
+ public static final String empty = "empty.json";
+
+ public static final String PACKAGE = "/org/apache/slider/core/conf/examples/";
+
+
+ public static final String[] all_examples = {overridden, overriddenRes,
+ internal, internalRes, app_configuration, app_configurationRes,
+ resources, empty};
+
+ public static final List all_example_resources = new ArrayList<>();
+ static {
+ for (String example : all_examples) {
+ all_example_resources.add(PACKAGE + example);
+ }
+
+ all_example_resources.add(SliderAMClientProvider.RESOURCES_JSON);
+ all_example_resources.add(SliderAMClientProvider.INTERNAL_JSON);
+ all_example_resources.add(SliderAMClientProvider.APPCONF_JSON);
+
+ }
+
+ /**
+ * Build up an aggregate conf by loading in the details of the individual resources
+ * and then aggregating them
+ * @return a new instance
+ */
+ public static AggregateConf loadExampleAggregateResource() throws
+ IOException {
+ JsonSerDeser confTreeJsonSerDeser =
+ new JsonSerDeser<>(ConfTree.class);
+ ConfTree internalConf = confTreeJsonSerDeser.fromResource(PACKAGE +
+ internal);
+ ConfTree appConf = confTreeJsonSerDeser.fromResource(PACKAGE +
+ app_configuration);
+ ConfTree resourcesConf = confTreeJsonSerDeser.fromResource(PACKAGE +
+ resources);
+ AggregateConf aggregateConf = new AggregateConf(
+ resourcesConf,
+ appConf,
+ internalConf);
+ return aggregateConf;
+ }
+
+ static ConfTree loadResource(String name) throws IOException {
+ JsonSerDeser confTreeJsonSerDeser =
+ new JsonSerDeser<>(ConfTree.class);
+ return confTreeJsonSerDeser.fromResource(PACKAGE + name);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java
new file mode 100644
index 0000000..b48ac9c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeLoadExamples.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.conf;
+
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.persist.JsonSerDeser;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+
+/**
+ * Test
+ */
+@RunWith(value = Parameterized.class)
+public class TestConfTreeLoadExamples extends Assert {
+
+ String resource;
+
+ static final JsonSerDeser confTreeJsonSerDeser =
+ new JsonSerDeser<>(ConfTree.class);
+
+ public TestConfTreeLoadExamples(String resource) {
+ this.resource = resource;
+ }
+
+ @Parameterized.Parameters
+ public static Collection filenames() {
+ String[][] stringArray = new String[ExampleConfResources
+ .all_example_resources.size()][1];
+ int i = 0;
+ for (String s : ExampleConfResources.all_example_resources) {
+ stringArray[i++][0] = s;
+ }
+ return Arrays.asList(stringArray);
+ }
+
+ @Test
+ public void testLoadResource() throws Throwable {
+ ConfTree confTree = confTreeJsonSerDeser.fromResource(resource);
+ ConfTreeOperations ops = new ConfTreeOperations(confTree);
+ ops.resolve();
+ ops.validate();
+ }
+
+ @Test
+ public void testLoadResourceWithValidator() throws Throwable {
+ ConfTree confTree = confTreeJsonSerDeser.fromResource(resource);
+ ConfTreeOperations ops = new ConfTreeOperations(confTree);
+ ops.resolve();
+ if (resource.endsWith("resources.json")) {
+ // these should pass since they are configured correctly with "yarn."
+ // properties
+ ops.validate(new ResourcesInputPropertiesValidator());
+ } else if (resource.contains("app_configuration")) { // was startsWith: resource is a full "/org/..." path, so that branch never matched
+ ops.validate(new TemplateInputPropertiesValidator());
+ }
+ else {
+ // these have properties with other prefixes so they should generate
+ // BadConfigExceptions
+ try {
+ ops.validate(new ResourcesInputPropertiesValidator());
+ if ( !resource.endsWith(ExampleConfResources.empty)) {
+ fail (resource + " should have generated validation exception");
+ }
+ } catch (BadConfigException e) {
+ // ignore
+ }
+
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeResolve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeResolve.java
new file mode 100644
index 0000000..1383089
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/conf/TestConfTreeResolve.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.conf;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+
+import static org.apache.slider.api.InternalKeys.CHAOS_MONKEY_INTERVAL;
+import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS;
+import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS;
+import static org.apache.slider.api.InternalKeys.DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES;
+import static org.apache.slider.core.conf.ExampleConfResources.internal;
+import static org.apache.slider.core.conf.ExampleConfResources.overridden;
+
+/**
+ * Test
+ */
+public class TestConfTreeResolve extends Assert {
+ protected static final Logger log =
+ LoggerFactory.getLogger(TestConfTreeResolve.class);
+
+ @Test
+ public void testOverride() throws Throwable {
+
+ ConfTree orig = ExampleConfResources.loadResource(overridden);
+
+ ConfTreeOperations origOperations = new ConfTreeOperations(orig);
+ origOperations.validate();
+
+
+ MapOperations global = origOperations.getGlobalOptions();
+ assertEquals("a", global.get("g1"));
+ assertEquals("b", global.get("g2"));
+
+ MapOperations simple = origOperations.getMandatoryComponent("simple");
+ assertEquals(0, simple.size());
+
+ MapOperations master = origOperations.getMandatoryComponent("master");
+ assertEquals("m", master.get("name"));
+ assertEquals("overridden", master.get("g1"));
+
+ MapOperations worker = origOperations.getMandatoryComponent("worker");
+ log.info("worker = {}", worker);
+ assertEquals(3, worker.size());
+
+ assertEquals("worker", worker.get("name"));
+ assertEquals("overridden-by-worker", worker.get("g1"));
+ assertNull(worker.get("g2"));
+ assertEquals("1000", worker.get("timeout"));
+
+ // here is the resolution
+ origOperations.resolve();
+
+ global = origOperations.getGlobalOptions();
+ log.info("global = {}", global);
+ assertEquals("a", global.get("g1"));
+ assertEquals("b", global.get("g2"));
+
+ simple = origOperations.getMandatoryComponent("simple");
+ assertEquals(2, simple.size());
+ simple.getMandatoryOption("g1");
+ assertNotNull(simple.get("g1"));
+
+
+ master = origOperations.getMandatoryComponent("master");
+ log.info("master = {}", master);
+ assertEquals(3, master.size());
+ assertEquals("m", master.get("name"));
+ assertEquals("overridden", master.get("g1"));
+ assertEquals("b", master.get("g2"));
+
+ worker = origOperations.getMandatoryComponent("worker");
+ log.info("worker = {}", worker);
+ assertEquals(4, worker.size());
+
+ assertEquals("worker", worker.get("name"));
+ assertEquals("overridden-by-worker", worker.get("g1"));
+ assertEquals("b", worker.get("g2"));
+ assertEquals("1000", worker.get("timeout"));
+
+ }
+
+ @Test
+ public void testTimeIntervalLoading() throws Throwable {
+
+ ConfTree orig = ExampleConfResources.loadResource(internal);
+
+ MapOperations internals = new MapOperations("internal", orig.global);
+ int s = internals.getOptionInt(
+ CHAOS_MONKEY_INTERVAL + MapOperations.SECONDS,
+ 0);
+ assertEquals(60, s);
+ long monkeyInterval = internals.getTimeRange(
+ CHAOS_MONKEY_INTERVAL,
+ DEFAULT_CHAOS_MONKEY_INTERVAL_DAYS,
+ DEFAULT_CHAOS_MONKEY_INTERVAL_HOURS,
+ DEFAULT_CHAOS_MONKEY_INTERVAL_MINUTES,
+ 0);
+ assertEquals(60L, monkeyInterval);
+ }
+
+ @Test
+ public void testPrefix() throws Throwable {
+ ConfTree orig = ExampleConfResources.loadResource(overridden);
+ ConfTreeOperations cto = new ConfTreeOperations(orig);
+ cto.resolve();
+ Map prefixed =
+ cto.getComponent("worker").prefixedWith("g");
+ assertEquals(2, prefixed.size());
+ assertEquals("overridden-by-worker", prefixed.get("g1"));
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java
index b955931..cb51a82 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncher.java
@@ -18,12 +18,6 @@
package org.apache.slider.core.launch;
-import java.lang.reflect.Method;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
@@ -35,6 +29,12 @@
import org.junit.Before;
import org.junit.Test;
+import java.lang.reflect.Method;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
public class TestAppMasterLauncher {
SliderYarnClientImpl mockYarnClient;
YarnClientApplication yarnClientApp;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java
index a8f6b26..c11f493 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/launch/TestAppMasterLauncherWithAmReset.java
@@ -18,11 +18,6 @@
package org.apache.slider.core.launch;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
@@ -35,6 +30,11 @@
import org.junit.Before;
import org.junit.Test;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
public class TestAppMasterLauncherWithAmReset {
SliderYarnClientImpl mockYarnClient;
YarnClientApplication yarnClientApp;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/main/ServiceLauncherBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/main/ServiceLauncherBaseTest.java
new file mode 100644
index 0000000..067984e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/main/ServiceLauncherBaseTest.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.main;
+
+import org.apache.slider.test.SliderTestBase;
+
+/**
+ * Base class for tests that use the service launcher
+ */
+public class ServiceLauncherBaseTest extends SliderTestBase {
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/persist/TestConfPersisterLocksHDFS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/persist/TestConfPersisterLocksHDFS.java
new file mode 100644
index 0000000..03c4a7f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/persist/TestConfPersisterLocksHDFS.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.persist;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.tools.CoreFileSystem;
+import org.apache.slider.test.YarnMiniClusterTestBase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+public class TestConfPersisterLocksHDFS extends YarnMiniClusterTestBase {
+ public static MiniDFSCluster hdfs;
+ public static YarnConfiguration conf = new YarnConfiguration();
+ public static CoreFileSystem coreFileSystem;
+ public static URI fsURI;
+ public static FileSystem dfsClient;
+
+ public TestConfPersisterLocksHDFS() {
+
+ }
+
+ @BeforeClass
+ public static void createCluster() throws IOException, URISyntaxException {
+ hdfs = buildMiniHDFSCluster(
+ "TestConfPersister",
+ conf);
+
+ fsURI = new URI(buildFsDefaultName(hdfs));
+ dfsClient = FileSystem.get(fsURI, conf);
+ coreFileSystem = new CoreFileSystem(dfsClient, conf);
+ }
+
+ @AfterClass
+ public static void destroyCluster() {
+ if (hdfs != null) {
+ hdfs.shutdown();
+ }
+ hdfs = null;
+ }
+
+ /**
+ * Create the persister. This also creates the destination directory
+ * @param name name of cluster
+ * @return a conf persister
+ */
+ public ConfPersister createPersister(String name) throws IOException {
+ Path path = coreFileSystem.buildClusterDirPath(name);
+ ConfPersister persister = new ConfPersister(
+ coreFileSystem,
+ path);
+ coreFileSystem.getFileSystem().mkdirs(path);
+ return persister;
+ }
+
+ @Test
+ public void testReleaseNonexistentWritelock() throws Exception {
+
+ ConfPersister persister = createPersister
+ ("testReleaseNonexistentWritelock");
+ assertFalse(persister.releaseWritelock());
+ }
+
+
+ @Test
+ public void testAcqRelWriteLock() throws Throwable {
+ ConfPersister persister = createPersister("testAcqRelWriteLock");
+ persister.acquireWritelock();
+ assertTrue(persister.releaseWritelock());
+ assertFalse(persister.releaseWritelock());
+ }
+
+ @Test
+ public void testSecondWriteLockAcqFails() throws Throwable {
+ ConfPersister persister = createPersister("testSecondWriteLockAcqFails");
+ persister.acquireWritelock();
+ try {
+ persister.acquireWritelock();
+ fail("write lock acquired twice");
+ } catch (LockAcquireFailedException lafe) {
+ //expected
+ assertTrue(lafe.getPath().toString().endsWith(Filenames.WRITELOCK));
+ }
+ assertTrue(persister.releaseWritelock());
+
+ //now we can ask for it
+ persister.acquireWritelock();
+ }
+
+ @Test
+ public void testReleaseNonexistentReadlockOwner() throws Exception {
+ ConfPersister persister = createPersister
+ ("testReleaseNonexistentReadlock");
+ assertFalse(persister.releaseReadlock(true));
+ }
+
+ @Test
+ public void testReleaseNonexistentReadlock() throws Exception {
+ ConfPersister persister = createPersister
+ ("testReleaseNonexistentReadlock");
+ assertFalse(persister.releaseReadlock(false));
+ }
+
+ @Test
+ public void testAcqRelReadlock() throws Exception {
+ ConfPersister persister = createPersister("testAcqRelReadlock");
+ assertTrue(persister.acquireReadLock());
+ assertTrue(persister.readLockExists());
+
+ assertFalse(persister.releaseReadlock(false));
+ assertTrue(persister.readLockExists());
+ assertTrue(persister.releaseReadlock(true));
+ }
+
+ @Test
+ public void testAcqAcqRelReadlock() throws Exception {
+ ConfPersister persister = createPersister("testAcqAcqRelReadlock"); // was "testAcqRelReadlock": reusing another test's cluster dir risks lock-file interference
+ assertTrue(persister.acquireReadLock());
+ assertTrue(persister.readLockExists());
+ assertFalse(persister.acquireReadLock());
+ assertTrue(persister.readLockExists());
+
+ assertFalse(persister.releaseReadlock(false));
+ assertTrue(persister.readLockExists());
+ assertTrue(persister.releaseReadlock(true));
+ assertFalse(persister.readLockExists());
+ }
+
+ @Test
+ public void testAcqAcqRelReadlockOtherOrderOfRelease() throws Exception {
+ ConfPersister persister = createPersister("testAcqAcqRelReadlockOtherOrderOfRelease"); // was "testAcqRelReadlock": reusing another test's cluster dir risks lock-file interference
+ assertTrue(persister.acquireReadLock());
+ assertTrue(persister.readLockExists());
+ assertFalse(persister.acquireReadLock());
+ assertTrue(persister.readLockExists());
+
+ assertTrue(persister.releaseReadlock(true));
+ assertFalse(persister.readLockExists());
+ assertFalse(persister.releaseReadlock(false));
+
+ }
+
+
+ @Test
+ public void testNoReadlockWhenWriteHeld() throws Throwable {
+ ConfPersister persister = createPersister("testNoReadlockWhenWriteHeld");
+ persister.acquireWritelock();
+ try {
+ persister.acquireReadLock();
+ fail("read lock acquired");
+ } catch (LockAcquireFailedException lafe) {
+ //expected
+ assertWritelockBlocked(lafe);
+ }
+ assertTrue(persister.releaseWritelock());
+ assertFalse(persister.writelockExists());
+
+ //now we can ask for it
+ persister.acquireReadLock();
+ }
+
+ public void assertWritelockBlocked(LockAcquireFailedException lafe) {
+ assertTrue(lafe.getPath().toString().endsWith(Filenames.WRITELOCK));
+ }
+
+ public void assertReadlockBlocked(LockAcquireFailedException lafe) {
+ assertTrue(lafe.getPath().toString().endsWith(Filenames.READLOCK));
+ }
+
+ @Test
+ public void testNoWritelockWhenReadHeld() throws Throwable {
+ ConfPersister persister = createPersister("testNoWritelockWhenReadHeld");
+ assertTrue(persister.acquireReadLock());
+ try {
+ persister.acquireWritelock();
+ fail("write lock acquired");
+ } catch (LockAcquireFailedException lafe) {
+ //expected
+ assertReadlockBlocked(lafe);
+ }
+ assertTrue(persister.releaseReadlock(true));
+
+ //now we can ask for it
+ persister.acquireWritelock();
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/persist/TestConfPersisterReadWrite.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/persist/TestConfPersisterReadWrite.java
new file mode 100644
index 0000000..c421a57
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/core/persist/TestConfPersisterReadWrite.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.persist;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.common.tools.CoreFileSystem;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.conf.ExampleConfResources;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.test.YarnMiniClusterTestBase;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+public class TestConfPersisterReadWrite extends YarnMiniClusterTestBase {
+ private static YarnConfiguration conf = new YarnConfiguration();
+ static CoreFileSystem coreFileSystem;
+ static URI fsURI;
+ static FileSystem dfsClient;
+ static final JsonSerDeser confTreeJsonSerDeser =
+ new JsonSerDeser<>(ConfTree.class);
+ AggregateConf aggregateConf = ExampleConfResources
+ .loadExampleAggregateResource();
+
+
+ public TestConfPersisterReadWrite() throws IOException {
+ }
+
+ @BeforeClass
+ public static void createCluster() throws URISyntaxException, IOException {
+ fsURI = new URI(buildFsDefaultName(null));
+ dfsClient = FileSystem.get(fsURI, conf);
+ coreFileSystem = new CoreFileSystem(dfsClient, conf);
+ }
+
+ /**
+ * Create the persister. This also creates the destination directory
+ * @param name name of cluster
+ * @return a conf persister
+ */
+ public ConfPersister createPersister(String name) throws IOException {
+ Path path = coreFileSystem.buildClusterDirPath(name);
+ ConfPersister persister = new ConfPersister(
+ coreFileSystem,
+ path);
+ coreFileSystem.getFileSystem().mkdirs(path);
+ return persister;
+ }
+
+ @Test
+ public void testSaveLoadEmptyConf() throws Throwable {
+ AggregateConf aggregateConf = new AggregateConf();
+
+ ConfPersister persister = createPersister("testSaveLoad");
+ persister.save(aggregateConf, null);
+ AggregateConf loaded = new AggregateConf();
+ persister.load(loaded);
+ loaded.validate();
+ }
+
+
+ @Test
+ public void testSaveLoadTestConf() throws Throwable {
+ ConfPersister persister = createPersister("testSaveLoadTestConf");
+ persister.save(aggregateConf, null);
+ AggregateConf loaded = new AggregateConf();
+ persister.load(loaded);
+ loaded.validate();
+ }
+
+
+
+ @Test
+ public void testSaveLoadTestConfResolveAndCheck() throws Throwable {
+ ConfTreeOperations appConfOperations = aggregateConf.getAppConfOperations();
+ appConfOperations.getMandatoryComponent("master").put("PATH", ".");
+ ConfPersister persister = createPersister("testSaveLoadTestConf");
+ persister.save(aggregateConf, null);
+ AggregateConf loaded = new AggregateConf();
+ persister.load(loaded);
+ loaded.validate();
+ loaded.resolve();
+ ConfTreeOperations resources = loaded.getResourceOperations();
+ MapOperations master = resources.getMandatoryComponent("master");
+ assertEquals("1024", master.get("yarn.memory"));
+
+ ConfTreeOperations appConfOperations2 = loaded.getAppConfOperations();
+ assertEquals(".", appConfOperations2.getMandatoryComponent("master")
+ .get("PATH"));
+ }
+
+ @Test
+ public void testSaveFailsIfWritelocked() throws Throwable {
+ ConfPersister persister = createPersister("testSaveFailsIfWritelocked");
+ persister.releaseWritelock();
+ persister.acquireWritelock();
+ try {
+ expectSaveToFailOnLock(persister, aggregateConf);
+ } finally {
+ persister.releaseWritelock();
+ }
+ }
+
+ @Test
+ public void testSaveFailsIfReadlocked() throws Throwable {
+ ConfPersister persister = createPersister("testSaveFailsIfReadlocked");
+ persister.releaseWritelock();
+ persister.acquireReadLock();
+ try {
+ expectSaveToFailOnLock(persister, aggregateConf);
+ } finally {
+ persister.releaseReadlock(true);
+ }
+ }
+
+ @Test
+ public void testLoadFailsIfWritelocked() throws Throwable {
+ ConfPersister persister = createPersister("testLoadFailsIfWritelocked");
+ persister.acquireWritelock();
+ try {
+ expectLoadToFailOnLock(persister, aggregateConf);
+ } finally {
+ persister.releaseWritelock();
+ }
+ }
+
+ @Test
+ public void testLoadFailsIfDestDoesNotExist() throws Throwable {
+ ConfPersister persister = createPersister
+ ("testLoadFailsIfDestDoesNotExist");
+ try {
+ persister.load(aggregateConf);
+ fail("expected save to fail to find a file");
+ } catch (FileNotFoundException e) {
+ //expected
+ }
+ }
+
+ @Test
+ public void testLoadSucceedsIfReadlocked() throws Throwable {
+ ConfPersister persister = createPersister("testLoadSucceedsIfReadlocked");
+ persister.releaseReadlock(true);
+ try {
+ persister.save(aggregateConf, null);
+ persister.acquireReadLock();
+ AggregateConf loaded = new AggregateConf();
+ persister.load(loaded);
+ loaded.validate();
+ loaded.resolve();
+ } finally {
+ persister.releaseReadlock(true);
+ }
+ }
+
+ public void expectSaveToFailOnLock(
+ ConfPersister persister,
+ AggregateConf aggregateConf) throws IOException, SliderException {
+ try {
+ persister.save(aggregateConf, null);
+ fail("expected save to fail to get a lock");
+ } catch (LockAcquireFailedException lafe) {
+ //expected
+ }
+ }
+
+
+ public void expectLoadToFailOnLock(
+ ConfPersister persister,
+ AggregateConf aggregateConf) throws IOException, SliderException {
+ try {
+ persister.load(aggregateConf);
+ fail("expected save to fail to get a lock");
+ } catch (LockAcquireFailedException lafe) {
+ //expected
+ }
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java
new file mode 100644
index 0000000..48fb9ce
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestFilesystemPermissions.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.other;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
+import org.apache.slider.test.YarnMiniClusterTestBase;
+import org.junit.After;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This test class exists to look at permissions of the filesystem, especially
+ * that created by Mini YARN clusters. On some windows jenkins machines,
+ * YARN actions were failing as the directories had the wrong permissions
+ * (i.e. too lax)
+ */
+public class TestFilesystemPermissions extends YarnMiniClusterTestBase {
+
+ static final Logger LOG = LoggerFactory.getLogger(TestFilesystemPermissions
+ .class);
+
+ List filesToDelete = new ArrayList<>();
+
+ @After
+ public void deleteFiles() {
+ for (File f : filesToDelete) {
+ FileUtil.fullyDelete(f, true);
+ }
+ }
+
+ @Test
+ public void testJavaFSOperations() throws Throwable {
+ assertNativeLibrariesPresent();
+ File subdir = testDir();
+ subdir.mkdir();
+ assertTrue(subdir.isDirectory());
+ assertTrue(FileUtil.canRead(subdir));
+ assertTrue(FileUtil.canWrite(subdir));
+ assertTrue(FileUtil.canExecute(subdir));
+ }
+
+ @Test
+ public void testDiskCheckerOperations() throws Throwable {
+ assertNativeLibrariesPresent();
+ File subdir = testDir();
+ subdir.mkdir();
+ DiskChecker checker = new DiskChecker();
+ checker.checkDir(subdir);
+ }
+
+ @Test
+ public void testDiskCheckerMkdir() throws Throwable {
+ assertNativeLibrariesPresent();
+ File subdir = testDir();
+ subdir.mkdirs();
+ DiskChecker checker = new DiskChecker();
+ checker.checkDir(subdir);
+ }
+
+ /**
+ * Get a test dir for this method; one that will be deleted on teardown
+ * @return a filename unique to this test method
+ */
+ File testDir() {
+ File parent = new File("target/testfspermissions");
+ parent.mkdir();
+ File testdir = new File(parent, methodName.getMethodName());
+ filesToDelete.add(testdir);
+ return testdir;
+ }
+
+
+ @Test
+ public void testPermsMap() throws Throwable {
+ File dir = testDir();
+ String diruri = dir.toURI().toString();
+ FileContext lfs = createLocalFS(dir, getConfiguration());
+ getLocalDirsPathPermissionsMap(lfs, diruri);
+ }
+
+ @Test
+ public void testInitLocaldir() throws Throwable {
+ File dir = testDir();
+ String diruri = dir.toURI().toString();
+ FileContext lfs = createLocalFS(dir, getConfiguration());
+ initializeLocalDir(lfs, diruri);
+ List localDirs = getInitializedLocalDirs(lfs, Arrays.asList
+ (diruri));
+ assertEquals(1, localDirs.size());
+ }
+
+
+ @Test
+ public void testValidateMiniclusterPerms() throws Throwable {
+ int numLocal = 1;
+ String cluster = createMiniCluster("", getConfiguration(), 1, numLocal, 1,
+ false);
+ File workDir = miniCluster.getTestWorkDir();
+ List localdirs = new ArrayList<>();
+ for (File file : workDir.listFiles()) {
+ if (file.isDirectory() && file.getAbsolutePath().contains("-local")) {
+ // local dir
+ localdirs.add(file);
+ }
+ }
+ assertEquals(numLocal, localdirs.size());
+ FileContext lfs = createLocalFS(workDir, getConfiguration());
+ for (File file : localdirs) {
+ checkLocalDir(lfs, file.toURI().toString());
+ }
+ }
+
+ FileContext createLocalFS(File dir, Configuration conf)
+ throws UnsupportedFileSystemException {
+ return FileContext.getFileContext(dir.toURI(), conf);
+ }
+
+ /**
+ * extracted from ResourceLocalizationService
+ * @param lfs
+ * @param localDir
+ * @return perms map
+ * @see ResourceLocalizationService
+ */
+ private Map getLocalDirsPathPermissionsMap(
+ FileContext lfs,
+ String localDir) {
+ Map localDirPathFsPermissionsMap = new HashMap<>();
+
+ FsPermission defaultPermission =
+ FsPermission.getDirDefault().applyUMask(lfs.getUMask());
+ FsPermission nmPrivatePermission =
+ ResourceLocalizationService.NM_PRIVATE_PERM.applyUMask(lfs.getUMask());
+
+ Path userDir = new Path(localDir, ContainerLocalizer.USERCACHE);
+ Path fileDir = new Path(localDir, ContainerLocalizer.FILECACHE);
+ Path sysDir = new Path(
+ localDir,
+ ResourceLocalizationService.NM_PRIVATE_DIR);
+
+ localDirPathFsPermissionsMap.put(userDir, defaultPermission);
+ localDirPathFsPermissionsMap.put(fileDir, defaultPermission);
+ localDirPathFsPermissionsMap.put(sysDir, nmPrivatePermission);
+ return localDirPathFsPermissionsMap;
+ }
+
+ private boolean checkLocalDir(FileContext lfs, String localDir)
+ throws IOException {
+
+ Map pathPermissionMap =
+ getLocalDirsPathPermissionsMap(lfs, localDir);
+
+ for (Map.Entry entry : pathPermissionMap.entrySet()) {
+ FileStatus status;
+ status = lfs.getFileStatus(entry.getKey());
+
+ if (!status.getPermission().equals(entry.getValue())) {
+ String msg =
+ "Permissions incorrectly set for dir " + entry.getKey() +
+ ", should be " + entry.getValue() + ", actual value = " +
+ status.getPermission();
+ throw new YarnRuntimeException(msg);
+ }
+ }
+ return true;
+ }
+
+
+ private void initializeLocalDir(FileContext lfs, String localDir)
+ throws IOException {
+
+ Map pathPermissionMap =
+ getLocalDirsPathPermissionsMap(lfs, localDir);
+ for (Map.Entry entry : pathPermissionMap.entrySet()) {
+ FileStatus status;
+ try {
+ status = lfs.getFileStatus(entry.getKey());
+ }
+ catch (FileNotFoundException fs) {
+ status = null;
+ }
+
+ if (status == null) {
+ lfs.mkdir(entry.getKey(), entry.getValue(), true);
+ status = lfs.getFileStatus(entry.getKey());
+ }
+ FsPermission perms = status.getPermission();
+ if (!perms.equals(entry.getValue())) {
+ lfs.setPermission(entry.getKey(), entry.getValue());
+ }
+ }
+ }
+
+ synchronized private List getInitializedLocalDirs(FileContext lfs,
+ List dirs) throws IOException {
+ List checkFailedDirs = new ArrayList();
+ for (String dir : dirs) {
+ try {
+ checkLocalDir(lfs, dir);
+ } catch (YarnRuntimeException e) {
+ checkFailedDirs.add(dir);
+ }
+ }
+ for (String dir : checkFailedDirs) {
+ LOG.info("Attempting to initialize " + dir);
+ initializeLocalDir(lfs, dir);
+ checkLocalDir(lfs, dir);
+ }
+ return dirs;
+ }
+
+
+ private void createDir(FileContext localFs, Path dir, FsPermission perm)
+ throws IOException {
+ if (dir == null) {
+ return;
+ }
+ try {
+ localFs.getFileStatus(dir);
+ } catch (FileNotFoundException e) {
+ createDir(localFs, dir.getParent(), perm);
+ localFs.mkdir(dir, perm, false);
+ if (!perm.equals(perm.applyUMask(localFs.getUMask()))) {
+ localFs.setPermission(dir, perm);
+ }
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java
new file mode 100644
index 0000000..aea38f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/other/TestLocalDirStatus.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.other;
+
+import org.apache.slider.test.SliderTestUtils;
+import org.apache.slider.tools.TestUtility;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+
+/**
+ * This test exists to diagnose local FS permissions
+ */
+public class TestLocalDirStatus extends SliderTestUtils {
+
+
+ public static final int SIZE = 0x200000;
+
+ @Test
+ public void testTempDir() throws Throwable {
+ File tmpf = null;
+ try {
+ tmpf = File.createTempFile("testl", ".bin");
+ createAndReadFile(tmpf, SIZE);
+ tmpf.delete();
+ assertFalse(tmpf.exists());
+ } finally {
+ if (tmpf != null) {
+ tmpf.delete();
+ }
+ }
+ }
+
+ @Test
+ public void testTargetDir() throws Throwable {
+ File target = target();
+ File tmpf = null;
+ try {
+ tmpf = File.createTempFile("testl", ".bin", target);
+ createAndReadFile(tmpf, SIZE);
+ tmpf.delete();
+ assertFalse(tmpf.exists());
+ } finally {
+ if (tmpf != null) {
+ tmpf.delete();
+ }
+
+ }
+ }
+
+ public File target() {
+ File target = new File("target").getAbsoluteFile();
+ assertTrue(target.exists());
+ return target;
+ }
+
+ @Test
+ public void testRenameInTargetDir() throws Throwable {
+ File target = target();
+ File tmpf = null;
+ File dst= null;
+ try {
+ tmpf = File.createTempFile("testl", ".bin", target);
+ dst = File.createTempFile("test-dest", ".bin", target);
+ createRenameAndReadFile(tmpf, dst, SIZE);
+ assertFalse(tmpf.exists());
+ dst.delete();
+ } finally {
+ if (tmpf != null) {
+ tmpf.delete();
+ }
+ if (dst != null) {
+ dst.delete();
+ }
+ }
+ }
+
+ @Test
+ public void testRenameInTmpDir() throws Throwable {
+ File tmpf = null;
+ File dst= null;
+ try {
+ tmpf = File.createTempFile("testl", ".bin");
+ dst = File.createTempFile("test-dest", ".bin");
+ createRenameAndReadFile(tmpf, dst, SIZE);
+ assertFalse(tmpf.exists());
+ dst.delete();
+ } finally {
+ if (tmpf != null) {
+ tmpf.delete();
+ }
+ if (dst != null) {
+ dst.delete();
+ }
+ }
+ }
+
+ protected void createAndReadFile(File path, int len) throws IOException {
+ byte[] dataset = TestUtility.dataset(len, 32, 128);
+ writeFile(path, dataset);
+ assertTrue(path.exists());
+ assertEquals(len, path.length());
+ byte[] persisted = readFile(path);
+ TestUtility.compareByteArrays(dataset, persisted, len);
+ }
+
+ protected void createRenameAndReadFile(File src, File dst , int len)
+ throws IOException {
+ byte[] dataset = TestUtility.dataset(len, 32, 128);
+ writeFile(src, dataset);
+ assertTrue(src.exists());
+ assertEquals(len, src.length());
+ dst.delete();
+ assertFalse(dst.exists());
+ assertTrue(src.renameTo(dst));
+ assertEquals(len, dst.length());
+ byte[] persisted = readFile(dst);
+ TestUtility.compareByteArrays(dataset, persisted, len);
+ }
+
+ protected void writeFile(File path, byte[] dataset)
+ throws IOException {
+ FileOutputStream out = new FileOutputStream(path);
+ try {
+ out.write(dataset);
+ out.flush();
+ } finally {
+ out.close();
+ }
+ }
+
+ protected byte[] readFile(File path) throws IOException {
+ assertTrue(path.getAbsoluteFile().exists());
+ assertTrue(path.getAbsoluteFile().isFile());
+ int len = (int)path.length();
+ byte[] dataset = new byte[len];
+ FileInputStream ins = new FileInputStream(path);
+ try {
+ ins.read(dataset);
+ } finally {
+ ins.close();
+ }
+ return dataset;
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
new file mode 100644
index 0000000..6302f63
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/providers/TestProviderFactory.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers;
+
+import org.apache.slider.providers.docker.DockerKeys;
+import org.apache.slider.providers.docker.DockerProviderFactory;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+public class TestProviderFactory {
+ @Test
+ public void testLoadAgentProvider() throws Throwable {
+ SliderProviderFactory factory = SliderProviderFactory
+ .createSliderProviderFactory(DockerKeys.PROVIDER_DOCKER);
+ assertTrue(factory instanceof DockerProviderFactory);
+ }
+
+ @Test
+ public void testCreateClientProvider() throws Throwable {
+ SliderProviderFactory factory = SliderProviderFactory.createSliderProviderFactory(
+ DockerKeys.PROVIDER_DOCKER);
+ assertNotNull(factory.createClientProvider());
+ }
+
+ @Test
+ public void testCreateProviderByClassname() throws Throwable {
+ SliderProviderFactory factory = SliderProviderFactory.createSliderProviderFactory(
+ DockerKeys.PROVIDER_DOCKER);
+ assertNotNull(factory.createServerProvider());
+ assertTrue(factory instanceof DockerProviderFactory);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java
new file mode 100644
index 0000000..9dbb52e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestConfigSetNaming.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.registry;
+
+import org.apache.slider.core.registry.docstore.PublishedConfigSet;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+public class TestConfigSetNaming {
+
+ void assertValid(String name) {
+ PublishedConfigSet.validateName(name);
+ }
+
+ void assertInvalid(String name) {
+ try {
+ PublishedConfigSet.validateName(name);
+ Assert.fail("Invalid name was unexpectedly parsed: " + name);
+ } catch (IllegalArgumentException expected) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testLowerCase() throws Throwable {
+ assertValid("abcdefghijklmnopqrstuvwxyz");
+ }
+
+ @Test
+ public void testUpperCaseInvalid() throws Throwable {
+ assertInvalid("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
+ }
+
+ @Test
+ public void testNumbers() throws Throwable {
+ assertValid("01234567890");
+ }
+
+ @Test
+ public void testChars() throws Throwable {
+ assertValid("a-_+");
+ }
+
+ @Test
+ public void testInvalids() throws Throwable {
+ for (String s : Arrays.asList(
+ "",
+ " ",
+ "*",
+ "a/b",
+ "b\\a",
+ "\"",
+ "'",
+ "\u0000",
+ "\u0f00",
+ "key.value",
+ "-",
+ "+",
+ "_",
+ "?")) {
+ assertInvalid(s);
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java
new file mode 100644
index 0000000..11d94c0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/registry/TestRegistryPaths.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.registry;
+
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.slider.core.registry.SliderRegistryUtils;
+import org.apache.slider.test.SliderTestUtils;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class TestRegistryPaths {
+
+ @Test
+ public void testHomedirKerberos() throws Throwable {
+ String home = RegistryUtils.homePathForUser("hbase@HADOOP.APACHE.ORG");
+ try {
+ assertEquals("/users/hbase", home);
+ } catch (AssertionError e) {
+ SliderTestUtils.skip("homedir filtering not yet in hadoop registry " +
+ "module");
+ }
+ }
+
+ @Test
+ public void testHomedirKerberosHost() throws Throwable {
+ String home = RegistryUtils.homePathForUser("hbase/localhost@HADOOP" +
+ ".APACHE.ORG");
+ try {
+ assertEquals("/users/hbase", home);
+ } catch (AssertionError e) {
+ SliderTestUtils.skip("homedir filtering not yet in hadoop registry " +
+ "module");
+ }
+ }
+
+ @Test
+ public void testRegistryPathForInstance() throws Throwable {
+ String path = SliderRegistryUtils.registryPathForInstance("instance");
+ assertTrue(path.endsWith("/instance"));
+ }
+
+ @Test
+ public void testPathResolution() throws Throwable {
+ String home = RegistryUtils.homePathForCurrentUser();
+ assertEquals(home, SliderRegistryUtils.resolvePath("~"));
+ assertEquals(home +"/", SliderRegistryUtils.resolvePath("~/"));
+ assertEquals(home +"/something", SliderRegistryUtils.resolvePath(
+ "~/something"));
+ assertEquals("~unresolved", SliderRegistryUtils.resolvePath(
+ "~unresolved"));
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java
new file mode 100644
index 0000000..8a35078
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/actions/TestActions.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.actions;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.slider.server.appmaster.SliderAppMaster;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.services.workflow.ServiceThreadFactory;
+import org.apache.slider.server.services.workflow.WorkflowExecutorService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestActions {
+ protected static final Logger log =
+ LoggerFactory.getLogger(TestActions.class);
+
+ QueueService queues;
+ WorkflowExecutorService<ExecutorService> executorService;
+
+
+ @Before
+ public void createService() {
+ queues = new QueueService();
+
+ Configuration conf = new Configuration();
+ queues.init(conf);
+
+ queues.start();
+
+ executorService = new WorkflowExecutorService<>("AmExecutor",
+ Executors.newCachedThreadPool(
+ new ServiceThreadFactory("AmExecutor", true)));
+
+ executorService.init(conf);
+ executorService.start();
+ }
+
+ @After
+ public void destroyService() {
+ ServiceOperations.stop(executorService);
+ ServiceOperations.stop(queues);
+ }
+
+ @Test
+ public void testBasicService() throws Throwable {
+ queues.start();
+ }
+
+ @Test
+ public void testDelayLogic() throws Throwable {
+ ActionNoteExecuted action = new ActionNoteExecuted("", 1000);
+ // delays are relative to "now"; allow generous scheduling slack
+
+ long delay = action.getDelay(TimeUnit.MILLISECONDS);
+ assertTrue(delay >= 800);
+ assertTrue(delay <= 1800);
+
+ ActionNoteExecuted a2 = new ActionNoteExecuted("a2", 10000);
+ assertTrue(action.compareTo(a2) < 0);
+ assertTrue(a2.compareTo(action) > 0);
+ assertEquals(0, action.compareTo(action));
+
+ }
+
+ @Test
+ public void testActionDelayedExecutorTermination() throws Throwable {
+ long start = System.currentTimeMillis();
+
+ ActionStopQueue stopAction = new ActionStopQueue(1000);
+ queues.scheduledActions.add(stopAction);
+ queues.run();
+ AsyncAction take = queues.actionQueue.take();
+ assertEquals(stopAction, take);
+ long stop = System.currentTimeMillis();
+ assertTrue(stop - start > 500);
+ assertTrue(stop - start < 1500);
+ }
+
+ @Test
+ public void testImmediateQueue() throws Throwable {
+ ActionNoteExecuted noteExecuted = new ActionNoteExecuted("executed", 0);
+ queues.put(noteExecuted);
+ queues.put(new ActionStopQueue(0));
+ QueueExecutor ex = new QueueExecutor(queues);
+ ex.run();
+ assertTrue(queues.actionQueue.isEmpty());
+ assertTrue(noteExecuted.executed.get());
+ }
+
+ @Test
+ public void testActionOrdering() throws Throwable {
+
+ ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
+ ActionStopQueue stop = new ActionStopQueue(1500);
+ ActionNoteExecuted note2 = new ActionNoteExecuted("note2", 800);
+
+ List<AsyncAction> actions = Arrays.asList(note1, stop, note2);
+ Collections.sort(actions);
+ assertEquals(note1, actions.get(0));
+ assertEquals(note2, actions.get(1));
+ assertEquals(stop, actions.get(2));
+ }
+
+ @Test
+ public void testDelayedQueueWithReschedule() throws Throwable {
+
+ ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
+ ActionStopQueue stop = new ActionStopQueue(1500);
+ ActionNoteExecuted note2 = new ActionNoteExecuted("note2", 800);
+
+ assertTrue(note2.compareTo(stop) < 0);
+ assertTrue(note1.getNanos() < note2.getNanos());
+ assertTrue(note2.getNanos() < stop.getNanos());
+ queues.schedule(note1);
+ queues.schedule(note2);
+ queues.schedule(stop);
+ // async to sync expected to run in order
+ runQueuesToCompletion();
+ assertTrue(note1.executed.get());
+ assertTrue(note2.executed.get());
+ }
+
+ public void runQueuesToCompletion() {
+ queues.run();
+ assertTrue(queues.scheduledActions.isEmpty());
+ assertFalse(queues.actionQueue.isEmpty());
+ QueueExecutor ex = new QueueExecutor(queues);
+ ex.run();
+ // flush all stop commands from the queue
+ queues.flushActionQueue(ActionStopQueue.class);
+
+ assertTrue(queues.actionQueue.isEmpty());
+ }
+
+ @Test
+ public void testRenewedActionFiresOnceAtLeast() throws Throwable {
+ ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
+ RenewingAction<ActionNoteExecuted> renewer = new RenewingAction<>(
+ note1,
+ 500,
+ 100,
+ TimeUnit.MILLISECONDS,
+ 3);
+ queues.schedule(renewer);
+ ActionStopQueue stop = new ActionStopQueue(4, TimeUnit.SECONDS);
+ queues.schedule(stop);
+ // this runs all the delayed actions FIRST, so can't be used
+ // to play tricks of renewing actions ahead of the stop action
+ runQueuesToCompletion();
+ assertEquals(1, renewer.executionCount.intValue());
+ assertEquals(1, note1.getExecutionCount());
+ // assert the renewed item is back in
+ assertTrue(queues.scheduledActions.contains(renewer));
+ }
+
+
+ @Test
+ public void testRenewingActionOperations() throws Throwable {
+ ActionNoteExecuted note1 = new ActionNoteExecuted("note1", 500);
+ RenewingAction<ActionNoteExecuted> renewer = new RenewingAction<>(
+ note1,
+ 100,
+ 100,
+ TimeUnit.MILLISECONDS,
+ 3);
+ queues.renewing("note", renewer);
+ assertTrue(queues.removeRenewingAction("note"));
+ queues.stop();
+ assertTrue(queues.waitForServiceToStop(10000));
+ }
+
+ public static class ActionNoteExecuted extends AsyncAction {
+ public final AtomicBoolean executed = new AtomicBoolean(false);
+ public final AtomicLong executionTimeNanos = new AtomicLong();
+ private final AtomicLong executionCount = new AtomicLong();
+
+ public ActionNoteExecuted(String text, int delay) {
+ super(text, delay);
+ }
+
+ @Override
+ public void execute(
+ SliderAppMaster appMaster,
+ QueueAccess queueService,
+ AppState appState) throws Exception {
+ log.info("Executing {}", name);
+ executed.set(true);
+ executionTimeNanos.set(System.nanoTime());
+ executionCount.incrementAndGet();
+ log.info(this.toString());
+
+ synchronized (this) {
+ this.notify();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return super.toString() + " executed=" + executed.get() + "; count=" +
+ executionCount.get() + ";";
+ }
+
+ public long getExecutionCount() {
+ return executionCount.get();
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
new file mode 100644
index 0000000..4a34929
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/BaseMockAppStateAATest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+
+import java.util.Arrays;
+
+/**
+ * class for basis of Anti-affine placement tests; sets up role2
+ * for anti-affinity
+ */
+public abstract class BaseMockAppStateAATest extends BaseMockAppStateTest
+ implements MockRoles {
+
+ /** Role status for the base AA role. */
+ protected RoleStatus aaRole;
+
+ /** Role status for the AA role requiring a node with the gpu label. */
+ protected RoleStatus gpuRole;
+
+ @Override
+ public AppStateBindingInfo buildBindingInfo() {
+ AppStateBindingInfo bindingInfo = super.buildBindingInfo();
+ bindingInfo.roles = Arrays.asList(
+ MockFactory.PROVIDER_ROLE0,
+ MockFactory.AAROLE_1_GPU,
+ MockFactory.AAROLE_2
+ );
+ return bindingInfo;
+ }
+
+ @Override
+ public void setup() throws Exception {
+ super.setup();
+ aaRole = lookupRole(MockFactory.AAROLE_2.name);
+ gpuRole = lookupRole(MockFactory.AAROLE_1_GPU.name);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java
new file mode 100644
index 0000000..e8fd7a8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAOvercapacity.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.slider.core.main.LauncherExitCodes;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test Anti-affine placement with a cluster of size 1
+ */
+public class TestMockAppStateAAOvercapacity extends BaseMockAppStateAATest
+ implements MockRoles {
+
+ private static final int NODES = 1;
+
+ @Override
+ public MockYarnEngine createYarnEngine() {
+ return new MockYarnEngine(NODES, 1);
+ }
+
+ void assertAllContainersAA() {
+ assertAllContainersAA(aaRole.getKey());
+ }
+
+ /**
+ * Ask for more AA instances than the single-node cluster can hold;
+ * kill the sole allocated container and verify placement recovers.
+ */
+ @Test
+ public void testOvercapacityRecovery() throws Throwable {
+
+ describe("Ask for 1 more than the no of available nodes;" +
+ " verify the state. kill the allocated container and review");
+ //more than expected
+ long desired = 3;
+ aaRole.setDesired(desired);
+ assertTrue(appState.getRoleHistory().canPlaceAANodes());
+
+ //first request
+ List<AbstractRMOperation> operations =
+ appState.reviewRequestAndReleaseNodes();
+ assertTrue(aaRole.isAARequestOutstanding());
+ assertEquals(desired - 1, aaRole.getPendingAntiAffineRequests());
+ List<AbstractRMOperation> operationsOut = new ArrayList<>();
+ // allocate and re-submit
+ List<RoleInstance> instances = submitOperations(operations,
+ EMPTY_ID_LIST, operationsOut);
+ assertEquals(1, instances.size());
+ assertAllContainersAA();
+
+ // expect an outstanding AA request to be unsatisfied
+ assertTrue(aaRole.getActual() < aaRole.getDesired());
+ assertEquals(0, aaRole.getRequested());
+ assertFalse(aaRole.isAARequestOutstanding());
+ assertEquals(desired - 1, aaRole.getPendingAntiAffineRequests());
+ List<Container> allocatedContainers = engine.execute(operations,
+ EMPTY_ID_LIST);
+ assertEquals(0, allocatedContainers.size());
+
+ // now lets trigger a failure
+ NodeMap nodemap = cloneNodemap();
+ assertEquals(1, nodemap.size());
+
+ RoleInstance instance = instances.get(0);
+ ContainerId cid = instance.getContainerId();
+
+ AppState.NodeCompletionResult result = appState.onCompletedNode(
+ containerStatus(cid, LauncherExitCodes.EXIT_TASK_LAUNCH_FAILURE));
+ assertTrue(result.containerFailed);
+
+ assertEquals(1, aaRole.getFailed());
+ assertEquals(0, aaRole.getActual());
+ List<NodeInstance> availablePlacements = appState.getRoleHistory()
+ .findNodeForNewAAInstance(aaRole);
+ assertEquals(1, availablePlacements.size());
+ describe("expecting a successful review with available placements of " +
+ availablePlacements);
+ operations = appState.reviewRequestAndReleaseNodes();
+ assertEquals(1, operations.size());
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
new file mode 100644
index 0000000..4540f0a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAAPlacement.java
@@ -0,0 +1,361 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.types.NodeInformation;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.AppState.NodeUpdatedOutcome;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test Anti-affine placement
+ */
+public class TestMockAppStateAAPlacement extends BaseMockAppStateAATest
+ implements MockRoles {
+
+ private static final int NODES = 3;
+
+ /**
+ * The YARN engine has a cluster with very few nodes (3) and lots of
+ * containers, so if AA placement isn't working, affine placements surface.
+ * @return an engine with {@link #NODES} nodes and 8 containers per node
+ */
+ @Override
+ public MockYarnEngine createYarnEngine() {
+ return new MockYarnEngine(NODES, 8);
+ }
+
+ /**
+ * This is the simplest AA allocation: no labels, so allocate anywhere
+ * @throws Throwable
+ */
+ @Test
+ public void testAllocateAANoLabel() throws Throwable {
+ assertTrue(cloneNodemap().size() > 0);
+
+ // want multiple instances, so there will be iterations
+ aaRole.setDesired(2);
+
+ List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+ AMRMClient.ContainerRequest request = getSingleRequest(ops);
+ assertFalse(request.getRelaxLocality());
+ assertEquals(engine.cluster.clusterSize, request.getNodes().size());
+ assertNull(request.getRacks());
+ assertNotNull(request.getCapability());
+
+ Container allocated = engine.allocateContainer(request);
+
+ // notify the app of the allocation and expect an assignment
+ List<ContainerAssignment> assignments = new ArrayList<>();
+ List<AbstractRMOperation> operations = new ArrayList<>();
+ appState.onContainersAllocated(Arrays.asList(allocated), assignments,
+ operations);
+
+ String host = allocated.getNodeId().getHost();
+ NodeInstance hostInstance = cloneNodemap().get(host);
+ assertEquals(1, hostInstance.get(aaRole.getKey()).getStarting());
+ assertFalse(hostInstance.canHost(aaRole.getKey(), ""));
+ assertFalse(hostInstance.canHost(aaRole.getKey(), null));
+
+ // assignment
+ assertEquals(1, assignments.size());
+
+ // verify the release matches the allocation
+ assertEquals(2, operations.size());
+ assertEquals(allocated.getResource(),
+ getCancel(operations, 0).getCapability());
+
+ // we also expect a new allocation request to have been issued
+
+ ContainerRequest req2 = getRequest(operations, 1);
+ assertEquals(engine.cluster.clusterSize - 1, req2.getNodes().size());
+
+ assertFalse(req2.getNodes().contains(host));
+ assertFalse(request.getRelaxLocality());
+
+ // verify the pending counter is down
+ assertEquals(0L, aaRole.getPendingAntiAffineRequests());
+ Container allocated2 = engine.allocateContainer(req2);
+
+ // placement must be on a different host
+ assertNotEquals(allocated2.getNodeId(), allocated.getNodeId());
+
+ ContainerAssignment assigned = assignments.get(0);
+ Container container = assigned.container;
+ RoleInstance ri = roleInstance(assigned);
+ //tell the app it arrived
+ appState.containerStartSubmitted(container, ri);
+ assertNotNull(appState.onNodeManagerContainerStarted(container.getId()));
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertEquals(0, ops.size());
+ assertAllContainersAA();
+
+ // identify those hosts with an aa role on
+ Map<Integer, String> naming = appState.buildNamingMap();
+ assertEquals(3, naming.size());
+
+ String name = aaRole.getName();
+ assertEquals(name, naming.get(aaRole.getKey()));
+ Map<String, NodeInformation> info =
+ appState.getRoleHistory().getNodeInformationSnapshot(naming);
+ assertTrue(SliderUtils.isNotEmpty(info));
+
+ NodeInformation nodeInformation = info.get(host);
+ assertNotNull(nodeInformation);
+ assertTrue(SliderUtils.isNotEmpty(nodeInformation.entries));
+ assertNotNull(nodeInformation.entries.get(name));
+ assertEquals(1, nodeInformation.entries.get(name).live);
+ }
+
+ @Test
+ public void testAllocateFlexUp() throws Throwable {
+ // want multiple instances, so there will be iterations
+ aaRole.setDesired(2);
+ List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+ getSingleRequest(ops);
+ assertEquals(1, aaRole.getRequested());
+ assertEquals(1, aaRole.getPendingAntiAffineRequests());
+ assertEquals(aaRole.getDesired(), aaRole.getActualAndRequested()
+ + aaRole.getPendingAntiAffineRequests());
+
+ // now trigger that flex up
+ aaRole.setDesired(3);
+
+ // expect: no new requests, pending count ++
+ List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
+ assertTrue(ops2.isEmpty());
+ assertEquals(aaRole.getDesired(), aaRole.getActual()
+ + aaRole.getPendingAntiAffineRequests() + aaRole.getOutstandingAARequestCount());
+
+ // 1 outstanding
+ assertEquals(0, aaRole.getActual());
+ assertTrue(aaRole.isAARequestOutstanding());
+ // and one AA
+ assertEquals(2, aaRole.getPendingAntiAffineRequests());
+ assertAllContainersAA();
+
+ // next iter
+ assertEquals(1, submitOperations(ops, EMPTY_ID_LIST, ops2).size());
+ assertEquals(2, ops2.size());
+ assertEquals(1, aaRole.getPendingAntiAffineRequests());
+ assertAllContainersAA();
+
+ assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
+ // now trigger the next execution cycle
+ List<AbstractRMOperation> ops3 = new ArrayList<>();
+ assertEquals(1, submitOperations(ops2, EMPTY_ID_LIST, ops3).size());
+ assertEquals(2, ops3.size());
+ assertEquals(0, aaRole.getPendingAntiAffineRequests());
+ assertAllContainersAA();
+
+ }
+
+ @Test
+ public void testAllocateFlexDownDecrementsPending() throws Throwable {
+ // want multiple instances, so there will be iterations
+ aaRole.setDesired(2);
+ List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+ getSingleRequest(ops);
+ assertEquals(1, aaRole.getPendingAntiAffineRequests());
+ assertTrue(aaRole.isAARequestOutstanding());
+
+ // flex down so that the next request should be cancelled
+ aaRole.setDesired(1);
+
+ // expect: no new requests, pending count --
+ List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
+ assertTrue(ops2.isEmpty());
+ assertTrue(aaRole.isAARequestOutstanding());
+ assertEquals(0, aaRole.getPendingAntiAffineRequests());
+ assertAllContainersAA();
+
+ // next iter
+ submitOperations(ops, EMPTY_ID_LIST, ops2);
+ assertEquals(1, ops2.size());
+ assertAllContainersAA();
+ }
+
+ /**
+ * Here flex down while there is only one outstanding request.
+ * The outstanding flex should be cancelled
+ * @throws Throwable
+ */
+ @Test
+ public void testAllocateFlexDownForcesCancel() throws Throwable {
+ // want multiple instances, so there will be iterations
+ aaRole.setDesired(1);
+ List<AbstractRMOperation> ops = appState.reviewRequestAndReleaseNodes();
+ getSingleRequest(ops);
+ assertEquals(0, aaRole.getPendingAntiAffineRequests());
+ assertTrue(aaRole.isAARequestOutstanding());
+
+ // flex down so that the next request should be cancelled
+ aaRole.setDesired(0);
+ // expect: no new requests, pending count --
+ List<AbstractRMOperation> ops2 = appState.reviewRequestAndReleaseNodes();
+ assertEquals(0, aaRole.getPendingAntiAffineRequests());
+ assertFalse(aaRole.isAARequestOutstanding());
+ assertEquals(1, ops2.size());
+ getSingleCancel(ops2);
+
+ // next iter
+ submitOperations(ops, EMPTY_ID_LIST, ops2);
+ assertEquals(1, ops2.size());
+ }
+
+ void assertAllContainersAA() {
+ assertAllContainersAA(aaRole.getKey());
+ }
+
+ /**
+ * Ask for one more AA instance than there are nodes; the final request
+ * must remain unsatisfied until the cluster changes size.
+ */
+ @Test
+ public void testAskForTooMany() throws Throwable {
+
+ describe("Ask for 1 more than the no of available nodes;" +
+ " expect the final request to be unsatisfied until the cluster " +
+ "changes size");
+ //more than expected
+ aaRole.setDesired(NODES + 1);
+ List<AbstractRMOperation> operations = appState
+ .reviewRequestAndReleaseNodes();
+ assertTrue(aaRole.isAARequestOutstanding());
+ assertEquals(NODES, aaRole.getPendingAntiAffineRequests());
+ for (int i = 0; i < NODES; i++) {
+ String iter = "Iteration " + i + " role = " + aaRole;
+ log.info(iter);
+ List<AbstractRMOperation> operationsOut = new ArrayList<>();
+ assertEquals(1, submitOperations(operations, EMPTY_ID_LIST,
+ operationsOut).size());
+ operations = operationsOut;
+ if (i + 1 < NODES) {
+ assertEquals(2, operations.size());
+ } else {
+ assertEquals(1, operations.size());
+ }
+ assertAllContainersAA();
+ }
+ // expect an outstanding AA request to be unsatisfied
+ assertTrue(aaRole.getActual() < aaRole.getDesired());
+ assertEquals(0, aaRole.getRequested());
+ assertFalse(aaRole.isAARequestOutstanding());
+ List<Container> allocatedContainers = engine.execute(operations,
+ EMPTY_ID_LIST);
+ assertEquals(0, allocatedContainers.size());
+ // in a review now, no more requests can be generated, as there is no space for AA placements,
+ // even though there is cluster capacity
+ assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
+
+ // now do a node update (this doesn't touch the YARN engine; the node isn't really there)
+ NodeUpdatedOutcome outcome = addNewNode();
+ assertEquals(NODES + 1, cloneNodemap().size());
+ assertTrue(outcome.clusterChanged);
+ // no active calls to empty
+ assertTrue(outcome.operations.isEmpty());
+ assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
+ }
+
+ protected AppState.NodeUpdatedOutcome addNewNode() {
+ return updateNodes(MockFactory.instance.newNodeReport("4", NodeState
+ .RUNNING, "gpu"));
+ }
+
+ @Test
+ public void testClusterSizeChangesDuringRequestSequence() throws Throwable {
+ describe("Change the cluster size where the cluster size changes during " +
+ "a test sequence.");
+ aaRole.setDesired(NODES + 1);
+ appState.reviewRequestAndReleaseNodes();
+ assertTrue(aaRole.isAARequestOutstanding());
+ assertEquals(NODES, aaRole.getPendingAntiAffineRequests());
+ NodeUpdatedOutcome outcome = addNewNode();
+ assertTrue(outcome.clusterChanged);
+ // one call to cancel
+ assertEquals(1, outcome.operations.size());
+ // and on a review, one more to rebuild
+ assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
+ }
+
+ @Test
+ public void testBindingInfoMustHaveNodeMap() throws Throwable {
+ AppStateBindingInfo bindingInfo = buildBindingInfo();
+ bindingInfo.nodeReports = null;
+ try {
+ MockAppState state = new MockAppState(bindingInfo);
+ fail("Expected an exception, got " + state);
+ } catch (IllegalArgumentException expected) {
+ }
+ }
+
+ @Test
+ public void testAMRestart() throws Throwable {
+ int desiredAA = 3;
+ aaRole.setDesired(desiredAA);
+ List<RoleInstance> instances = createAndStartNodes();
+ List<Container> containers = new ArrayList<>();
+ for (RoleInstance instance : instances) {
+ containers.add(instance.container);
+ }
+
+ // now destroy the app state
+ AppStateBindingInfo bindingInfo = buildBindingInfo();
+ bindingInfo.instanceDefinition = factory.newInstanceDefinition(0, 0,
+ desiredAA);
+ ConfTreeOperations cto = new ConfTreeOperations(bindingInfo
+ .instanceDefinition.getResources());
+ cto.setComponentOpt(ROLE2,
+ ResourceKeys.COMPONENT_PLACEMENT_POLICY,
+ PlacementPolicy.ANTI_AFFINITY_REQUIRED);
+ bindingInfo.liveContainers = containers;
+ appState = new MockAppState(bindingInfo);
+ // re-resolve the inherited role fields against the rebuilt app state
+ aaRole = lookupRole(MockFactory.AAROLE_2.name);
+ gpuRole = lookupRole(MockFactory.AAROLE_1_GPU.name);
+ appState.reviewRequestAndReleaseNodes();
+ assertTrue(aaRole.isAntiAffinePlacement());
+ assertTrue(aaRole.isAARequestOutstanding());
+
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAppRestIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAppRestIntegration.java
new file mode 100644
index 0000000..7841ac2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateAppRestIntegration.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.slider.api.types.ContainerInformation;
+import org.apache.slider.core.exceptions.SliderInternalStateException;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.core.persist.JsonSerDeser;
+import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockProviderService;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.state.ProviderAppState;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.apache.slider.server.appmaster.web.WebAppApi;
+import org.apache.slider.server.appmaster.web.WebAppApiImpl;
+import org.apache.slider.server.appmaster.web.rest.application.ApplicationResouceContentCacheFactory;
+import org.apache.slider.server.appmaster.web.rest.application.ApplicationResource;
+import org.apache.slider.server.appmaster.web.rest.application.resources.CachedContent;
+import org.apache.slider.server.appmaster.web.rest.application.resources.ContentCache;
+import org.apache.slider.server.appmaster.web.rest.application.resources.LiveContainersRefresher;
+import org.apache.slider.server.appmaster.web.rest.application.resources.ResourceRefresher;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * Integration of the mock app state with the application REST layer:
+ * content-cache refresh logic, the live containers refresher and the
+ * {@code ApplicationResource} view of live containers.
+ */
+public class TestMockAppStateAppRestIntegration extends BaseMockAppStateTest
+    implements MockRoles {
+
+  @Test
+  public void testCachedIntDocument() throws Throwable {
+    ContentCache cache = new ContentCache();
+
+    IntRefresher refresher = new IntRefresher();
+    assertEquals(0, refresher.count);
+    CachedContentManagedTimer entry = new CachedContentManagedTimer(refresher);
+    cache.put("/int", entry);
+    CachedContent content1 = cache.get("/int");
+    assertEquals(entry, content1);
+
+    // first get() triggers a refresh: value 0, refresh count now 1
+    assertEquals(0, entry.get());
+    assertEquals(1, refresher.count);
+    assertEquals(0, entry.getCachedValue());
+    assertEquals(1, entry.getRefreshCounter());
+
+    // the managed timer advances past the TTL, so this get() refreshes again
+    Object got = entry.get();
+    assertEquals(2, entry.getRefreshCounter());
+    assertEquals(1, got);
+  }
+
+  @Test
+  public void testContainerListRefresher() throws Throwable {
+    LiveContainersRefresher clr = new LiveContainersRefresher(stateAccess);
+    Map<String, ContainerInformation> map = clr.refresh();
+    assertEquals(0, map.size());
+    List<RoleInstance> instances = startNodes();
+    map = clr.refresh();
+    assertEquals(map.size(), instances.size());
+    log.info("{}", map);
+    JsonSerDeser<ContainerInformation> serDeser =
+        new JsonSerDeser<>(ContainerInformation.class);
+    for (Entry<String, ContainerInformation> entry : map.entrySet()) {
+      String key = entry.getKey();
+      ContainerInformation value = entry.getValue();
+      log.info("{} -> {}", key, serDeser.toJson(value));
+    }
+  }
+
+  /**
+   * Start a fixed set of role instances (1 + 2 + 3 across the three roles).
+   * @return the started instances
+   */
+  public List<RoleInstance> startNodes()
+      throws TriggerClusterTeardownException, SliderInternalStateException {
+    int r0 = 1;
+    int r1 = 2;
+    int r2 = 3;
+    getRole0Status().setDesired(r0);
+    getRole1Status().setDesired(r1);
+    getRole2Status().setDesired(r2);
+    List<RoleInstance> instances = createAndStartNodes();
+    assertEquals(instances.size(), r0 + r1 + r2);
+    return instances;
+  }
+
+  @Test
+  public void testApplicationResource() throws Throwable {
+    List<RoleInstance> instances = startNodes();
+    ApplicationResource applicationResource =
+        new ApplicationResource(getWebAppApi());
+    Map<String, ContainerInformation> containers =
+        applicationResource.getLiveContainers();
+    assertEquals(containers.size(), instances.size());
+  }
+
+  /**
+   * Get a state accessor for the appState field.
+   * @return something to hand down to refreshers and resources
+   */
+  public StateAccessForProviders getStateAccess() {
+    StateAccessForProviders state = new ProviderAppState("name", appState);
+    return state;
+  }
+
+  /**
+   * Build a web app API wired to the test's state accessor and a mock
+   * provider; queue/agent/security fields are unused here and left null.
+   * @return a web app API for the resource under test
+   */
+  public WebAppApi getWebAppApi() {
+    WebAppApi api = new WebAppApiImpl(stateAccess,
+        new MockProviderService(),
+        null,
+        new MetricsAndMonitoring("metrics"), null, null,
+        ApplicationResouceContentCacheFactory.createContentCache(stateAccess)
+    );
+    return api;
+  }
+
+  /**
+   * Little class to do integer refreshing and so test refresh logic:
+   * each refresh returns the previous counter value, then increments it.
+   */
+  public class IntRefresher implements ResourceRefresher<Integer> {
+    // number of completed refreshes; also the next value to hand out
+    int count;
+    @Override
+    public Integer refresh() throws Exception {
+      log.info("Refresh at {}", count);
+      int result = count;
+      count += 1;
+      return result;
+    }
+
+    @Override
+    public String toString() {
+      return "IntRefresher at " + count;
+    }
+
+  }
+
+  /**
+   * Cached content whose clock is a simple counter advanced on every
+   * {@link #now()} call, so cache expiry is fully deterministic in tests.
+   */
+  public class CachedContentManagedTimer extends CachedContent {
+    int time = 0;
+
+    @Override
+    protected long now() {
+      return time++;
+    }
+
+    CachedContentManagedTimer(ResourceRefresher refresh) {
+      super(1, refresh);
+    }
+
+    @Override
+    public String toString() {
+      return "CachedContentManagedTimer at " + time + "; " + super.toString();
+    }
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
new file mode 100644
index 0000000..98f1308
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateContainerFailure.java
@@ -0,0 +1,377 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.server.appmaster.actions.ResetFailureWindow;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAM;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.ContainerOutcome;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.List;
+
+/**
+ * Tests of container failure handling: short- and long-lived container
+ * failures, node-manager start failures, failure thresholds, the
+ * failure-window reset action and the per-role/per-node failure counters.
+ */
+public class TestMockAppStateContainerFailure extends BaseMockAppStateTest
+    implements MockRoles {
+  // receives RM operations issued by the ResetFailureWindow action
+  MockRMOperationHandler operationHandler = new MockRMOperationHandler();
+  MockAM mockAM = new MockAM();
+
+  @Override
+  public String getTestName() {
+    return "TestMockAppStateContainerFailure";
+  }
+
+  /**
+   * Small cluster with multiple containers per node,
+   * to guarantee many container allocations on each node.
+   * @return a 4-node engine with a high per-node container capacity
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(4, 8000);
+  }
+
+  @Override
+  public AggregateConf buildInstanceDefinition() {
+    AggregateConf aggregateConf = super.buildInstanceDefinition();
+    MapOperations globalOptions = aggregateConf.getResourceOperations()
+        .getGlobalOptions();
+    // raise the threshold so tests can fail containers repeatedly
+    // without tripping a cluster teardown (except where intended)
+    globalOptions.put(ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "10");
+
+    return aggregateConf;
+  }
+
+  @Test
+  public void testShortLivedFail() throws Throwable {
+
+    getRole0Status().setDesired(1);
+    List<RoleInstance> instances = createAndStartNodes();
+    assertEquals(1, instances.size());
+
+    RoleInstance instance = instances.get(0);
+    long created = instance.createTime;
+    long started = instance.startTime;
+    assertTrue(created > 0);
+    assertTrue(started >= created);
+    List<ContainerId> ids = extractContainerIds(instances, 0);
+
+    ContainerId cid = ids.get(0);
+    assertTrue(appState.isShortLived(instance));
+    AppState.NodeCompletionResult result = appState.onCompletedNode(
+        containerStatus(cid, 1));
+    assertNotNull(result.roleInstance);
+    assertTrue(result.containerFailed);
+    RoleStatus status = getRole0Status();
+    assertEquals(1, status.getFailed());
+    // a short-lived failure is counted as a start failure
+    assertEquals(1, status.getStartFailed());
+
+    // view the world: the node must not be queued for reuse
+    appState.getRoleHistory().dump();
+    List<NodeInstance> queue =
+        appState.getRoleHistory().cloneRecentNodeList(0);
+    assertEquals(0, queue.size());
+  }
+
+  @Test
+  public void testLongLivedFail() throws Throwable {
+
+    getRole0Status().setDesired(1);
+    List<RoleInstance> instances = createAndStartNodes();
+    assertEquals(1, instances.size());
+
+    RoleInstance instance = instances.get(0);
+    // push the start time an hour back so the container counts as long-lived
+    instance.startTime = System.currentTimeMillis() - 60 * 60 * 1000;
+    assertFalse(appState.isShortLived(instance));
+    List<ContainerId> ids = extractContainerIds(instances, 0);
+
+    ContainerId cid = ids.get(0);
+    AppState.NodeCompletionResult result = appState.onCompletedNode(
+        containerStatus(cid, 1));
+    assertNotNull(result.roleInstance);
+    assertTrue(result.containerFailed);
+    RoleStatus status = getRole0Status();
+    assertEquals(1, status.getFailed());
+    // a long-lived failure is not a start failure
+    assertEquals(0, status.getStartFailed());
+
+    // view the world: the node stays in the recent list for reuse
+    appState.getRoleHistory().dump();
+    List<NodeInstance> queue =
+        appState.getRoleHistory().cloneRecentNodeList(0);
+    assertEquals(1, queue.size());
+  }
+
+  @Test
+  public void testNodeStartFailure() throws Throwable {
+
+    getRole0Status().setDesired(1);
+    List<RoleInstance> instances = createAndSubmitNodes();
+    assertEquals(1, instances.size());
+
+    RoleInstance instance = instances.get(0);
+
+    List<ContainerId> ids = extractContainerIds(instances, 0);
+
+    ContainerId cid = ids.get(0);
+    appState.onNodeManagerContainerStartFailed(cid,
+        new SliderException("oops"));
+    RoleStatus status = getRole0Status();
+    assertEquals(1, status.getFailed());
+    assertEquals(1, status.getStartFailed());
+
+    RoleHistory history = appState.getRoleHistory();
+    history.dump();
+    List<NodeInstance> queue = history.cloneRecentNodeList(0);
+    assertEquals(0, queue.size());
+
+    // the failure must also be recorded against the node's entry
+    NodeInstance ni = history.getOrCreateNodeInstance(instance.container);
+    NodeEntry re = ni.get(0);
+    assertEquals(1, re.getFailed());
+    assertEquals(1, re.getStartFailed());
+  }
+
+  @Test
+  public void testRecurrentStartupFailure() throws Throwable {
+
+    getRole0Status().setDesired(1);
+    try {
+      // keep failing containers until the threshold (10) forces teardown
+      for (int i = 0; i < 100; i++) {
+        List<RoleInstance> instances = createAndSubmitNodes();
+        assertEquals(1, instances.size());
+
+        List<ContainerId> ids = extractContainerIds(instances, 0);
+
+        ContainerId cid = ids.get(0);
+        log.info("{} instance {} {}", i, instances.get(0), cid);
+        assertNotNull(cid);
+        appState.onNodeManagerContainerStartFailed(cid,
+            new SliderException("failure #" + i));
+        AppState.NodeCompletionResult result = appState.onCompletedNode(
+            containerStatus(cid));
+        assertTrue(result.containerFailed);
+      }
+      fail("Cluster did not fail from too many startup failures");
+    } catch (TriggerClusterTeardownException teardown) {
+      log.info("Exception {} : {}", teardown.getExitCode(), teardown);
+    }
+  }
+
+  @Test
+  public void testRecurrentStartupFailureWithUnlimitedFailures()
+      throws Throwable {
+    // Update instance definition to allow containers to fail any number
+    // of times: threshold "0" disables the limit
+    AppStateBindingInfo bindingInfo = buildBindingInfo();
+    MapOperations globalResourceOptions = bindingInfo.instanceDefinition
+        .getResourceOperations().getGlobalOptions();
+    globalResourceOptions.put(ResourceKeys.CONTAINER_FAILURE_THRESHOLD, "0");
+    appState = new MockAppState(bindingInfo);
+
+    getRole0Status().setDesired(1);
+    try {
+      for (int i = 0; i < 100; i++) {
+        List<RoleInstance> instances = createAndSubmitNodes();
+        assertEquals(1, instances.size());
+
+        List<ContainerId> ids = extractContainerIds(instances, 0);
+
+        ContainerId cid = ids.get(0);
+        log.info("{} instance {} {}", i, instances.get(0), cid);
+        assertNotNull(cid);
+        appState.onNodeManagerContainerStartFailed(cid,
+            new SliderException("failure #" + i));
+        AppState.NodeCompletionResult result = appState.onCompletedNode(
+            containerStatus(cid));
+        assertTrue(result.containerFailed);
+      }
+    } catch (TriggerClusterTeardownException teardown) {
+      log.info("Exception {} : {}", teardown.getExitCode(), teardown);
+      fail("Cluster failed despite " + ResourceKeys
+          .CONTAINER_FAILURE_THRESHOLD + " = 0");
+    }
+  }
+
+  @Test
+  public void testRoleStatusFailureWindow() throws Throwable {
+
+    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
+
+    // initial reset
+    resetter.execute(mockAM, null, appState);
+
+    getRole0Status().setDesired(1);
+    // resetting the window before each failure keeps the "recent failure"
+    // count below the threshold, so no teardown is triggered
+    for (int i = 0; i < 100; i++) {
+      resetter.execute(mockAM, null, appState);
+      List<RoleInstance> instances = createAndSubmitNodes();
+      assertEquals(1, instances.size());
+
+      List<ContainerId> ids = extractContainerIds(instances, 0);
+
+      ContainerId cid = ids.get(0);
+      log.info("{} instance {} {}", i, instances.get(0), cid);
+      assertNotNull(cid);
+      appState.onNodeManagerContainerStartFailed(
+          cid,
+          new SliderException("failure #" + i));
+      AppState.NodeCompletionResult result = appState.onCompletedNode(
+          containerStatus(cid));
+      assertTrue(result.containerFailed);
+    }
+  }
+
+  @Test
+  public void testRoleStatusFailed() throws Throwable {
+    RoleStatus status = getRole0Status();
+    // a plain failure bumps failed + recent counters only
+    status.noteFailed(false, "text", ContainerOutcome.Failed);
+    assertEquals(1, status.getFailed());
+    assertEquals(1L, status.getFailedRecently());
+    assertEquals(0L, status.getLimitsExceeded());
+    assertEquals(0L, status.getPreempted());
+    assertEquals(0L, status.getNodeFailed());
+
+    // resetting the window clears only the recent-failure count
+    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
+    resetter.execute(mockAM, null, appState);
+    assertEquals(1, status.getFailed());
+    assertEquals(0L, status.getFailedRecently());
+  }
+
+  @Test
+  public void testRoleStatusFailedLimitsExceeded() throws Throwable {
+    RoleStatus status = getRole0Status();
+    // limits exceeded: counted as failed and tracked separately
+    status.noteFailed(false, "text", ContainerOutcome.Failed_limits_exceeded);
+    assertEquals(1, status.getFailed());
+    assertEquals(1L, status.getFailedRecently());
+    assertEquals(1L, status.getLimitsExceeded());
+    assertEquals(0L, status.getPreempted());
+    assertEquals(0L, status.getNodeFailed());
+
+    // window reset clears recent failures but not the limits counter
+    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
+    resetter.execute(mockAM, null, appState);
+    assertEquals(1, status.getFailed());
+    assertEquals(0L, status.getFailedRecently());
+    assertEquals(1L, status.getLimitsExceeded());
+  }
+
+  @Test
+  public void testRoleStatusFailedPrempted() throws Throwable {
+    RoleStatus status = getRole0Status();
+    // preemption is not counted as a failure
+    status.noteFailed(false, "text", ContainerOutcome.Preempted);
+    assertEquals(0, status.getFailed());
+    assertEquals(1L, status.getPreempted());
+    assertEquals(0L, status.getFailedRecently());
+    assertEquals(0L, status.getNodeFailed());
+
+    // preemption count survives a failure-window reset
+    ResetFailureWindow resetter = new ResetFailureWindow(operationHandler);
+    resetter.execute(mockAM, null, appState);
+    assertEquals(1L, status.getPreempted());
+  }
+
+  @Test
+  public void testRoleStatusFailedNode() throws Throwable {
+    RoleStatus status = getRole0Status();
+    // node failure: counted as failed but not as a recent failure
+    status.noteFailed(false, "text", ContainerOutcome.Node_failure);
+    assertEquals(1, status.getFailed());
+    assertEquals(0L, status.getFailedRecently());
+    assertEquals(0L, status.getLimitsExceeded());
+    assertEquals(0L, status.getPreempted());
+    assertEquals(1L, status.getNodeFailed());
+  }
+
+  @Test
+  public void testNodeEntryCompleted() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(true, ContainerOutcome.Completed);
+    assertEquals(0, nodeEntry.getFailed());
+    assertEquals(0, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(0, nodeEntry.getPreempted());
+    assertEquals(0, nodeEntry.getActive());
+    assertTrue(nodeEntry.isAvailable());
+  }
+
+  @Test
+  public void testNodeEntryFailed() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(false, ContainerOutcome.Failed);
+    assertEquals(1, nodeEntry.getFailed());
+    assertEquals(1, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(0, nodeEntry.getPreempted());
+    assertEquals(0, nodeEntry.getActive());
+    assertTrue(nodeEntry.isAvailable());
+    // reset clears only the recent-failure counter
+    nodeEntry.resetFailedRecently();
+    assertEquals(1, nodeEntry.getFailed());
+    assertEquals(0, nodeEntry.getFailedRecently());
+  }
+
+  @Test
+  public void testNodeEntryLimitsExceeded() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    // exceeding limits is not held against the node itself
+    nodeEntry.containerCompleted(false,
+        ContainerOutcome.Failed_limits_exceeded);
+    assertEquals(0, nodeEntry.getFailed());
+    assertEquals(0, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(0, nodeEntry.getPreempted());
+  }
+
+  @Test
+  public void testNodeEntryPreempted() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(false, ContainerOutcome.Preempted);
+    assertEquals(0, nodeEntry.getFailed());
+    assertEquals(0, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(1, nodeEntry.getPreempted());
+  }
+
+  @Test
+  public void testNodeEntryNodeFailure() throws Throwable {
+    NodeEntry nodeEntry = new NodeEntry(1);
+    nodeEntry.containerCompleted(false, ContainerOutcome.Node_failure);
+    assertEquals(1, nodeEntry.getFailed());
+    assertEquals(1, nodeEntry.getFailedRecently());
+    assertEquals(0, nodeEntry.getStartFailed());
+    assertEquals(0, nodeEntry.getPreempted());
+  }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
new file mode 100644
index 0000000..d964775
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicHistory.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Tests of role history behavior when roles are added dynamically:
+ * history updates on allocation and failure, and rejection of
+ * conflicting role priorities.
+ */
+public class TestMockAppStateDynamicHistory extends BaseMockAppStateTest
+    implements MockRoles {
+
+  /**
+   * Small cluster with one container per node, so each allocation
+   * lands on a distinct host.
+   * @return an 8-node engine with one container slot per node
+   */
+  @Override
+  public MockYarnEngine createYarnEngine() {
+    return new MockYarnEngine(8, 1);
+  }
+
+  @Test
+  public void testDynamicRoleHistory() throws Throwable {
+
+    String dynamic = "dynamicRole";
+    int rolePriority8 = 8;
+    int desired = 1;
+    int placementPolicy = PlacementPolicy.DEFAULT;
+    // snapshot and patch existing spec with the new component
+    ConfTreeOperations resources = ConfTreeOperations.fromInstance(
+        appState.getResourcesSnapshot().confTree);
+    Map<String, String> opts = new HashMap<>();
+    opts.put(ResourceKeys.COMPONENT_INSTANCES, "" + desired);
+    opts.put(ResourceKeys.COMPONENT_PRIORITY, "" + rolePriority8);
+    opts.put(ResourceKeys.COMPONENT_PLACEMENT_POLICY, "" + placementPolicy);
+
+    resources.getComponents().put(dynamic, opts);
+
+    // write the definitions
+    List<ProviderRole> updates =
+        appState.updateResourceDefinitions(resources.confTree);
+    assertEquals(1, updates.size());
+    ProviderRole updatedRole = updates.get(0);
+    assertEquals(placementPolicy, updatedRole.placementPolicy);
+
+    // verify the new role was persisted
+    MapOperations snapshotDefinition = appState.getResourcesSnapshot()
+        .getMandatoryComponent(dynamic);
+    assertEquals(rolePriority8, snapshotDefinition.getMandatoryOptionInt(
+        ResourceKeys.COMPONENT_PRIORITY));
+
+    // now look at the role map
+    assertNotNull(appState.getRoleMap().get(dynamic));
+    ProviderRole mappedRole = appState.getRoleMap().get(dynamic);
+    assertEquals(rolePriority8, mappedRole.id);
+
+    Map<Integer, ProviderRole> priorityMap = appState.getRolePriorityMap();
+    assertEquals(4, priorityMap.size());
+    ProviderRole dynamicProviderRole = priorityMap.get(rolePriority8);
+    assertNotNull(dynamicProviderRole);
+    assertEquals(rolePriority8, dynamicProviderRole.id);
+
+    assertNotNull(appState.getRoleStatusMap().get(rolePriority8));
+    RoleStatus dynamicRoleStatus =
+        appState.getRoleStatusMap().get(rolePriority8);
+    assertEquals(desired, dynamicRoleStatus.getDesired());
+
+    // before allocating the nodes, fill up the capacity of some of the
+    // hosts; discard one index so the target node is known in advance
+    engine.allocator.nextIndex();
+
+    int targetNode = 2;
+    assertEquals(targetNode, engine.allocator.nextIndex());
+    String targetHostname = engine.cluster.nodeAt(targetNode).hostname;
+
+    // clock is set to a small value so the failure later is long-lived
+    appState.time = 100000;
+
+    // allocate the nodes
+    List<AbstractRMOperation> actions = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, actions.size());
+    ContainerRequestOperation action0 =
+        (ContainerRequestOperation) actions.get(0);
+
+    ContainerRequest request = action0.getRequest();
+    assertTrue(SliderUtils.isEmpty(request.getNodes()));
+
+    List<ContainerId> released = new ArrayList<>();
+    List<RoleInstance> allocations = submitOperations(actions, released);
+    processSubmissionOperations(allocations, new ArrayList<>(), released);
+    assertEquals(1, allocations.size());
+    RoleInstance ri = allocations.get(0);
+
+    assertEquals(dynamic, ri.role);
+    assertEquals(rolePriority8, ri.roleId);
+    assertEquals(targetHostname, ri.host);
+
+    // now look at the role history
+    RoleHistory roleHistory = appState.getRoleHistory();
+    List<NodeInstance> activeNodes =
+        roleHistory.listActiveNodes(rolePriority8);
+    assertEquals(1, activeNodes.size());
+    NodeInstance activeNode = activeNodes.get(0);
+    assertNotNull(activeNode.get(rolePriority8));
+    NodeEntry entry8 = activeNode.get(rolePriority8);
+    assertEquals(1, entry8.getActive());
+
+    assertEquals(targetHostname, activeNode.hostname);
+
+    NodeInstance activeNodeInstance =
+        roleHistory.getOrCreateNodeInstance(ri.container);
+
+    assertEquals(activeNode, activeNodeInstance);
+    NodeEntry entry = activeNodeInstance.get(rolePriority8);
+    assertNotNull(entry);
+    assertTrue(entry.getActive() > 0);
+    assertTrue(entry.getLive() > 0);
+
+    // now trigger a termination event on that role;
+    // increment time for a long-lived failure event
+    appState.time = appState.time + 100000;
+
+    log.debug("Triggering failure");
+    ContainerId cid = ri.getContainerId();
+    AppState.NodeCompletionResult result = appState.onCompletedNode(
+        containerStatus(cid, 1));
+    assertEquals(ri, result.roleInstance);
+    assertTrue(result.containerFailed);
+
+    roleHistory.dump();
+    // values should have changed
+    assertEquals(1, entry.getFailed());
+    assertEquals(0, entry.getStartFailed());
+    assertEquals(0, entry.getActive());
+    assertEquals(0, entry.getLive());
+
+    List<NodeInstance> nodesForRoleId =
+        roleHistory.getRecentNodesForRoleId(rolePriority8);
+    assertNotNull(nodesForRoleId);
+
+    // make sure new nodes will default to a different host in the engine
+    assertTrue(targetNode < engine.allocator.nextIndex());
+
+    actions = appState.reviewRequestAndReleaseNodes();
+    assertEquals(1, actions.size());
+    ContainerRequestOperation action1 =
+        (ContainerRequestOperation) actions.get(0);
+    ContainerRequest request1 = action1.getRequest();
+    assertTrue(SliderUtils.isNotEmpty(request1.getNodes()));
+  }
+
+  @Test(expected = BadConfigException.class)
+  public void testRoleHistoryRoleAdditions() throws Throwable {
+    // adding two roles with the same priority must be rejected
+    MockRoleHistory roleHistory = new MockRoleHistory(new ArrayList<>());
+    roleHistory.addNewRole(new RoleStatus(new ProviderRole("one", 1)));
+    roleHistory.addNewRole(new RoleStatus(new ProviderRole("two", 1)));
+    roleHistory.dump();
+  }
+
+  @Test(expected = BadConfigException.class)
+  public void testRoleHistoryRoleStartupConflict() throws Throwable {
+    // a priority clash in the initial role set must also be rejected
+    MockRoleHistory roleHistory = new MockRoleHistory(Arrays.asList(
+        new ProviderRole("one", 1), new ProviderRole("two", 1)
+    ));
+    roleHistory.dump();
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java
new file mode 100644
index 0000000..32a82f6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateDynamicRoles.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.AppState.NodeCompletionResult;
+import org.apache.slider.server.appmaster.state.ContainerPriority;
+import org.apache.slider.server.appmaster.state.RoleHistoryUtils;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test that if you have more than one role, the right roles are chosen for release.
+ */
+public class TestMockAppStateDynamicRoles extends BaseMockAppStateTest
+ implements MockRoles {
+ private static final String ROLE4 = "4";
+ private static final String ROLE5 = "5";
+ private static final int ID4 = 4;
+ private static final int ID5 = 5;
+
+ @Override
+ public String getTestName() {
+ return "TestMockAppStateDynamicRoles";
+ }
+
+ /**
+ * Small cluster with multiple containers per node,
+ * to guarantee many container allocations on each node
+ * @return a small mock YARN engine (8 nodes x 2 containers)
+ */
+ @Override
+ public MockYarnEngine createYarnEngine() {
+ return new MockYarnEngine(8, 2);
+ }
+
+ @Override
+ public AggregateConf buildInstanceDefinition() {
+ AggregateConf instance = factory.newInstanceDefinition(0, 0, 0);
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, ROLE4);
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "1");
+
+ instance.getResourceOperations().getOrAddComponent(ROLE4).putAll(opts);
+
+ Map opts5 = new HashMap<>();
+ opts5.put(ResourceKeys.COMPONENT_PRIORITY, ROLE5);
+ opts5.put(ResourceKeys.COMPONENT_INSTANCES, "1");
+ opts5.put(ResourceKeys.COMPONENT_PLACEMENT_POLICY, Integer.toString
+ (PlacementPolicy.STRICT));
+ opts5.put(ResourceKeys.NODE_FAILURE_THRESHOLD, Integer.toString(2));
+
+ instance.getResourceOperations().getOrAddComponent(ROLE5).putAll(opts5);
+ return instance;
+ }
+
+ @Test
+ public void testAllocateReleaseRealloc() throws Throwable {
+
+ createAndStartNodes();
+ appState.reviewRequestAndReleaseNodes();
+ appState.getRoleHistory().dump();
+ }
+
+ /**
+ * Find all allocations for a specific role
+ * @param role role Id/priority
+ * @param actions source list
+ * @return found list
+ */
+ List findAllocationsForRole(int role,
+ List actions) {
+ List ops = new ArrayList<>();
+ for (AbstractRMOperation op : actions) {
+ if (op instanceof ContainerRequestOperation && role ==
+ ContainerPriority.extractRole(((ContainerRequestOperation) op)
+ .getRequest().getPriority())) {
+ ops.add((ContainerRequestOperation) op);
+ }
+ }
+ return ops;
+ }
+
+ @Test
+ public void testStrictPlacementInitialRequest() throws Throwable {
+ log.info("Initial engine state = {}", engine);
+ List actions = appState.reviewRequestAndReleaseNodes();
+ assertEquals(2, actions.size());
+
+ // neither have locality at this point
+ assertRelaxLocalityFlag(ID4, null, true, actions);
+ assertRelaxLocalityFlag(ID5, null, true, actions);
+ }
+
+ @Test
+ public void testPolicyPropagation() throws Throwable {
+ assertEquals(0, (appState.lookupRoleStatus(ROLE4).getPlacementPolicy() &
+ PlacementPolicy.STRICT));
+ assertNotEquals(0, (appState.lookupRoleStatus(ROLE5).getPlacementPolicy() &
+ PlacementPolicy.STRICT));
+
+ }
+
+ @Test
+ public void testNodeFailureThresholdPropagation() throws Throwable {
+ assertEquals(3, appState.lookupRoleStatus(ROLE4).getNodeFailureThreshold());
+ assertEquals(2, appState.lookupRoleStatus(ROLE5).getNodeFailureThreshold());
+ }
+
+ @Test
+ public void testLaxPlacementSecondRequestRole4() throws Throwable {
+ log.info("Initial engine state = {}", engine);
+ RoleStatus role4 = appState.lookupRoleStatus(ROLE4);
+ RoleStatus role5 = appState.lookupRoleStatus(ROLE5);
+ role4.setDesired(1);
+ role5.setDesired(0);
+
+ List instances = createStartAndStopNodes(new ArrayList<>());
+ assertEquals(1, instances.size());
+
+ RoleInstance instanceA = null;
+ for (RoleInstance instance : instances) {
+ if (instance.roleId == ID4) {
+ instanceA = instance;
+ }
+ }
+ assertNotNull(instanceA);
+ String hostname = RoleHistoryUtils.hostnameOf(instanceA.container);
+
+ log.info("Allocated engine state = {}", engine);
+ assertEquals(1, engine.containerCount());
+
+ assertEquals(1, role4.getActual());
+ // shrinking cluster
+
+ role4.setDesired(0);
+ appState.lookupRoleStatus(ROLE4).setDesired(0);
+ List completionResults = new ArrayList<>();
+ createStartAndStopNodes(completionResults);
+ assertEquals(0, engine.containerCount());
+ assertEquals(1, completionResults.size());
+
+ // expanding: expect hostnames now
+ role4.setDesired(1);
+ List actions = appState.reviewRequestAndReleaseNodes();
+ assertEquals(1, actions.size());
+
+ ContainerRequestOperation cro = (ContainerRequestOperation) actions.get(0);
+ List nodes = cro.getRequest().getNodes();
+ assertEquals(1, nodes.size());
+ assertEquals(hostname, nodes.get(0));
+ }
+
+ @Test
+ public void testStrictPlacementSecondRequestRole5() throws Throwable {
+ log.info("Initial engine state = {}", engine);
+ RoleStatus role4 = appState.lookupRoleStatus(ROLE4);
+ RoleStatus role5 = appState.lookupRoleStatus(ROLE5);
+ role4.setDesired(0);
+ role5.setDesired(1);
+
+ List instances = createStartAndStopNodes(new ArrayList<>());
+ assertEquals(1, instances.size());
+
+ RoleInstance instanceA = null;
+ for (RoleInstance instance : instances) {
+ if (instance.roleId == ID5) {
+ instanceA = instance;
+ }
+ }
+ assertNotNull(instanceA);
+ String hostname = RoleHistoryUtils.hostnameOf(instanceA.container);
+
+ log.info("Allocated engine state = {}", engine);
+ assertEquals(1, engine.containerCount());
+
+ assertEquals(1, role5.getActual());
+
+ // shrinking cluster
+ role5.setDesired(0);
+ List completionResults = new ArrayList<>();
+ createStartAndStopNodes(completionResults);
+ assertEquals(0, engine.containerCount());
+ assertEquals(1, completionResults.size());
+ assertEquals(0, role5.getActual());
+
+ role5.setDesired(1);
+ List actions = appState.reviewRequestAndReleaseNodes();
+ assertEquals(1, actions.size());
+ assertRelaxLocalityFlag(ID5, "", false, actions);
+ ContainerRequestOperation cro = (ContainerRequestOperation) actions.get(0);
+ List nodes = cro.getRequest().getNodes();
+ assertEquals(1, nodes.size());
+ assertEquals(hostname, nodes.get(0));
+ }
+
+ public void assertRelaxLocalityFlag(
+ int role,
+ String expectedHost,
+ boolean expectedRelaxFlag,
+ List actions) {
+ List requests = findAllocationsForRole(
+ role, actions);
+ assertEquals(1, requests.size());
+ ContainerRequestOperation req = requests.get(0);
+ assertEquals(expectedRelaxFlag, req.getRequest().getRelaxLocality());
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
new file mode 100644
index 0000000..9c8613c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexDynamicRoles.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.SliderInternalStateException;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.avro.LoadedRoleHistory;
+import org.apache.slider.server.avro.RoleHistoryWriter;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Test that if you have more than one role, the right roles are chosen for release.
+ */
+public class TestMockAppStateFlexDynamicRoles extends BaseMockAppStateTest
+ implements MockRoles {
+
+ @Override
+ public String getTestName() {
+ return "TestMockAppStateFlexDynamicRoles";
+ }
+
+ /**
+ * Small cluster with multiple containers per node,
+ * to guarantee many container allocations on each node
+ * @return a small mock YARN engine (4 nodes x 4 containers)
+ */
+ @Override
+ public MockYarnEngine createYarnEngine() {
+ return new MockYarnEngine(4, 4);
+ }
+
+ @Override
+ public AppStateBindingInfo buildBindingInfo() {
+ AppStateBindingInfo bindingInfo = super.buildBindingInfo();
+ bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector();
+ return bindingInfo;
+ }
+
+ @Override
+ public AggregateConf buildInstanceDefinition() {
+ AggregateConf instance = factory.newInstanceDefinition(0, 0, 0);
+
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "1");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "6");
+
+ instance.getResourceOperations().getOrAddComponent("dynamic-6")
+ .putAll(opts);
+ return instance;
+ }
+
+ private ConfTreeOperations init()
+ throws TriggerClusterTeardownException, SliderInternalStateException {
+ createAndStartNodes();
+ ConfTree resources = appState.getInstanceDefinition().getResources();
+ return new ConfTreeOperations(resources);
+ }
+
+ @Test
+ public void testDynamicFlexAddRole() throws Throwable {
+ ConfTreeOperations cd = init();
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "1");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "7");
+
+ cd.getOrAddComponent("dynamicAdd7").putAll(opts);
+ appState.updateResourceDefinitions(cd.confTree);
+ createAndStartNodes();
+ dumpClusterDescription("updated CD", appState.getClusterStatus());
+ appState.lookupRoleStatus(7);
+ appState.lookupRoleStatus(6);
+ //gaps are still there
+ try {
+ assertNull(appState.lookupRoleStatus(5));
+ } catch (RuntimeException expected) {
+ }
+ }
+
+ @Test
+ public void testDynamicFlexAddRoleConflictingPriority() throws Throwable {
+ ConfTreeOperations cd = init();
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "1");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "6");
+
+ cd.getOrAddComponent("conflictingPriority").putAll(opts);
+ try {
+ appState.updateResourceDefinitions(cd.confTree);
+
+ ClusterDescription status = appState.getClusterStatus();
+ dumpClusterDescription("updated CD", status);
+ fail("Expected an exception, got " + status);
+ } catch (BadConfigException expected) {
+ log.info("Expected: {}", expected);
+ log.debug("Expected: {}", expected, expected);
+ // expected
+ }
+ }
+
+ @Test
+ public void testDynamicFlexDropRole() throws Throwable {
+ ConfTreeOperations cd = init();
+ cd.getComponents().remove("dynamic");
+ appState.updateResourceDefinitions(cd.confTree);
+
+ ClusterDescription getCD = appState.getClusterStatus();
+ dumpClusterDescription("updated CD", getCD);
+ //status is retained for future
+ appState.lookupRoleStatus(6);
+ }
+
+
+ @Test
+ public void testHistorySaveFlexLoad() throws Throwable {
+ ConfTreeOperations cd = init();
+ RoleHistory roleHistory = appState.getRoleHistory();
+ Path history = roleHistory.saveHistory(0x0001);
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "1");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "9");
+
+ cd.getOrAddComponent("HistorySaveFlexLoad").putAll(opts);
+ appState.updateResourceDefinitions(cd.confTree);
+ createAndStartNodes();
+ LoadedRoleHistory loadedRoleHistory =
+ historyWriter.read(fs, history);
+ assertEquals(0, appState.getRoleHistory().rebuild(loadedRoleHistory));
+ }
+
+ @Test
+ public void testHistoryFlexSaveResetLoad() throws Throwable {
+ ConfTreeOperations cd = init();
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "1");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "10");
+
+ cd.getOrAddComponent("HistoryFlexSaveLoad").putAll(opts);
+ appState.updateResourceDefinitions(cd.confTree);
+ createAndStartNodes();
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+ RoleHistory roleHistory = appState.getRoleHistory();
+ Path history = roleHistory.saveHistory(0x0002);
+ //now reset the app state
+ File historyWorkDir2 = new File("target/history" + getTestName() +
+ "-0002");
+ Path historyPath2 = new Path(historyWorkDir2.toURI());
+ appState = new MockAppState();
+ AppStateBindingInfo binding2 = buildBindingInfo();
+ binding2.instanceDefinition = factory.newInstanceDefinition(0, 0, 0);
+ binding2.historyPath = historyPath2;
+ appState.buildInstance(binding2);
+ // on this read there won't be the right number of roles
+ LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
+ assertEquals(0, appState.getRoleHistory().rebuild(loadedRoleHistory));
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java
new file mode 100644
index 0000000..2e6244f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateFlexing.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class TestMockAppStateFlexing extends BaseMockAppStateTest implements
+ MockRoles {
+
+ @Override
+ public String getTestName() {
+ return "TestMockAppStateFlexing";
+ }
+
+ @Test
+ public void testFlexDuringLaunchPhase() throws Throwable {
+
+ // ask for one instance of role0
+ getRole0Status().setDesired(1);
+
+ List ops = appState.reviewRequestAndReleaseNodes();
+
+ // at this point there's now one request in the list
+ assertEquals(1, ops.size());
+ // and in a liveness check, one outstanding
+ ApplicationLivenessInformation liveness =
+ appState.getApplicationLivenessInformation();
+ assertEquals(1, liveness.requestsOutstanding);
+ assertFalse(liveness.allRequestsSatisfied);
+
+ List allocations = engine.execute(ops);
+ List assignments = new ArrayList<>();
+ List releases = new ArrayList<>();
+ appState.onContainersAllocated(allocations, assignments, releases);
+ assertEquals(1, assignments.size());
+ ContainerAssignment assigned = assignments.get(0);
+ Container target = assigned.container;
+ RoleInstance ri = roleInstance(assigned);
+
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertTrue(ops.isEmpty());
+
+ liveness = appState.getApplicationLivenessInformation();
+ assertEquals(0, liveness.requestsOutstanding);
+ assertTrue(liveness.allRequestsSatisfied);
+
+ //now this is the start point.
+ appState.containerStartSubmitted(target, ri);
+
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertTrue(ops.isEmpty());
+
+ appState.innerOnNodeManagerContainerStarted(target.getId());
+ }
+
+ @Test
+ public void testFlexBeforeAllocationPhase() throws Throwable {
+ getRole0Status().setDesired(1);
+
+ List ops = appState.reviewRequestAndReleaseNodes();
+ assertFalse(ops.isEmpty());
+
+ // second scan will find the first run outstanding, so not re-issue
+ // any more container requests
+ List ops2 = appState.reviewRequestAndReleaseNodes();
+ assertTrue(ops2.isEmpty());
+
+ // and in a liveness check, one outstanding
+ ApplicationLivenessInformation liveness = appState
+ .getApplicationLivenessInformation();
+ assertEquals(1, liveness.requestsOutstanding);
+ assertFalse(liveness.allRequestsSatisfied);
+
+ appState.refreshClusterStatus(null);
+ ClusterDescription cd = appState.getClusterStatus();
+ assertEquals(1, cd.liveness.requestsOutstanding);
+
+ }
+
+
+ @Test
+ public void testFlexDownTwice() throws Throwable {
+ int r0 = 6;
+ int r1 = 0;
+ int r2 = 0;
+ getRole0Status().setDesired(r0);
+ getRole1Status().setDesired(r1);
+ getRole2Status().setDesired(r2);
+ List instances = createAndStartNodes();
+
+ int clusterSize = r0 + r1 + r2;
+ assertEquals(instances.size(), clusterSize);
+ log.info("shrinking cluster");
+ r0 = 4;
+ getRole0Status().setDesired(r0);
+ List completionResults = new ArrayList<>();
+ instances = createStartAndStopNodes(completionResults);
+ assertEquals(0, instances.size());
+ // assert two nodes were released
+ assertEquals(2, completionResults.size());
+
+ // no-op review
+ completionResults = new ArrayList<>();
+ instances = createStartAndStopNodes(completionResults);
+ assertEquals(0, instances.size());
+ // assert no nodes were released by the no-op review
+ assertEquals(0, completionResults.size());
+
+
+ // now shrink again
+ getRole0Status().setDesired(1);
+ completionResults = new ArrayList<>();
+ instances = createStartAndStopNodes(completionResults);
+ assertEquals(0, instances.size());
+ // assert three more nodes were released
+ assertEquals(3, completionResults.size());
+
+ }
+
+ @Test
+ public void testFlexNegative() throws Throwable {
+ int r0 = 6;
+ int r1 = 0;
+ int r2 = 0;
+ getRole0Status().setDesired(r0);
+ getRole1Status().setDesired(r1);
+ getRole2Status().setDesired(r2);
+ List instances = createAndStartNodes();
+
+ int clusterSize = r0 + r1 + r2;
+ assertEquals(instances.size(), clusterSize);
+ log.info("shrinking cluster");
+ getRole0Status().setDesired(-2);
+ List completionResults = new ArrayList<>();
+ try {
+ createStartAndStopNodes(completionResults);
+ fail("expected an exception");
+ } catch (TriggerClusterTeardownException e) {
+ }
+
+ }
+
+ @Test
+ public void testCancelWithRequestsOutstanding() throws Throwable {
+ // flex cluster size before the original set were allocated
+
+
+ getRole0Status().setDesired(6);
+ // build the ops
+ List ops = appState.reviewRequestAndReleaseNodes();
+ // here the data structures exist
+
+ // go down
+ getRole0Status().setDesired(3);
+ List ops2 = appState.reviewRequestAndReleaseNodes();
+ assertEquals(3, ops2.size());
+ for (AbstractRMOperation op : ops2) {
+ assertTrue(op instanceof CancelSingleRequest);
+ }
+
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
new file mode 100644
index 0000000..391f742
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRMOperations.java
@@ -0,0 +1,377 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRMOperationHandler;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.slider.server.appmaster.state.ContainerPriority.buildPriority;
+import static org.apache.slider.server.appmaster.state.ContainerPriority.extractRole;
+
+public class TestMockAppStateRMOperations extends BaseMockAppStateTest
+ implements MockRoles {
+
+ @Override
+ public String getTestName() {
+ return "TestMockAppStateRMOperations";
+ }
+
+ @Test
+ public void testPriorityOnly() throws Throwable {
+ assertEquals(5, extractRole(buildPriority(5, false)));
+ }
+
+ @Test
+ public void testPriorityRoundTrip() throws Throwable {
+ assertEquals(5, extractRole(buildPriority(5, false)));
+ }
+
+ @Test
+ public void testPriorityRoundTripWithRequest() throws Throwable {
+ int priority = buildPriority(5, false);
+ assertEquals(5, extractRole(priority));
+ }
+
+ @Test
+ public void testMockAddOp() throws Throwable {
+ getRole0Status().setDesired(1);
+ List ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 1);
+ ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0);
+ int priority = operation.getRequest().getPriority().getPriority();
+ assertEquals(extractRole(priority), MockFactory.PROVIDER_ROLE0.id);
+ MockRMOperationHandler handler = new MockRMOperationHandler();
+ handler.execute(ops);
+
+ AbstractRMOperation op = handler.operations.get(0);
+ assertTrue(op instanceof ContainerRequestOperation);
+ }
+
+ /**
+ * Test of a flex up and down op which verifies that outstanding
+ * requests are cancelled first.
+ *
+ * - request 5 nodes, assert 5 request made
+ * - allocate 1 of them
+ * - flex cluster size to 3
+ * - assert this generates 2 cancel requests
+ *
+ */
+ @Test
+ public void testRequestThenCancelOps() throws Throwable {
+ RoleStatus role0 = getRole0Status();
+ role0.setDesired(5);
+ List ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 5);
+ // now 5 outstanding requests.
+ assertEquals(5, role0.getRequested());
+
+ // allocate one
+ List processed = new ArrayList<>();
+ processed.add(ops.get(0));
+ List released = new ArrayList<>();
+ List completionResults = new ArrayList<>();
+ submitOperations(processed, released);
+ List instances = createAndSubmitNodes(released);
+ processSubmissionOperations(instances, completionResults, released);
+
+
+ // four outstanding
+ assertEquals(4, role0.getRequested());
+
+ // flex cluster to 3
+ role0.setDesired(3);
+ ops = appState.reviewRequestAndReleaseNodes();
+
+ // expect two cancel operation from review
+ assertListLength(ops, 2);
+ for (AbstractRMOperation op : ops) {
+ assertTrue(op instanceof CancelSingleRequest);
+ }
+
+ MockRMOperationHandler handler = new MockRMOperationHandler();
+ handler.availableToCancel = 4;
+ handler.execute(ops);
+ assertEquals(2, handler.availableToCancel);
+ assertEquals(2, role0.getRequested());
+
+ // flex down one more
+ role0.setDesired(2);
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 1);
+ for (AbstractRMOperation op : ops) {
+ assertTrue(op instanceof CancelSingleRequest);
+ }
+ handler.execute(ops);
+ assertEquals(1, handler.availableToCancel);
+ assertEquals(1, role0.getRequested());
+ }
+
+ @Test
+ public void testCancelNoActualContainers() throws Throwable {
+ RoleStatus role0 = getRole0Status();
+ role0.setDesired(5);
+ List ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 5);
+ // now 5 outstanding requests.
+ assertEquals(5, role0.getRequested());
+ role0.setDesired(0);
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 5);
+
+ }
+
+
+ @Test
+ public void testFlexDownOutstandingRequests() throws Throwable {
+ // engine only has capacity for two containers, so > 2 will be outstanding
+ engine = new MockYarnEngine(1, 2);
+ List ops;
+ // flex role0 to 4: two get allocated, two requests stay outstanding
+ RoleStatus role0 = getRole0Status();
+ role0.setDesired(4);
+ createAndSubmitNodes();
+
+ assertEquals(2, role0.getRequested());
+ assertEquals(2, role0.getActual());
+ // there are now two outstanding, two actual
+ // Release 3 and verify that the two
+ // cancellations were combined with a release
+ role0.setDesired(1);
+ assertEquals(-3, role0.getDelta());
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 3);
+ int numCancel = 0;
+ int numRelease = 0;
+ for (AbstractRMOperation op : ops) {
+ if (op instanceof CancelSingleRequest) {
+ numCancel++;
+ }
+ if (op instanceof ContainerReleaseOperation) {
+ numRelease++;
+ }
+ }
+ assertEquals(2, numCancel);
+ assertEquals(1, numRelease);
+ assertEquals(0, role0.getRequested());
+ assertEquals(1, role0.getReleasing());
+ }
+
+ @Test
+ public void testCancelAllOutstandingRequests() throws Throwable {
+
+ // ask for two instances of role0; nothing is allocated yet
+ RoleStatus role0 = getRole0Status();
+ role0.setDesired(2);
+ List ops;
+ ops = appState.reviewRequestAndReleaseNodes();
+ int count = 0;
+ for (AbstractRMOperation op : ops) {
+ if (op instanceof ContainerRequestOperation) {
+ count++;
+ }
+ }
+ assertEquals(2, count);
+
+ // both requests are still outstanding, none allocated;
+ // flex down to zero and verify that both outstanding
+ // requests are cancelled rather than containers released
+ role0.setDesired(0);
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertEquals(2, ops.size());
+
+ for (AbstractRMOperation op : ops) {
+ assertTrue(op instanceof CancelSingleRequest);
+ }
+ }
+
+
+ @Test
+ public void testFlexUpOutstandingRequests() throws Throwable {
+
+ List ops;
+ // role: desired = 2, requested = 1, actual=1
+ RoleStatus role0 = getRole0Status();
+ role0.setDesired(2);
+ role0.incActual();
+ role0.incRequested();
+
+
+
+ // flex up 2 nodes, yet expect only one node to be requested,
+ // as the outstanding request is taken into account
+ role0.setDesired(4);
+ role0.incRequested();
+
+ assertEquals(1, role0.getActual());
+ assertEquals(2, role0.getRequested());
+ assertEquals(3, role0.getActualAndRequested());
+ assertEquals(1, role0.getDelta());
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 1);
+ assertTrue(ops.get(0) instanceof ContainerRequestOperation);
+ assertEquals(3, role0.getRequested());
+ }
+
+ @Test
+ public void testFlexUpNoSpace() throws Throwable {
+ // engine only has capacity for two containers, so > 2 will be outstanding
+ engine = new MockYarnEngine(1, 2);
+ // flex role0 to 4: two get allocated, two requests stay outstanding
+ RoleStatus role0 = getRole0Status();
+ role0.setDesired(4);
+ createAndSubmitNodes();
+
+ assertEquals(2, role0.getRequested());
+ assertEquals(2, role0.getActual());
+ role0.setDesired(8);
+ assertEquals(4, role0.getDelta());
+ createAndSubmitNodes();
+ assertEquals(6, role0.getRequested());
+ }
+
+
+ @Test
+ public void testAllocateReleaseOp() throws Throwable {
+ getRole0Status().setDesired(1);
+
+ List ops = appState.reviewRequestAndReleaseNodes();
+ ContainerRequestOperation operation = (ContainerRequestOperation)ops.get(0);
+ AMRMClient.ContainerRequest request = operation.getRequest();
+ Container cont = engine.allocateContainer(request);
+ List allocated = new ArrayList<>();
+ allocated.add(cont);
+ List assignments = new ArrayList<>();
+ List operations = new ArrayList<>();
+ appState.onContainersAllocated(allocated, assignments, operations);
+
+ assertListLength(ops, 1);
+ assertListLength(assignments, 1);
+ ContainerAssignment assigned = assignments.get(0);
+ Container target = assigned.container;
+ assertEquals(target.getId(), cont.getId());
+ int roleId = assigned.role.getPriority();
+ assertEquals(roleId, extractRole(request.getPriority()));
+ assertEquals(assigned.role.getName(), ROLE0);
+ RoleInstance ri = roleInstance(assigned);
+ //tell the app it arrived
+ appState.containerStartSubmitted(target, ri);
+ appState.innerOnNodeManagerContainerStarted(target.getId());
+ assertEquals(1, getRole0Status().getStarted());
+
+ //now release it by changing the role status
+ getRole0Status().setDesired(0);
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 1);
+
+ assertTrue(ops.get(0) instanceof ContainerReleaseOperation);
+ ContainerReleaseOperation release = (ContainerReleaseOperation) ops.get(0);
+ assertEquals(release.getContainerId(), cont.getId());
+ }
+
+ @Test
+ public void testComplexAllocation() throws Throwable {
+ getRole0Status().setDesired(1);
+ getRole1Status().setDesired(3);
+
+ List ops = appState.reviewRequestAndReleaseNodes();
+ List allocations = engine.execute(ops);
+ List assignments = new ArrayList<>();
+ List releases = new ArrayList<>();
+ appState.onContainersAllocated(allocations, assignments, releases);
+ // we expect four release requests here for all the allocated containers
+ assertListLength(releases, 4);
+ for (AbstractRMOperation op : releases) {
+ assertTrue(op instanceof CancelSingleRequest);
+ }
+ assertListLength(assignments, 4);
+ for (ContainerAssignment assigned : assignments) {
+ Container target = assigned.container;
+ RoleInstance ri = roleInstance(assigned);
+ appState.containerStartSubmitted(target, ri);
+ }
+ //insert some async operation here
+ for (ContainerAssignment assigned : assignments) {
+ Container target = assigned.container;
+ appState.innerOnNodeManagerContainerStarted(target.getId());
+ }
+ assertEquals(4, engine.containerCount());
+ getRole1Status().setDesired(0);
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertListLength(ops, 3);
+ allocations = engine.execute(ops);
+ assertEquals(1, engine.containerCount());
+
+ appState.onContainersAllocated(allocations, assignments, releases);
+ assertTrue(assignments.isEmpty());
+ assertTrue(releases.isEmpty());
+ }
+
+ @Test
+ public void testDoubleNodeManagerStartEvent() throws Throwable {
+ getRole0Status().setDesired(1);
+
+ List ops = appState.reviewRequestAndReleaseNodes();
+ List allocations = engine.execute(ops);
+ List assignments = new ArrayList<>();
+ List releases = new ArrayList<>();
+ appState.onContainersAllocated(allocations, assignments, releases);
+ assertListLength(assignments, 1);
+ ContainerAssignment assigned = assignments.get(0);
+ Container target = assigned.container;
+ RoleInstance ri = roleInstance(assigned);
+ appState.containerStartSubmitted(target, ri);
+ RoleInstance ri2 = appState.innerOnNodeManagerContainerStarted(target
+ .getId());
+ assertEquals(ri2, ri);
+ //try a second time, expect an error
+ try {
+ appState.innerOnNodeManagerContainerStarted(target.getId());
+ fail("Expected an exception");
+ } catch (RuntimeException expected) {
+ // expected
+ }
+ //and non-faulter should not downgrade to a null
+ log.warn("Ignore any exception/stack trace that appears below");
+ log.warn("===============================================================");
+ RoleInstance ri3 = appState.onNodeManagerContainerStarted(target.getId());
+ log.warn("===============================================================");
+ log.warn("Ignore any exception/stack trace that appeared above");
+ assertNull(ri3);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
new file mode 100644
index 0000000..c9ffc2d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRebuildOnAMRestart.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.StatusKeys;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test that app state is rebuilt on a restart
+ */
+public class TestMockAppStateRebuildOnAMRestart extends BaseMockAppStateTest
+ implements MockRoles {
+
+ @Override
+ public String getTestName() {
+ return "TestMockAppStateRebuildOnAMRestart";
+ }
+
+ @Test
+ public void testRebuild() throws Throwable {
+
+ int r0 = 1;
+ int r1 = 2;
+ int r2 = 3;
+ getRole0Status().setDesired(r0);
+ getRole1Status().setDesired(r1);
+ getRole2Status().setDesired(r2);
+ List instances = createAndStartNodes();
+
+ int clusterSize = r0 + r1 + r2;
+ assertEquals(instances.size(), clusterSize);
+
+ //clone the list
+ List containers = new ArrayList<>();
+ for (RoleInstance ri : instances) {
+ containers.add(ri.container);
+ }
+ NodeMap nodemap = appState.getRoleHistory().cloneNodemap();
+
+ //and rebuild
+
+ AppStateBindingInfo bindingInfo = buildBindingInfo();
+ bindingInfo.instanceDefinition = factory.newInstanceDefinition(r0, r1, r2);
+ bindingInfo.liveContainers = containers;
+ appState = new MockAppState(bindingInfo);
+
+ assertEquals(appState.getStartedCountainerCount(), clusterSize);
+
+ appState.getRoleHistory().dump();
+
+ //check that the app state direct structures match
+ List r0live = appState.enumLiveNodesInRole(ROLE0);
+ List r1live = appState.enumLiveNodesInRole(ROLE1);
+ List r2live = appState.enumLiveNodesInRole(ROLE2);
+
+ assertEquals(r0, r0live.size());
+ assertEquals(r1, r1live.size());
+ assertEquals(r2, r2live.size());
+
+ //now examine the role history
+ NodeMap newNodemap = appState.getRoleHistory().cloneNodemap();
+
+ for (NodeInstance nodeInstance : newNodemap.values()) {
+ String hostname = nodeInstance.hostname;
+ NodeInstance orig = nodemap.get(hostname);
+ assertNotNull("Null entry in original nodemap for " + hostname, orig);
+
+ for (int i = 0; i < ROLE_COUNT; i++) {
+ assertEquals(nodeInstance.getActiveRoleInstances(i), orig.getActiveRoleInstances(i));
+ NodeEntry origRE = orig.getOrCreate(i);
+ NodeEntry newRE = nodeInstance.getOrCreate(i);
+ assertEquals(origRE.getLive(), newRE.getLive());
+ assertEquals(0, newRE.getStarting());
+ }
+ }
+ assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
+
+ ClusterDescription status = appState.getClusterStatus();
+ // verify the AM restart container count was set
+ String restarted = status.getInfo(StatusKeys.INFO_CONTAINERS_AM_RESTART);
+ assertNotNull(restarted);
+ //and that the count matches the number of restored containers
+ assertEquals(Integer.parseInt(restarted), containers.size());
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java
new file mode 100644
index 0000000..ceb04c4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRolePlacement.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.ContainerAssignment;
+import org.apache.slider.server.appmaster.state.RoleHistoryUtils;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.slider.server.appmaster.state.ContainerPriority.extractRole;
+
+/**
+ * Test that the app state lets you ask for nodes, get a specific host,
+ * release it and then get that one back again.
+ */
+public class TestMockAppStateRolePlacement extends BaseMockAppStateTest
+ implements MockRoles {
+
+ @Override
+ public String getTestName() {
+ return "TestMockAppStateRolePlacement";
+ }
+
+
+ @Test
+ public void testAllocateReleaseRealloc() throws Throwable {
+ getRole0Status().setDesired(1);
+
+ List ops = appState.reviewRequestAndReleaseNodes();
+ ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0);
+ AMRMClient.ContainerRequest request = operation.getRequest();
+ assertTrue(request.getRelaxLocality());
+ assertNull(request.getNodes());
+ assertNull(request.getRacks());
+ assertNotNull(request.getCapability());
+
+ Container allocated = engine.allocateContainer(request);
+ List assignments = new ArrayList<>();
+ List releaseOperations = new ArrayList<>();
+ appState.onContainersAllocated(Arrays.asList((Container)allocated),
+ assignments, releaseOperations);
+ // verify the release matches the allocation
+ assertEquals(releaseOperations.size(), 1);
+ CancelSingleRequest cancelOp = (CancelSingleRequest)releaseOperations.get(0);
+ assertNotNull(cancelOp.getRequest());
+ assertNotNull(cancelOp.getRequest().getCapability());
+ assertEquals(cancelOp.getRequest().getCapability(), allocated.getResource());
+ // now the assignment
+ assertEquals(assignments.size(), 1);
+ ContainerAssignment assigned = assignments.get(0);
+ Container container = assigned.container;
+ assertEquals(container.getId(), allocated.getId());
+ int roleId = assigned.role.getPriority();
+ assertEquals(roleId, extractRole(request.getPriority()));
+ assertEquals(assigned.role.getName(), ROLE0);
+ String containerHostname = RoleHistoryUtils.hostnameOf(container);
+ RoleInstance ri = roleInstance(assigned);
+ //tell the app it arrived
+ appState.containerStartSubmitted(container, ri);
+ assertNotNull(appState.onNodeManagerContainerStarted(container.getId()));
+ assertEquals(getRole0Status().getStarted(), 1);
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertEquals(ops.size(), 0);
+
+ //now it is surplus
+ getRole0Status().setDesired(0);
+ ops = appState.reviewRequestAndReleaseNodes();
+ ContainerReleaseOperation release = (ContainerReleaseOperation) ops.get(0);
+
+ assertEquals(release.getContainerId(), container.getId());
+ engine.execute(ops);
+ assertNotNull(appState.onCompletedNode(containerStatus(container.getId()))
+ .roleInstance);
+
+ //view the world
+ appState.getRoleHistory().dump();
+
+ //now ask for a new one
+ getRole0Status().setDesired(1);
+ ops = appState.reviewRequestAndReleaseNodes();
+ assertEquals(ops.size(), 1);
+ operation = (ContainerRequestOperation) ops.get(0);
+ AMRMClient.ContainerRequest request2 = operation.getRequest();
+ assertNotNull(request2);
+ assertEquals(request2.getNodes().get(0), containerHostname);
+ assertFalse(request2.getRelaxLocality());
+ engine.execute(ops);
+
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java
new file mode 100644
index 0000000..175c834
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateRoleRelease.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test that if you have >1 role, the right roles are chosen for release.
+ */
+public class TestMockAppStateRoleRelease extends BaseMockAppStateTest
+ implements MockRoles {
+
+ @Override
+ public String getTestName() {
+ return "TestMockAppStateRoleRelease";
+ }
+
+ /**
+ * Small cluster with multiple containers per node,
+ * to guarantee many container allocations on each node
+ * @return a small mock YARN engine (4 nodes, 4 containers each)
+ */
+ @Override
+ public MockYarnEngine createYarnEngine() {
+ return new MockYarnEngine(4, 4);
+ }
+
+ @Test
+ public void testAllocateReleaseRealloc() throws Throwable {
+ /**
+ * Allocate to all nodes
+ */
+ getRole0Status().setDesired(6);
+ getRole1Status().setDesired(5);
+ getRole2Status().setDesired(4);
+ List instances = createAndStartNodes();
+ assertEquals(instances.size(), 15);
+
+ //now it is surplus
+ getRole0Status().setDesired(0);
+ List ops = appState.reviewRequestAndReleaseNodes();
+
+ List released = new ArrayList<>();
+ engine.execute(ops, released);
+ List ids = extractContainerIds(instances, 0);
+ for (ContainerId cid : released) {
+ assertNotNull(appState.onCompletedNode(containerStatus(cid))
+ .roleInstance);
+ assertTrue(ids.contains(cid));
+ }
+
+ //view the world
+ appState.getRoleHistory().dump();
+
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
new file mode 100644
index 0000000..e9229cb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockAppStateUniqueNames.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.RoleKeys;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.SliderInternalStateException;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.MostRecentContainerReleaseSelector;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Test dynamic flexing of components configured with unique name generation.
+ */
+public class TestMockAppStateUniqueNames extends BaseMockAppStateTest
+ implements MockRoles {
+
+ @Override
+ public String getTestName() {
+ return "TestMockAppStateUniqueNames";
+ }
+
+ /**
+ * Small cluster with multiple containers per node,
+ * to guarantee many container allocations on each node
+ * @return a small mock YARN engine (4 nodes, 4 containers each)
+ */
+ @Override
+ public MockYarnEngine createYarnEngine() {
+ return new MockYarnEngine(4, 4);
+ }
+
+ @Override
+ public AppStateBindingInfo buildBindingInfo() {
+ AppStateBindingInfo bindingInfo = super.buildBindingInfo();
+ bindingInfo.releaseSelector = new MostRecentContainerReleaseSelector();
+ return bindingInfo;
+ }
+
+ @Override
+ public AggregateConf buildInstanceDefinition() {
+ AggregateConf instance = factory.newInstanceDefinition(0, 0, 0);
+
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "1");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "6");
+ opts.put(ResourceKeys.YARN_MEMORY, "1024");
+ opts.put(ResourceKeys.YARN_CORES, "2");
+ opts.put(ResourceKeys.UNIQUE_NAMES, "true");
+
+ instance.getResourceOperations().getOrAddComponent("group1").putAll(opts);
+ return instance;
+ }
+
+ private ConfTreeOperations init()
+ throws TriggerClusterTeardownException, SliderInternalStateException {
+ createAndStartNodes();
+ ConfTree resources = appState.getInstanceDefinition().getResources();
+ return new ConfTreeOperations(resources);
+ }
+
+ private static void checkRole(MockAppState appState,
+ String roleName,
+ String roleGroup,
+ Map expectedOpts)
+ throws BadConfigException {
+
+ for (String key : expectedOpts.keySet()) {
+ if (ResourceKeys.COMPONENT_PRIORITY.equals(key) ||
+ ResourceKeys.COMPONENT_INSTANCES.equals(key)) {
+ continue;
+ }
+ assertEquals(expectedOpts.get(key), appState.getClusterStatus()
+ .getMandatoryRoleOpt(roleName, key));
+ }
+ assertEquals(1, appState.getClusterStatus().getMandatoryRoleOptInt(
+ roleName, ResourceKeys.COMPONENT_INSTANCES));
+ assertEquals(roleGroup, appState.getClusterStatus().getMandatoryRoleOpt(
+ roleName, RoleKeys.ROLE_GROUP));
+ }
+
+ @Test
+ public void testDynamicFlexAddRole() throws Throwable {
+ ConfTreeOperations cd = init();
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "2");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "7");
+ opts.put(ResourceKeys.YARN_MEMORY, "384");
+ opts.put(ResourceKeys.YARN_CORES, "4");
+ opts.put(ResourceKeys.UNIQUE_NAMES, "true");
+
+ cd.getOrAddComponent("group2").putAll(opts);
+ appState.updateResourceDefinitions(cd.confTree);
+ createAndStartNodes();
+ dumpClusterDescription("updated CD", appState.getClusterStatus());
+ assertEquals(1, appState.lookupRoleStatus("group11").getDesired());
+ assertEquals(1, appState.lookupRoleStatus("group21").getDesired());
+ assertEquals(1, appState.lookupRoleStatus("group22").getDesired());
+ assertEquals(6, appState.lookupRoleStatus("group11").getPriority());
+ assertEquals(7, appState.lookupRoleStatus("group21").getPriority());
+ assertEquals(8, appState.lookupRoleStatus("group22").getPriority());
+ assertEquals(1024, appState.lookupRoleStatus("group11").getResourceRequirements().getMemory());
+ assertEquals(384, appState.lookupRoleStatus("group21").getResourceRequirements().getMemory());
+ assertEquals(384, appState.lookupRoleStatus("group22").getResourceRequirements().getMemory());
+ assertEquals(2, appState.lookupRoleStatus("group11").getResourceRequirements().getVirtualCores());
+ assertEquals(4, appState.lookupRoleStatus("group21").getResourceRequirements().getVirtualCores());
+ assertEquals(4, appState.lookupRoleStatus("group22").getResourceRequirements().getVirtualCores());
+
+ appState.refreshClusterStatus();
+ checkRole(appState, "group21", "group2", opts);
+ checkRole(appState, "group22", "group2", opts);
+ }
+
+ @Test
+ public void testDynamicFlexDown() throws Throwable {
+ ConfTreeOperations cd = init();
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "0");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "6");
+ opts.put(ResourceKeys.YARN_MEMORY, "384");
+ opts.put(ResourceKeys.YARN_CORES, "4");
+ opts.put(ResourceKeys.UNIQUE_NAMES, "true");
+
+ cd.getOrAddComponent("group1").putAll(opts);
+ appState.updateResourceDefinitions(cd.confTree);
+ createAndStartNodes();
+ dumpClusterDescription("updated CD", appState.getClusterStatus());
+ appState.lookupRoleStatus(6);
+ assertEquals(0, appState.lookupRoleStatus("group11").getDesired());
+ assertEquals(6, appState.lookupRoleStatus("group11").getPriority());
+ assertEquals(384, appState.lookupRoleStatus("group11").getResourceRequirements().getMemory());
+ assertEquals(4, appState.lookupRoleStatus("group11").getResourceRequirements().getVirtualCores());
+ }
+
+ @Test
+ public void testDynamicFlexUp() throws Throwable {
+ ConfTreeOperations cd = init();
+ Map opts = new HashMap<>();
+ opts.put(ResourceKeys.COMPONENT_INSTANCES, "3");
+ opts.put(ResourceKeys.COMPONENT_PRIORITY, "6");
+ opts.put(ResourceKeys.YARN_MEMORY, "640");
+ opts.put(ResourceKeys.YARN_CORES, "8");
+ opts.put(ResourceKeys.UNIQUE_NAMES, "true");
+
+ cd.getOrAddComponent("group1").putAll(opts);
+ appState.updateResourceDefinitions(cd.confTree);
+ createAndStartNodes();
+ dumpClusterDescription("updated CD", appState.getClusterStatus());
+ appState.lookupRoleStatus(6);
+ appState.lookupRoleStatus(7);
+ appState.lookupRoleStatus(8);
+ assertEquals(1, appState.lookupRoleStatus("group11").getDesired());
+ assertEquals(1, appState.lookupRoleStatus("group12").getDesired());
+ assertEquals(1, appState.lookupRoleStatus("group13").getDesired());
+ assertEquals(6, appState.lookupRoleStatus("group11").getPriority());
+ assertEquals(7, appState.lookupRoleStatus("group12").getPriority());
+ assertEquals(8, appState.lookupRoleStatus("group13").getPriority());
+ assertEquals(640, appState.lookupRoleStatus("group11").getResourceRequirements().getMemory());
+ assertEquals(640, appState.lookupRoleStatus("group12").getResourceRequirements().getMemory());
+ assertEquals(640, appState.lookupRoleStatus("group13").getResourceRequirements().getMemory());
+ assertEquals(8, appState.lookupRoleStatus("group11").getResourceRequirements().getVirtualCores());
+ assertEquals(8, appState.lookupRoleStatus("group12").getResourceRequirements().getVirtualCores());
+ assertEquals(8, appState.lookupRoleStatus("group13").getResourceRequirements().getVirtualCores());
+
+ appState.refreshClusterStatus();
+ checkRole(appState, "group11", "group1", opts);
+ checkRole(appState, "group12", "group1", opts);
+ checkRole(appState, "group13", "group1", opts);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
new file mode 100644
index 0000000..e8b83a8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockContainerResourceAllocations.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.junit.Test;
+
+import java.util.List;
+
+/**
+ * Test the container resource allocation logic
+ */
+public class TestMockContainerResourceAllocations extends BaseMockAppStateTest {
+
+ @Test
+ public void testNormalAllocations() throws Throwable {
+ ConfTree clusterSpec = factory.newConfTree(1, 0, 0);
+ ConfTreeOperations cto = new ConfTreeOperations(clusterSpec);
+
+ cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_MEMORY, 512);
+ cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_CORES, 2);
+ appState.updateResourceDefinitions(clusterSpec);
+ List ops = appState.reviewRequestAndReleaseNodes();
+ assertEquals(ops.size(), 1);
+ ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0);
+ Resource requirements = operation.getRequest().getCapability();
+ assertEquals(requirements.getMemory(), 512);
+ assertEquals(requirements.getVirtualCores(), 2);
+ }
+
+ @Test
+ public void testMaxMemAllocations() throws Throwable {
+ ConfTree clusterSpec = factory.newConfTree(1, 0, 0);
+ ConfTreeOperations cto = new ConfTreeOperations(clusterSpec);
+
+ cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_MEMORY,
+ ResourceKeys.YARN_RESOURCE_MAX);
+ cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_CORES, 2);
+ appState.updateResourceDefinitions(clusterSpec);
+ List ops = appState.reviewRequestAndReleaseNodes();
+ assertEquals(ops.size(), 1);
+ ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0);
+ Resource requirements = operation.getRequest().getCapability();
+ assertEquals(requirements.getMemory(), MockAppState.RM_MAX_RAM);
+ assertEquals(requirements.getVirtualCores(), 2);
+ }
+
+ @Test
+ public void testMaxCoreAllocations() throws Throwable {
+ ConfTree clusterSpec = factory.newConfTree(1, 0, 0);
+ ConfTreeOperations cto = new ConfTreeOperations(clusterSpec);
+ cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_MEMORY,
+ 512);
+ cto.setComponentOpt(MockRoles.ROLE0, ResourceKeys.YARN_CORES,
+ ResourceKeys.YARN_RESOURCE_MAX);
+ appState.updateResourceDefinitions(clusterSpec);
+ List ops = appState.reviewRequestAndReleaseNodes();
+ assertEquals(ops.size(), 1);
+ ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0);
+ Resource requirements = operation.getRequest().getCapability();
+ assertEquals(requirements.getMemory(), 512);
+ assertEquals(requirements.getVirtualCores(), MockAppState.RM_MAX_CORES);
+ }
+
+ @Test
+ public void testMaxDefaultAllocations() throws Throwable {
+
+ ConfTree clusterSpec = factory.newConfTree(1, 0, 0);
+ appState.updateResourceDefinitions(clusterSpec);
+ List ops = appState.reviewRequestAndReleaseNodes();
+ assertEquals(ops.size(), 1);
+ ContainerRequestOperation operation = (ContainerRequestOperation) ops.get(0);
+ Resource requirements = operation.getRequest().getCapability();
+ assertEquals(requirements.getMemory(), ResourceKeys.DEF_YARN_MEMORY);
+ assertEquals(requirements.getVirtualCores(), ResourceKeys.DEF_YARN_CORES);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java
new file mode 100644
index 0000000..0fffe52
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestMockLabelledAAPlacement.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoles;
+import org.apache.slider.server.appmaster.model.mock.MockYarnEngine;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.state.AppState;
+import org.apache.slider.server.appmaster.state.AppState.NodeUpdatedOutcome;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test Anti-affine placement
+ */
+public class TestMockLabelledAAPlacement extends BaseMockAppStateAATest
+ implements MockRoles {
+
+  private static final int NODES = 3;
+  private static final int GPU_NODES = 2;
+  private static final String HOST0 = "00000000";
+  private static final String HOST1 = "00000001";
+
+
+ @Override
+ public void setup() throws Exception {
+ super.setup();
+ // node 1 is GPU
+
+ updateNodes(MockFactory.instance.newNodeReport(HOST0, NodeState.RUNNING,
+ LABEL_GPU));
+ updateNodes(MockFactory.instance.newNodeReport(HOST1, NodeState.RUNNING,
+ LABEL_GPU));
+ }
+
+ @Override
+ public MockYarnEngine createYarnEngine() {
+ return new MockYarnEngine(NODES, 8);
+ }
+
+ void assertAllContainersAA() {
+ assertAllContainersAA(gpuRole.getKey());
+ }
+
+ /**
+ *
+ * @throws Throwable
+ */
+  @Test
+  public void testAskForTooMany() throws Throwable {
+
+    describe("Ask for 1 more than the no of available nodes;" +
+        " expect the final request to be unsatisfied until the cluster " +
+        "changes size");
+    //more than expected
+    int size = GPU_NODES;
+    gpuRole.setDesired(size + 1);
+
+    List operations = appState.reviewRequestAndReleaseNodes();
+    assertTrue(gpuRole.isAARequestOutstanding());
+
+    assertEquals(size, gpuRole.getPendingAntiAffineRequests());
+    for (int i = 0; i < size; i++) {
+      String iter = "Iteration " + i + " role = " + gpuRole;
+      describe(iter);
+      List operationsOut = new ArrayList<>();
+
+      List roleInstances = submitOperations(operations,
+          EMPTY_ID_LIST, operationsOut);
+      // one instance per request
+      assertEquals(1, roleInstances.size());
+      appState.onNodeManagerContainerStarted(roleInstances.get(0)
+          .getContainerId());
+      assertAllContainersAA();
+      // there should be none left
+      log.debug(nodeInformationSnapshotAsString());
+      operations = operationsOut;
+      if (i + 1 < size) {
+        assertEquals(2, operations.size());
+      } else {
+        assertEquals(1, operations.size());
+      }
+    }
+    // expect an outstanding AA request to be unsatisfied
+    assertTrue(gpuRole.getActual() < gpuRole.getDesired());
+    assertEquals(0, gpuRole.getRequested());
+    assertFalse(gpuRole.isAARequestOutstanding());
+    List allocatedContainers = engine.execute(operations, EMPTY_ID_LIST);
+    assertEquals(0, allocatedContainers.size());
+    // in a review now, no more requests can be generated, as there is no space for AA placements,
+    // even though there is cluster capacity
+    assertEquals(0, appState.reviewRequestAndReleaseNodes().size());
+
+    // switch node 2 into being labelled
+    NodeUpdatedOutcome outcome = updateNodes(MockFactory.instance.
+        newNodeReport("00000002", NodeState.RUNNING, "gpu"));
+
+    assertEquals(NODES, cloneNodemap().size());
+    assertTrue(outcome.clusterChanged);
+    // no active calls to empty
+    assertTrue(outcome.operations.isEmpty());
+    assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
+  }
+
+ protected AppState.NodeUpdatedOutcome addNewNode() {
+ return updateNodes(MockFactory.instance.newNodeReport("00000004",
+ NodeState.RUNNING, "gpu"));
+ }
+
+ @Test
+ public void testClusterSizeChangesDuringRequestSequence() throws Throwable {
+ describe("Change the cluster size where the cluster size changes during " +
+ "a test sequence.");
+ gpuRole.setDesired(GPU_NODES + 1);
+ List operations = appState
+ .reviewRequestAndReleaseNodes();
+ assertTrue(gpuRole.isAARequestOutstanding());
+ assertEquals(GPU_NODES, gpuRole.getPendingAntiAffineRequests());
+ NodeUpdatedOutcome outcome = addNewNode();
+ assertTrue(outcome.clusterChanged);
+ // one call to cancel
+ assertEquals(1, outcome.operations.size());
+ // and on a review, one more to rebuild
+ assertEquals(1, appState.reviewRequestAndReleaseNodes().size());
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java
new file mode 100644
index 0000000..e23b6dc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/appstate/TestOutstandingRequestValidation.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.appstate;
+
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.slider.server.appmaster.state.ContainerPriority;
+import org.apache.slider.server.appmaster.state.OutstandingRequest;
+import org.apache.slider.test.SliderTestBase;
+import org.junit.Test;
+
+public class TestOutstandingRequestValidation extends SliderTestBase {
+
+ final String[] H1 = hosts("one");
+
+ @Test
+ public void testRelaxedNohostsOrLabels() throws Throwable {
+ createAndValidate(null, null, true);
+ }
+
+ @Test
+ public void testRelaxedLabels() throws Throwable {
+ createAndValidate(null, "gpu", true);
+ }
+
+ @Test
+ public void testNonRelaxedLabels() throws Throwable {
+ expectCreationFailure(null, "gpu", false);
+ }
+
+ @Test
+ public void testRelaxedHostNoLabel() throws Throwable {
+ createAndValidate(H1, "", true);
+ }
+
+ /**
+ * Use varargs for simple list to array conversion
+ * @param hostnames host names
+ * @return
+ */
+ public static String[] hosts(String...hostnames) {
+ return hostnames;
+ }
+
+ void expectCreationFailure(
+ String[] hosts,
+ String labels,
+ boolean relaxLocality) {
+ try {
+ ContainerRequest result = createAndValidate(hosts, labels, relaxLocality);
+ fail("Expected an exception, got " + result);
+ } catch (IllegalArgumentException expected) {
+ assertTrue(expected.toString()
+ .contains("Can't turn off locality relaxation on a request with no " +
+ "location constraints"));
+ }
+ }
+
+
+  AMRMClient.ContainerRequest createAndValidate(
+      String[] hosts,
+      String labels,
+      boolean relaxLocality) {
+    // resource sizing (cores/memory) is supplied inside newRequest;
+    // only the priority value is needed at this level
+    int p = 1;
+    Priority pri = ContainerPriority.createPriority(p, !relaxLocality);
+    ContainerRequest issuedRequest =
+        newRequest(pri, hosts, labels, relaxLocality);
+    OutstandingRequest.validateContainerRequest(issuedRequest, p, "");
+    return issuedRequest;
+  }
+
+ AMRMClient.ContainerRequest newRequest(
+ Priority pri,
+ String[] hosts,
+ String labels,
+ boolean relaxLocality) {
+ int cores = 1;
+ int memory = 64;
+ Resource resource = Resource.newInstance(memory, cores);
+ return new AMRMClient.ContainerRequest(resource,
+ hosts,
+ null,
+ pri,
+ relaxLocality,
+ labels);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java
new file mode 100644
index 0000000..d7f9bc6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryAA.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.types.NodeInformation;
+import org.apache.slider.api.types.NodeInformationList;
+import org.apache.slider.api.types.RestTypeMarshalling;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.test.SliderTestBase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test anti-affine
+ */
+public class TestRoleHistoryAA extends SliderTestBase {
+
+ List hostnames = Arrays.asList("1", "2", "3");
+ NodeMap nodeMap, gpuNodeMap;
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+
+ public TestRoleHistoryAA() throws BadConfigException {
+ }
+
+
+ @Override
+ public void setup() throws Exception {
+ super.setup();
+ nodeMap = createNodeMap(hostnames, NodeState.RUNNING, "");
+ gpuNodeMap = createNodeMap(hostnames, NodeState.RUNNING, "GPU");
+ }
+
+ @Test
+ public void testFindNodesInFullCluster() throws Throwable {
+ // all three will surface at first
+ verifyResultSize(3, nodeMap.findAllNodesForRole(1, ""));
+ }
+
+ @Test
+ public void testFindNodesInUnhealthyCluster() throws Throwable {
+ // all three will surface at first
+ markNodeOneUnhealthy();
+ verifyResultSize(2, nodeMap.findAllNodesForRole(1, ""));
+ }
+
+ public boolean markNodeOneUnhealthy() {
+ return setNodeState(nodeMap.get("1"), NodeState.UNHEALTHY);
+ }
+
+ protected boolean setNodeState(NodeInstance node, NodeState state) {
+ return node.updateNode(MockFactory.instance.newNodeReport(node.hostname,
+ state, ""));
+ }
+
+ @Test
+ public void testFindNoNodesWrongLabel() throws Throwable {
+ // all three will surface at first
+ verifyResultSize(0, nodeMap.findAllNodesForRole(1, "GPU"));
+ }
+
+ @Test
+ public void testFindSomeNodesSomeLabel() throws Throwable {
+ // all three will surface at first
+ update(nodeMap,
+ Arrays.asList(MockFactory.instance.newNodeReport("1", NodeState.RUNNING,
+ "GPU")));
+ List gpuNodes = nodeMap.findAllNodesForRole(1, "GPU");
+ verifyResultSize(1, gpuNodes);
+ NodeInstance instance = gpuNodes.get(0);
+ instance.getOrCreate(1).onStarting();
+ assertFalse(instance.canHost(1, "GPU"));
+ assertFalse(instance.canHost(1, ""));
+ verifyResultSize(0, nodeMap.findAllNodesForRole(1, "GPU"));
+
+ }
+
+ @Test
+ public void testFindNoNodesRightLabel() throws Throwable {
+ // all three will surface at first
+ verifyResultSize(3, gpuNodeMap.findAllNodesForRole(1, "GPU"));
+ }
+
+ @Test
+ public void testFindNoNodesNoLabel() throws Throwable {
+ // all three will surface at first
+ verifyResultSize(3, gpuNodeMap.findAllNodesForRole(1, ""));
+ }
+
+ @Test
+ public void testFindNoNodesClusterRequested() throws Throwable {
+ // all three will surface at first
+ for (NodeInstance ni : nodeMap.values()) {
+ ni.getOrCreate(1).request();
+ }
+ assertNoAvailableNodes(1);
+ }
+
+ @Test
+ public void testFindNoNodesClusterBusy() throws Throwable {
+ // all three will surface at first
+ for (NodeInstance ni : nodeMap.values()) {
+ ni.getOrCreate(1).request();
+ }
+ assertNoAvailableNodes(1);
+ }
+
+ /**
+ * Tag all nodes as starting, then walk one through a bit
+ * more of its lifecycle
+ */
+ @Test
+ public void testFindNoNodesLifecycle() throws Throwable {
+ // all three will surface at first
+ for (NodeInstance ni : nodeMap.values()) {
+ ni.getOrCreate(1).onStarting();
+ }
+ assertNoAvailableNodes(1);
+
+ // walk one of the nodes through the lifecycle
+ NodeInstance node1 = nodeMap.get("1");
+ assertFalse(node1.canHost(1,""));
+ node1.get(1).onStartCompleted();
+ assertFalse(node1.canHost(1,""));
+ assertNoAvailableNodes(1);
+ node1.get(1).release();
+ assertTrue(node1.canHost(1,""));
+ List list2 =
+ verifyResultSize(1, nodeMap.findAllNodesForRole(1, ""));
+ assertEquals(list2.get(0).hostname, "1");
+
+ // now tag that node as unhealthy and expect it to go away
+ markNodeOneUnhealthy();
+ assertNoAvailableNodes(1);
+ }
+
+ @Test
+ public void testRolesIndependent() throws Throwable {
+ NodeInstance node1 = nodeMap.get("1");
+ NodeEntry role1 = node1.getOrCreate(1);
+ NodeEntry role2 = node1.getOrCreate(2);
+ for (NodeInstance ni : nodeMap.values()) {
+ ni.updateNode(MockFactory.instance.newNodeReport("0", NodeState
+ .UNHEALTHY, ""));
+ }
+ assertNoAvailableNodes(1);
+ assertNoAvailableNodes(2);
+ assertTrue(setNodeState(node1, NodeState.RUNNING));
+ // tag role 1 as busy
+ role1.onStarting();
+ assertNoAvailableNodes(1);
+
+ verifyResultSize(1, nodeMap.findAllNodesForRole(2, ""));
+ assertTrue(node1.canHost(2,""));
+ }
+
+  @Test
+  public void testNodeEntryAvailability() throws Throwable {
+    NodeEntry entry = new NodeEntry(1);
+    assertTrue(entry.isAvailable());
+    entry.onStarting();
+    assertFalse(entry.isAvailable());
+    entry.onStartCompleted();
+    assertFalse(entry.isAvailable());
+    entry.release();
+    assertTrue(entry.isAvailable());
+    entry.onStarting();
+    assertFalse(entry.isAvailable());
+    entry.onStartFailed();
+    assertTrue(entry.isAvailable());
+  }
+
+ @Test
+ public void testNodeInstanceSerialization() throws Throwable {
+ MockRoleHistory rh2 = new MockRoleHistory(new ArrayList<>());
+ rh2.getOrCreateNodeInstance("localhost");
+ NodeInstance instance = rh2.getOrCreateNodeInstance("localhost");
+ instance.getOrCreate(1).onStartCompleted();
+ Map naming = Collections.singletonMap(1, "manager");
+ NodeInformation ni = instance.serialize(naming);
+ assertEquals(1, ni.entries.get("manager").live);
+ NodeInformation ni2 = rh2.getNodeInformation("localhost", naming);
+ assertEquals(1, ni2.entries.get("manager").live);
+ Map info = rh2.getNodeInformationSnapshot(naming);
+ assertEquals(1, info.get("localhost").entries.get("manager").live);
+ NodeInformationList nil = new NodeInformationList(info.values());
+ assertEquals(1, nil.get(0).entries.get("manager").live);
+
+ Messages.NodeInformationProto nodeInformationProto =
+ RestTypeMarshalling.marshall(ni);
+ Messages.NodeEntryInformationProto entryProto = nodeInformationProto
+ .getEntries(0);
+ assertNotNull(entryProto);
+ assertEquals(1, entryProto.getPriority());
+ NodeInformation unmarshalled =
+ RestTypeMarshalling.unmarshall(nodeInformationProto);
+ assertEquals(unmarshalled.hostname, ni.hostname);
+ assertTrue(unmarshalled.entries.keySet().containsAll(ni.entries.keySet()));
+
+ }
+
+  @Test
+  public void testBuildRolenames() throws Throwable {
+    // TODO: empty test always passes -- implement role-name checks or remove
+  }
+ public List assertNoAvailableNodes(int role) {
+ String label = "";
+ return verifyResultSize(0, nodeMap.findAllNodesForRole(role, label));
+ }
+
+ List verifyResultSize(int size, List list) {
+ if (list.size() != size) {
+ for (NodeInstance ni : list) {
+ log.error(ni.toFullString());
+ }
+ }
+ assertEquals(size, list.size());
+ return list;
+ }
+
+ NodeMap createNodeMap(List nodeReports)
+ throws BadConfigException {
+ NodeMap nodeMap = new NodeMap(1);
+ update(nodeMap, nodeReports);
+ return nodeMap;
+ }
+
+ protected boolean update(NodeMap nodeMap, List nodeReports) {
+ return nodeMap.buildOrUpdate(nodeReports);
+ }
+
+ NodeMap createNodeMap(List hosts, NodeState state,
+ String label) throws BadConfigException {
+ return createNodeMap(MockFactory.instance.createNodeReports(hosts, state,
+ label));
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryContainerEvents.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryContainerEvents.java
new file mode 100644
index 0000000..4f3bef8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryContainerEvents.java
@@ -0,0 +1,446 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockContainer;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockNodeId;
+import org.apache.slider.server.appmaster.state.ContainerOutcome;
+import org.apache.slider.server.appmaster.state.ContainerPriority;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.NodeMap;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test container events at the role history level -one below
+ * the App State
+ */
+public class TestRoleHistoryContainerEvents extends BaseMockAppStateTest {
+
+ @Override
+ public String getTestName() {
+ return "TestRoleHistoryContainerEvents";
+ }
+
+ NodeInstance age1Active4 = nodeInstance(1, 4, 0, 0);
+ NodeInstance age2Active2 = nodeInstance(2, 2, 0, 1);
+ NodeInstance age3Active0 = nodeInstance(3, 0, 0, 0);
+ NodeInstance age4Active1 = nodeInstance(4, 1, 0, 0);
+ NodeInstance age2Active0 = nodeInstance(2, 0, 0, 0);
+ NodeInstance empty = new NodeInstance("empty", MockFactory.ROLE_COUNT);
+
+ String roleName = "test";
+
+ List nodes = Arrays.asList(age2Active2, age2Active0,
+ age4Active1, age1Active4, age3Active0);
+ RoleHistory roleHistory;
+
+ Resource resource;
+
+ AMRMClient.ContainerRequest requestContainer(RoleStatus roleStatus) {
+ return roleHistory.requestContainerForRole(roleStatus).getIssuedRequest();
+ }
+
+  @Override
+  public void setup() throws Exception {
+    super.setup();
+    roleHistory = appState.getRoleHistory();
+    roleHistory.insert(nodes);
+    roleHistory.buildRecentNodeLists();
+    resource = Resource.newInstance(ResourceKeys.DEF_YARN_MEMORY,
+        ResourceKeys.DEF_YARN_CORES);
+  }
+
+ @Test
+ public void testFindAndCreate() throws Throwable {
+ int role = 0;
+ RoleStatus roleStatus = appState.lookupRoleStatus(role);
+
+ AMRMClient.ContainerRequest request =
+ requestContainer(roleStatus);
+
+ List nodes = request.getNodes();
+ assertNotNull(nodes);
+ assertEquals(1, nodes.size());
+ String hostname = nodes.get(0);
+ assertEquals(hostname, age3Active0.hostname);
+
+ //build a container
+ MockContainer container = factory.newContainer();
+ container.setNodeId(new MockNodeId(hostname, 0));
+ container.setPriority(request.getPriority());
+ roleHistory.onContainerAssigned(container);
+
+ NodeMap nodemap = roleHistory.cloneNodemap();
+ NodeInstance allocated = nodemap.get(hostname);
+ NodeEntry roleEntry = allocated.get(role);
+ assertEquals(1, roleEntry.getStarting());
+ assertFalse(roleEntry.isAvailable());
+ RoleInstance ri = new RoleInstance(container);
+ //start it
+ roleHistory.onContainerStartSubmitted(container, ri);
+ //later, declare that it started
+ roleHistory.onContainerStarted(container);
+ assertEquals(0, roleEntry.getStarting());
+ assertFalse(roleEntry.isAvailable());
+ assertEquals(1, roleEntry.getActive());
+ assertEquals(1, roleEntry.getLive());
+ }
+
+ @Test
+ public void testCreateAndRelease() throws Throwable {
+ int role = 1;
+ RoleStatus roleStatus = appState.lookupRoleStatus(role);
+
+ //verify it is empty
+ assertTrue(roleHistory.listActiveNodes(role).isEmpty());
+
+ AMRMClient.ContainerRequest request =
+ requestContainer(roleStatus);
+
+ assertNull(request.getNodes());
+
+ //pick an idle host
+ String hostname = age3Active0.hostname;
+
+ //build a container
+ MockContainer container = factory.newContainer(new MockNodeId(hostname,
+ 0), request.getPriority());
+ roleHistory.onContainerAssigned(container);
+
+ NodeMap nodemap = roleHistory.cloneNodemap();
+ NodeInstance allocated = nodemap.get(hostname);
+ NodeEntry roleEntry = allocated.get(role);
+ assertEquals(1, roleEntry.getStarting());
+ assertFalse(roleEntry.isAvailable());
+ RoleInstance ri = new RoleInstance(container);
+ //start it
+ roleHistory.onContainerStartSubmitted(container, ri);
+ //later, declare that it started
+ roleHistory.onContainerStarted(container);
+ assertEquals(0, roleEntry.getStarting());
+ assertFalse(roleEntry.isAvailable());
+ assertEquals(1, roleEntry.getActive());
+ assertEquals(1, roleEntry.getLive());
+
+ // now pick that instance to destroy
+ List activeNodes = roleHistory.listActiveNodes(role);
+
+
+ assertEquals(1, activeNodes.size());
+ NodeInstance target = activeNodes.get(0);
+ assertEquals(target, allocated);
+ roleHistory.onContainerReleaseSubmitted(container);
+ assertEquals(1, roleEntry.getReleasing());
+ assertEquals(1, roleEntry.getLive());
+ assertEquals(0, roleEntry.getActive());
+
+ // release completed
+ roleHistory.onReleaseCompleted(container);
+ assertEquals(0, roleEntry.getReleasing());
+ assertEquals(0, roleEntry.getLive());
+ assertEquals(0, roleEntry.getActive());
+
+ // verify it is empty
+ assertTrue(roleHistory.listActiveNodes(role).isEmpty());
+
+ // ask for a container and expect to get the recently released one
+ AMRMClient.ContainerRequest request2 =
+ requestContainer(roleStatus);
+
+ List nodes2 = request2.getNodes();
+ assertNotNull(nodes2);
+ String hostname2 = nodes2.get(0);
+
+ //pick an idle host
+ assertEquals(hostname2, age3Active0.hostname);
+ }
+
+
+ @Test
+ public void testStartWithoutWarning() throws Throwable {
+ int role = 0;
+ //pick an idle host
+ String hostname = age3Active0.hostname;
+ //build a container
+ MockContainer container = factory.newContainer(
+ new MockNodeId(hostname, 0),
+ ContainerPriority.createPriority(0, false));
+
+ NodeMap nodemap = roleHistory.cloneNodemap();
+ NodeInstance allocated = nodemap.get(hostname);
+ NodeEntry roleEntry = allocated.get(role);
+
+ //tell RH that it started
+ roleHistory.onContainerStarted(container);
+ assertEquals(0, roleEntry.getStarting());
+ assertFalse(roleEntry.isAvailable());
+ assertEquals(1, roleEntry.getActive());
+ assertEquals(1, roleEntry.getLive());
+ }
+
+ @Test
+ public void testStartFailed() throws Throwable {
+ int role = 0;
+ RoleStatus roleStatus = appState.lookupRoleStatus(role);
+
+ AMRMClient.ContainerRequest request =
+ requestContainer(roleStatus);
+
+ String hostname = request.getNodes().get(0);
+ assertEquals(hostname, age3Active0.hostname);
+
+ //build a container
+ MockContainer container = factory.newContainer(new MockNodeId(hostname,
+ 0), request.getPriority());
+ roleHistory.onContainerAssigned(container);
+
+ NodeMap nodemap = roleHistory.cloneNodemap();
+ NodeInstance allocated = nodemap.get(hostname);
+ NodeEntry roleEntry = allocated.get(role);
+ assertEquals(1, roleEntry.getStarting());
+ assertFalse(roleEntry.isAvailable());
+ RoleInstance ri = new RoleInstance(container);
+ //start it
+ roleHistory.onContainerStartSubmitted(container, ri);
+ //later, declare that it failed on startup
+ assertFalse(roleHistory.onNodeManagerContainerStartFailed(container));
+ assertEquals(0, roleEntry.getStarting());
+ assertEquals(1, roleEntry.getStartFailed());
+ assertEquals(1, roleEntry.getFailed());
+ assertTrue(roleEntry.isAvailable());
+ assertEquals(0, roleEntry.getActive());
+ assertEquals(0, roleEntry.getLive());
+ }
+
+ @Test
+ public void testStartFailedWithoutWarning() throws Throwable {
+ int role = 0;
+ RoleStatus roleStatus = appState.lookupRoleStatus(role);
+
+ AMRMClient.ContainerRequest request =
+ requestContainer(roleStatus);
+
+ String hostname = request.getNodes().get(0);
+ assertEquals(hostname, age3Active0.hostname);
+
+ //build a container
+ MockContainer container = factory.newContainer();
+ container.setNodeId(new MockNodeId(hostname, 0));
+ container.setPriority(request.getPriority());
+
+ NodeMap nodemap = roleHistory.cloneNodemap();
+ NodeInstance allocated = nodemap.get(hostname);
+ NodeEntry roleEntry = allocated.get(role);
+
+ assertFalse(roleHistory.onNodeManagerContainerStartFailed(container));
+ assertEquals(0, roleEntry.getStarting());
+ assertEquals(1, roleEntry.getStartFailed());
+ assertEquals(1, roleEntry.getFailed());
+ assertTrue(roleEntry.isAvailable());
+ assertEquals(0, roleEntry.getActive());
+ assertEquals(0, roleEntry.getLive());
+ }
+
+ @Test
+ public void testContainerFailed() throws Throwable {
+ describe("fail a container without declaring it as starting");
+
+ int role = 0;
+ RoleStatus roleStatus = appState.lookupRoleStatus(role);
+
+ AMRMClient.ContainerRequest request =
+ requestContainer(roleStatus);
+
+ String hostname = request.getNodes().get(0);
+ assertEquals(hostname, age3Active0.hostname);
+
+ //build a container
+ MockContainer container = factory.newContainer();
+ container.setNodeId(new MockNodeId(hostname, 0));
+ container.setPriority(request.getPriority());
+ roleHistory.onContainerAssigned(container);
+
+ NodeMap nodemap = roleHistory.cloneNodemap();
+ NodeInstance allocated = nodemap.get(hostname);
+ NodeEntry roleEntry = allocated.get(role);
+ assertEquals(1, roleEntry.getStarting());
+ assertFalse(roleEntry.isAvailable());
+ RoleInstance ri = new RoleInstance(container);
+ //start it
+ roleHistory.onContainerStartSubmitted(container, ri);
+ roleHistory.onContainerStarted(container);
+
+ //later, declare that it failed
+ roleHistory.onFailedContainer(
+ container,
+ false,
+ ContainerOutcome.Failed);
+ assertEquals(0, roleEntry.getStarting());
+ assertTrue(roleEntry.isAvailable());
+ assertEquals(0, roleEntry.getActive());
+ assertEquals(0, roleEntry.getLive());
+ }
+
+ @Test
+ public void testContainerFailedWithoutWarning() throws Throwable {
+ describe( "fail a container without declaring it as starting");
+ int role = 0;
+ RoleStatus roleStatus = appState.lookupRoleStatus(role);
+
+ AMRMClient.ContainerRequest request =
+ requestContainer(roleStatus);
+
+ String hostname = request.getNodes().get(0);
+ assertEquals(hostname, age3Active0.hostname);
+
+ //build a container
+ MockContainer container = factory.newContainer();
+ container.setNodeId(new MockNodeId(hostname, 0));
+ container.setPriority(request.getPriority());
+
+
+ NodeMap nodemap = roleHistory.cloneNodemap();
+ NodeInstance allocated = nodemap.get(hostname);
+ NodeEntry roleEntry = allocated.get(role);
+ assertTrue(roleEntry.isAvailable());
+ roleHistory.onFailedContainer(
+ container,
+ false,
+ ContainerOutcome.Failed);
+ assertEquals(0, roleEntry.getStarting());
+ assertEquals(1, roleEntry.getFailed());
+ assertTrue(roleEntry.isAvailable());
+ assertEquals(0, roleEntry.getActive());
+ assertEquals(0, roleEntry.getLive());
+ }
+
+ @Test
+ public void testAllocationListPrep() throws Throwable {
+ describe("test prepareAllocationList");
+ int role = 0;
+ RoleStatus roleStatus = appState.lookupRoleStatus(role);
+
+ AMRMClient.ContainerRequest request =
+ requestContainer(roleStatus);
+
+ String hostname = request.getNodes().get(0);
+ assertEquals(hostname, age3Active0.hostname);
+
+ MockContainer container1 = factory.newContainer();
+ container1.setNodeId(new MockNodeId(hostname, 0));
+ container1.setPriority(Priority.newInstance(0));
+
+ MockContainer container2 = factory.newContainer();
+ container2.setNodeId(new MockNodeId(hostname, 0));
+ container2.setPriority(Priority.newInstance(1));
+
+ // put containers in List with role == 1 first
+ List containers = Arrays.asList((Container) container2,
+ (Container) container1);
+ List sortedContainers = roleHistory.prepareAllocationList(
+ containers);
+
+ // verify that the first container has role == 0 after sorting
+ MockContainer c1 = (MockContainer) sortedContainers.get(0);
+ assertEquals(0, c1.getPriority().getPriority());
+ MockContainer c2 = (MockContainer) sortedContainers.get(1);
+ assertEquals(1, c2.getPriority().getPriority());
+ }
+
+  @Test
+  public void testNodeUpdated() throws Throwable {
+    describe("fail a node");
+
+    int role = 0;
+    RoleStatus roleStatus = appState.lookupRoleStatus(role);
+
+    AMRMClient.ContainerRequest request =
+        roleHistory.requestContainerForRole(roleStatus).getIssuedRequest();
+
+    String hostname = request.getNodes().get(0);
+    assertEquals(age3Active0.hostname, hostname);
+
+    // build a container
+    MockContainer container = factory.newContainer(new MockNodeId(hostname,
+        0), request.getPriority());
+
+    roleHistory.onContainerAssigned(container);
+
+    NodeMap nodemap = roleHistory.cloneNodemap();
+    NodeInstance allocated = nodemap.get(hostname);
+    NodeEntry roleEntry = allocated.get(role);
+    assertEquals(1, roleEntry.getStarting());
+    assertFalse(roleEntry.isAvailable());
+    RoleInstance ri = new RoleInstance(container);
+    // start it
+    roleHistory.onContainerStartSubmitted(container, ri);
+    roleHistory.onContainerStarted(container);
+
+    int startSize = nodemap.size();
+
+    // now send a list of updated (failed) nodes event
+    List nodesUpdated = new ArrayList<>();
+    NodeReport nodeReport = NodeReport.newInstance(
+        NodeId.newInstance(hostname, 0),
+        NodeState.LOST,
+        null, null, null, null, 1, null, 0);
+    nodesUpdated.add(nodeReport);
+    roleHistory.onNodesUpdated(nodesUpdated);
+
+    nodemap = roleHistory.cloneNodemap();
+    int endSize = nodemap.size();
+    // node updates may add entries, so the map can grow but never shrink
+    assertTrue(startSize <= endSize);
+    assertNotNull(nodemap.get(hostname));
+    assertFalse(nodemap.get(hostname).isOnline());
+
+    // add a failure of a node we've never heard of
+    String newhost = "newhost";
+    nodesUpdated = Arrays.asList(
+        NodeReport.newInstance(
+            NodeId.newInstance(newhost, 0),
+            NodeState.LOST,
+            null, null, null, null, 1, null, 0)
+    );
+    roleHistory.onNodesUpdated(nodesUpdated);
+
+    NodeMap nodemap2 = roleHistory.cloneNodemap();
+    assertNotNull(nodemap2.get(newhost));
+    assertFalse(nodemap2.get(newhost).isOnline());
+
+  }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryFindNodesForNewInstances.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryFindNodesForNewInstances.java
new file mode 100644
index 0000000..a7a9134
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryFindNodesForNewInstances.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.ContainerOutcome;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Testing finding nodes for new instances.
+ *
+ * This stresses the non-AA codepath
+ */
+public class TestRoleHistoryFindNodesForNewInstances extends BaseMockAppStateTest {
+
+ public TestRoleHistoryFindNodesForNewInstances() throws BadConfigException {
+ }
+
+ @Override
+ public String getTestName() {
+ return "TestFindNodesForNewInstances";
+ }
+
+ NodeInstance age1Active4 = nodeInstance(1, 4, 0, 0);
+ NodeInstance age2Active2 = nodeInstance(2, 2, 0, 1);
+ NodeInstance age3Active0 = nodeInstance(3, 0, 0, 0);
+ NodeInstance age4Active1 = nodeInstance(4, 1, 0, 0);
+ NodeInstance age2Active0 = nodeInstance(2, 0, 0, 0);
+ NodeInstance empty = new NodeInstance("empty", MockFactory.ROLE_COUNT);
+
+ List nodes = Arrays.asList(age2Active2, age2Active0,
+ age4Active1, age1Active4, age3Active0);
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+
+ String roleName = "test";
+ RoleStatus roleStat = new RoleStatus(new ProviderRole(roleName, 0));
+ RoleStatus roleStat2 = new RoleStatus(new ProviderRole(roleName, 2));
+
+ @Before
+ public void setupNodeMap() {
+ roleHistory.insert(nodes);
+ roleHistory.buildRecentNodeLists();
+ }
+
+ public List findNodes(int count) {
+ return findNodes(count, roleStat);
+ }
+
+ public List findNodes(int count, RoleStatus roleStatus) {
+ List found = new ArrayList<>();
+ for (int i = 0; i < count; i++) {
+ NodeInstance f = roleHistory.findRecentNodeForNewInstance(roleStatus);
+ if (f != null) {
+ found.add(f);
+ }
+ }
+ return found;
+ }
+
+ @Test
+ public void testFind1NodeR0() throws Throwable {
+ NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+ log.info("found: {}", found);
+ assertTrue(Arrays.asList(age3Active0).contains(found));
+ }
+
+ @Test
+ public void testFind2NodeR0() throws Throwable {
+ NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+ log.info("found: {}", found);
+ assertTrue(Arrays.asList(age2Active0, age3Active0).contains(found));
+ NodeInstance found2 = roleHistory.findRecentNodeForNewInstance(roleStat);
+ log.info("found: {}", found2);
+ assertTrue(Arrays.asList(age2Active0, age3Active0).contains(found2));
+ assertNotEquals(found, found2);
+ }
+
+ @Test
+ public void testFind3NodeR0ReturnsNull() throws Throwable {
+ assertEquals(2, findNodes(2).size());
+ NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+ assertNull(found);
+ }
+
+ @Test
+ public void testFindNodesOneEntry() throws Throwable {
+ List nodes = findNodes(4, roleStat2);
+ assertEquals(0, nodes.size());
+ }
+
+ @Test
+ public void testFindNodesIndependent() throws Throwable {
+ assertEquals(2, findNodes(2).size());
+ roleHistory.dump();
+ assertEquals(0, findNodes(3, roleStat2).size());
+ }
+
+ @Test
+ public void testFindNodesFallsBackWhenUsed() throws Throwable {
+ // mark both age2Active0 and age3Active0 as busy, then expect a null back
+ age2Active0.get(0).onStartCompleted();
+ assertNotEquals(0, age2Active0.getActiveRoleInstances(0));
+ age3Active0.get(0).onStartCompleted();
+ assertNotEquals(0, age3Active0.getActiveRoleInstances(0));
+ NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+ if (found != null) {
+ log.info(found.toFullString());
+ }
+ assertNull(found);
+ }
+ @Test
+ public void testFindNodesSkipsFailingNode() throws Throwable {
+ // fail age2Active0 until its failure threshold is exceeded; it must
+ // then be skipped in favour of the older age3Active0
+ NodeEntry entry0 = age2Active0.get(0);
+ entry0.containerCompleted(
+ false,
+ ContainerOutcome.Failed);
+ assertTrue(entry0.getFailed() > 0);
+ assertTrue(entry0.getFailedRecently() > 0);
+ entry0.containerCompleted(
+ false,
+ ContainerOutcome.Failed);
+ assertFalse(age2Active0.exceedsFailureThreshold(roleStat));
+ // set failure threshold to 1
+ roleStat.getProviderRole().nodeFailureThreshold = 1;
+ // threshold is now exceeded
+ assertTrue(age2Active0.exceedsFailureThreshold(roleStat));
+
+ // get the role & expect age3 to be picked up, even though it is older
+ NodeInstance found = roleHistory.findRecentNodeForNewInstance(roleStat);
+ assertEquals(age3Active0, found);
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryNIComparators.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryNIComparators.java
new file mode 100644
index 0000000..d595dfe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryNIComparators.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Unit test to verify the comparators sort as expected
+ */
+public class TestRoleHistoryNIComparators extends BaseMockAppStateTest {
+
+ NodeInstance age1Active4 = nodeInstance(1001, 4, 0, 0);
+ NodeInstance age2Active2 = nodeInstance(1002, 2, 0, 0);
+ NodeInstance age3Active0 = nodeInstance(1003, 0, 0, 0);
+ NodeInstance age4Active1 = nodeInstance(1004, 1, 0, 0);
+ NodeInstance empty = new NodeInstance("empty", MockFactory.ROLE_COUNT);
+ NodeInstance age6failing = nodeInstance(1006, 0, 0, 0);
+ NodeInstance age1failing = nodeInstance(1001, 0, 0, 0);
+
+ List nodes = Arrays.asList(age2Active2, age4Active1,
+ age1Active4, age3Active0);
+ List nodesPlusEmpty = Arrays.asList(age2Active2, age4Active1,
+ age1Active4, age3Active0, empty);
+ List allnodes = Arrays.asList(age6failing, age2Active2,
+ age4Active1, age1Active4, age3Active0, age1failing);
+
+ @Before
+ public void setup() {
+ age6failing.get(0).setFailedRecently(2);
+ age1failing.get(0).setFailedRecently(1);
+ }
+
+ @Override
+ public String getTestName() {
+ return "TestNIComparators";
+ }
+
+ @Test
+ public void testPreferred() throws Throwable {
+ Collections.sort(nodes, new NodeInstance.Preferred(0));
+ assertListEquals(nodes, Arrays.asList(age4Active1, age3Active0,
+ age2Active2, age1Active4));
+ }
+
+ /**
+ * The preferred sort still includes failing nodes; it is up to the next
+ * phase of the placement process to skip them.
+ * @throws Throwable
+ */
+ @Test
+ public void testPreferredWithFailures() throws Throwable {
+ Collections.sort(allnodes, new NodeInstance.Preferred(0));
+ assertEquals(allnodes.get(0), age6failing);
+ assertEquals(allnodes.get(1), age4Active1);
+ }
+
+ @Test
+ public void testPreferredComparatorDowngradesFailures() throws Throwable {
+ NodeInstance.Preferred preferred = new NodeInstance.Preferred(0);
+ assertEquals(-1, preferred.compare(age6failing, age1failing));
+ assertEquals(1, preferred.compare(age1failing, age6failing));
+ }
+
+ @Test
+ public void testNewerThanNoRole() throws Throwable {
+ Collections.sort(nodesPlusEmpty, new NodeInstance.Preferred(0));
+ assertListEquals(nodesPlusEmpty, Arrays.asList(age4Active1, age3Active0,
+ age2Active2, age1Active4, empty));
+ }
+
+ @Test
+ public void testMoreActiveThan() throws Throwable {
+
+ Collections.sort(nodes, new NodeInstance.MoreActiveThan(0));
+ assertListEquals(nodes, Arrays.asList(age1Active4, age2Active2,
+ age4Active1, age3Active0));
+ }
+
+ @Test
+ public void testMoreActiveThanEmpty() throws Throwable {
+
+ Collections.sort(nodesPlusEmpty, new NodeInstance.MoreActiveThan(0));
+ assertListEquals(nodesPlusEmpty, Arrays.asList(age1Active4, age2Active2,
+ age4Active1, age3Active0, empty));
+ }
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
new file mode 100644
index 0000000..7c853fe
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryOutstandingRequestTracker.java
@@ -0,0 +1,374 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockAppState;
+import org.apache.slider.server.appmaster.model.mock.MockContainer;
+import org.apache.slider.server.appmaster.model.mock.MockNodeId;
+import org.apache.slider.server.appmaster.model.mock.MockPriority;
+import org.apache.slider.server.appmaster.model.mock.MockResource;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.CancelSingleRequest;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.apache.slider.server.appmaster.state.AppStateBindingInfo;
+import org.apache.slider.server.appmaster.state.ContainerAllocationOutcome;
+import org.apache.slider.server.appmaster.state.ContainerAllocationResults;
+import org.apache.slider.server.appmaster.state.ContainerPriority;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.OutstandingRequest;
+import org.apache.slider.server.appmaster.state.OutstandingRequestTracker;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class TestRoleHistoryOutstandingRequestTracker extends BaseMockAppStateTest {
+
+ public static final String WORKERS_LABEL = "workers";
+ NodeInstance host1 = new NodeInstance("host1", 3);
+ NodeInstance host2 = new NodeInstance("host2", 3);
+ MockResource resource = factory.newResource(48, 1);
+
+ OutstandingRequestTracker tracker = new OutstandingRequestTracker();
+
+ public static final ProviderRole WORKER = new ProviderRole(
+ "worker",
+ 5,
+ PlacementPolicy.NONE,
+ 2,
+ 1,
+ WORKERS_LABEL);
+
+ @Override
+ public AppStateBindingInfo buildBindingInfo() {
+ AppStateBindingInfo bindingInfo = super.buildBindingInfo();
+ bindingInfo.roles.add(WORKER);
+ return bindingInfo;
+ }
+
+ @Test
+ public void testAddRetrieveEntry() throws Throwable {
+ OutstandingRequest request = tracker.newRequest(host1, 0);
+ assertEquals(tracker.lookupPlacedRequest(0, "host1"), request);
+ assertEquals(tracker.removePlacedRequest(request), request);
+ assertNull(tracker.lookupPlacedRequest(0, "host1"));
+ }
+
+ @Test
+ public void testAddCompleteEntry() throws Throwable {
+ OutstandingRequest req1 = tracker.newRequest(host1, 0);
+ req1.buildContainerRequest(resource, getRole0Status(), 0);
+
+ tracker.newRequest(host2, 0).buildContainerRequest(resource,
+ getRole0Status(), 0);
+ tracker.newRequest(host1, 1).buildContainerRequest(resource,
+ getRole0Status(), 0);
+
+ ContainerAllocationResults allocation = tracker.onContainerAllocated(1,
+ "host1", null);
+ assertEquals(allocation.outcome, ContainerAllocationOutcome.Placed);
+ assertTrue(allocation.operations.get(0) instanceof CancelSingleRequest);
+
+ assertNull(tracker.lookupPlacedRequest(1, "host1"));
+ assertNotNull(tracker.lookupPlacedRequest(0, "host1"));
+ }
+
+ @Test
+ public void testResetOpenRequests() throws Throwable {
+ OutstandingRequest req1 = tracker.newRequest(null, 0);
+ assertFalse(req1.isLocated());
+ tracker.newRequest(host1, 0);
+ List openRequests = tracker.listOpenRequests();
+ assertEquals(1, openRequests.size());
+ tracker.resetOutstandingRequests(0);
+ assertTrue(tracker.listOpenRequests().isEmpty());
+ assertTrue(tracker.listPlacedRequests().isEmpty());
+ }
+
+ @Test
+ public void testRemoveOpenRequestUnissued() throws Throwable {
+ OutstandingRequest req1 = tracker.newRequest(null, 0);
+ req1.buildContainerRequest(resource, getRole0Status(), 0);
+ assertEquals(1, tracker.listOpenRequests().size());
+ MockContainer c1 = factory.newContainer(null, new MockPriority(0));
+ c1.setResource(resource);
+
+ ContainerAllocationResults allocation =
+ tracker.onContainerAllocated(0, "host1", c1);
+ ContainerAllocationOutcome outcome = allocation.outcome;
+ assertEquals(outcome, ContainerAllocationOutcome.Unallocated);
+ assertTrue(allocation.operations.isEmpty());
+ assertEquals(1, tracker.listOpenRequests().size());
+ }
+
+ @Test
+ public void testIssuedOpenRequest() throws Throwable {
+ OutstandingRequest req1 = tracker.newRequest(null, 0);
+ req1.buildContainerRequest(resource, getRole0Status(), 0);
+ assertEquals(1, tracker.listOpenRequests().size());
+
+ int pri = ContainerPriority.buildPriority(0, false);
+ assertTrue(pri > 0);
+ MockNodeId nodeId = factory.newNodeId("hostname-1");
+ MockContainer c1 = factory.newContainer(nodeId, new MockPriority(pri));
+
+ c1.setResource(resource);
+
+ ContainerRequest issued = req1.getIssuedRequest();
+ assertEquals(issued.getCapability(), resource);
+ assertEquals(issued.getPriority().getPriority(), c1.getPriority()
+ .getPriority());
+ assertTrue(req1.resourceRequirementsMatch(resource));
+
+ ContainerAllocationResults allocation =
+ tracker.onContainerAllocated(0, nodeId.getHost(), c1);
+ assertEquals(0, tracker.listOpenRequests().size());
+ assertTrue(allocation.operations.get(0) instanceof CancelSingleRequest);
+
+ assertEquals(allocation.outcome, ContainerAllocationOutcome.Open);
+ assertEquals(allocation.origin, req1);
+ }
+
+ @Test
+ public void testResetEntries() throws Throwable {
+ tracker.newRequest(host1, 0);
+ tracker.newRequest(host2, 0);
+ tracker.newRequest(host1, 1);
+ List canceled = tracker.resetOutstandingRequests(0);
+ assertEquals(2, canceled.size());
+ assertTrue(canceled.contains(host1));
+ assertTrue(canceled.contains(host2));
+ assertNotNull(tracker.lookupPlacedRequest(1, "host1"));
+ assertNull(tracker.lookupPlacedRequest(0, "host1"));
+ canceled = tracker.resetOutstandingRequests(0);
+ assertEquals(0, canceled.size());
+ assertEquals(1, tracker.resetOutstandingRequests(1).size());
+ }
+
+ @Test
+ public void testEscalation() throws Throwable {
+
+ // first request: default placement
+ assertEquals(getRole0Status().getPlacementPolicy(), PlacementPolicy.DEFAULT);
+ Resource res0 = newResource(getRole0Status());
+ OutstandingRequest outstanding0 = tracker.newRequest(host1,
+ getRole0Status().getKey());
+ ContainerRequest initialRequest =
+ outstanding0.buildContainerRequest(res0, getRole0Status
+ (), 0);
+ assertNotNull(outstanding0.getIssuedRequest());
+ assertTrue(outstanding0.isLocated());
+ assertFalse(outstanding0.isEscalated());
+ assertFalse(initialRequest.getRelaxLocality());
+ assertEquals(1, tracker.listPlacedRequests().size());
+
+ // second. This one doesn't get launched. This is to verify that the escalation
+ // process skips entries which are in the list but have not been issued.
+ // ...which can be a race condition between request issuance & escalation.
+ // (not one observed outside test authoring, but retained for completeness)
+ Resource res2 = newResource(getRole2Status());
+ OutstandingRequest outstanding2 = tracker.newRequest(host1,
+ getRole2Status().getKey());
+
+ // simulate time passing; escalation of role 0 MUST now be triggered
+ long interval = getRole0Status().getPlacementTimeoutSeconds() * 1000 + 500;
+ long now = interval;
+ final List escalations = tracker
+ .escalateOutstandingRequests(now);
+
+ assertTrue(outstanding0.isEscalated());
+ assertFalse(outstanding2.isEscalated());
+
+ // two entries
+ assertEquals(2, escalations.size());
+ AbstractRMOperation e1 = escalations.get(0);
+ assertTrue(e1 instanceof CancelSingleRequest);
+ final CancelSingleRequest cancel = (CancelSingleRequest) e1;
+ assertEquals(initialRequest, cancel.getRequest());
+ AbstractRMOperation e2 = escalations.get(1);
+ assertTrue(e2 instanceof ContainerRequestOperation);
+ ContainerRequestOperation escRequest = (ContainerRequestOperation) e2;
+ assertTrue(escRequest.getRequest().getRelaxLocality());
+
+ // build that second request from an anti-affine entry
+ // these get placed as well
+ now += interval;
+ ContainerRequest containerReq2 =
+ outstanding2.buildContainerRequest(res2, getRole2Status(), now);
+ // escalate a little bit more
+ final List escalations2 = tracker
+ .escalateOutstandingRequests(now);
+ // and expect no new entries
+ assertEquals(0, escalations2.size());
+
+ // go past the role2 timeout
+ now += getRole2Status().getPlacementTimeoutSeconds() * 1000 + 500;
+ // escalate a little bit more
+ final List escalations3 = tracker
+ .escalateOutstandingRequests(now);
+ // and expect another escalation
+ assertEquals(2, escalations3.size());
+ assertTrue(outstanding2.isEscalated());
+
+ // finally add a strict entry to the mix
+ Resource res3 = newResource(getRole1Status());
+ OutstandingRequest outstanding3 = tracker.newRequest(host1,
+ getRole1Status().getKey());
+
+ final ProviderRole providerRole1 = getRole1Status().getProviderRole();
+ assertEquals(providerRole1.placementPolicy, PlacementPolicy.STRICT);
+ now += interval;
+ assertFalse(outstanding3.mayEscalate());
+ final List escalations4 = tracker
+ .escalateOutstandingRequests(now);
+ assertTrue(escalations4.isEmpty());
+
+ }
+
+ /**
+ * If the placement includes a label, the initial (placed) request
+ * must not include that label.
+ * The escalation request, in contrast, drops the node list,
+ * carries the label expression,
+ * and sets relaxLocality==true.
+ * @throws Throwable
+ */
+ @Test
+ public void testRequestLabelledPlacement() throws Throwable {
+ NodeInstance ni = new NodeInstance("host1", 0);
+ OutstandingRequest req1 = tracker.newRequest(ni, 0);
+ Resource resource = factory.newResource(48, 1);
+
+ RoleStatus workerRole = lookupRole(WORKER.name);
+ // initial request
+ ContainerRequest yarnRequest =
+ req1.buildContainerRequest(resource, workerRole, 0);
+ assertEquals(req1.label, WORKERS_LABEL);
+
+ assertNull(yarnRequest.getNodeLabelExpression());
+ assertFalse(yarnRequest.getRelaxLocality());
+ // escalation
+ ContainerRequest yarnRequest2 = req1.escalate();
+ assertNull(yarnRequest2.getNodes());
+ assertTrue(yarnRequest2.getRelaxLocality());
+ assertEquals(yarnRequest2.getNodeLabelExpression(), WORKERS_LABEL);
+ }
+
+ /**
+ * If the placement doesn't include a label, then the escalation request
+ * retains the node list, but sets relaxLocality==true.
+ * @throws Throwable
+ */
+ @Test
+ public void testRequestUnlabelledPlacement() throws Throwable {
+ NodeInstance ni = new NodeInstance("host1", 0);
+ OutstandingRequest req1 = tracker.newRequest(ni, 0);
+ Resource resource = factory.newResource(48, 1);
+
+ // initial request
+ ContainerRequest yarnRequest = req1.buildContainerRequest(resource,
+ getRole0Status(), 0);
+ assertNotNull(yarnRequest.getNodes());
+ assertTrue(SliderUtils.isUnset(yarnRequest.getNodeLabelExpression()));
+ assertFalse(yarnRequest.getRelaxLocality());
+ ContainerRequest yarnRequest2 = req1.escalate();
+ assertNotNull(yarnRequest2.getNodes());
+ assertTrue(yarnRequest2.getRelaxLocality());
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testAARequestNoNodes() throws Throwable {
+ tracker.newAARequest(getRole0Status().getKey(), new ArrayList<>(), "");
+ }
+
+ @Test
+ public void testAARequest() throws Throwable {
+ int role0 = getRole0Status().getKey();
+ OutstandingRequest request = tracker.newAARequest(role0, Arrays
+ .asList(host1), "");
+ assertEquals(host1.hostname, request.hostname);
+ assertFalse(request.isLocated());
+ }
+
+ @Test
+ public void testAARequestPair() throws Throwable {
+ int role0 = getRole0Status().getKey();
+ OutstandingRequest request = tracker.newAARequest(role0, Arrays.asList(host1,
+ host2), "");
+ assertEquals(host1.hostname, request.hostname);
+ assertFalse(request.isLocated());
+ ContainerRequest yarnRequest = request.buildContainerRequest(
+ getRole0Status().copyResourceRequirements(new MockResource(0, 0)),
+ getRole0Status(),
+ 0);
+ assertFalse(yarnRequest.getRelaxLocality());
+ assertFalse(request.mayEscalate());
+
+ assertEquals(2, yarnRequest.getNodes().size());
+ }
+
+ @Test
+ public void testBuildResourceRequirements() throws Throwable {
+ // Store original values
+ ConfTreeOperations resources = appState.getResourcesSnapshot();
+ String origMem = resources.getComponentOpt(getRole0Status().getGroup(),
+ ResourceKeys.YARN_MEMORY, null);
+ String origVcores = resources.getComponentOpt(getRole0Status().getGroup(),
+ ResourceKeys.YARN_CORES, null);
+
+ // Resource values to be used for this test
+ int testMem = 32768;
+ int testVcores = 2;
+ resources.setComponentOpt(getRole0Status().getGroup(), ResourceKeys.YARN_MEMORY,
+ Integer.toString(testMem));
+ resources.setComponentOpt(getRole0Status().getGroup(), ResourceKeys.YARN_CORES,
+ Integer.toString(testVcores));
+
+ MockResource requestedRes = new MockResource(testMem, testVcores);
+ MockResource expectedRes = new MockResource(MockAppState.RM_MAX_RAM, testVcores);
+ log.info("Resource requested: " + requestedRes);
+ Resource resFinal = appState.buildResourceRequirements(getRole0Status(),
+ new MockResource(0, 0));
+ log.info("Resource actual: " + resFinal);
+ assertTrue(Resources.equals(expectedRes, resFinal));
+
+ // revert resource configuration to original value
+ resources.setComponentOpt(getRole0Status().getGroup(), ResourceKeys.YARN_MEMORY,
+ origMem);
+ resources.setComponentOpt(getRole0Status().getGroup(), ResourceKeys.YARN_CORES,
+ origVcores);
+ }
+
+ public Resource newResource(RoleStatus r) {
+ final Resource res2 = new MockResource(0, 0);
+ appState.buildResourceRequirements(r, res2);
+ return res2;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java
new file mode 100644
index 0000000..c9510d3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRW.java
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.avro.LoadedRoleHistory;
+import org.apache.slider.server.avro.RoleHistoryWriter;
+import org.junit.Test;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class TestRoleHistoryRW extends BaseMockAppStateTest {
+
+ static long time = System.currentTimeMillis();
+ public static final String HISTORY_V1_6_ROLE =
+ "org/apache/slider/server/avro/history-v01-6-role.json";
+ public static final String HISTORY_V1_3_ROLE =
+ "org/apache/slider/server/avro/history-v01-3-role.json";
+ public static final String HISTORY_V1b_1_ROLE =
+ "org/apache/slider/server/avro/history_v01b_1_role.json";
+
+
+ static final ProviderRole PROVIDER_ROLE3 = new ProviderRole(
+ "role3",
+ 3,
+ PlacementPolicy.STRICT,
+ 3,
+ 3,
+ ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
+
+ @Override
+ public String getTestName() {
+ return "TestHistoryRW";
+ }
+
+ @Test
+ public void testWriteReadEmpty() throws Throwable {
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ roleHistory.onStart(fs, historyPath);
+ Path history = roleHistory.saveHistory(time++);
+ assertTrue(fs.isFile(history));
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+ historyWriter.read(fs, history);
+ }
+
+ @Test
+ public void testWriteReadData() throws Throwable {
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ assertFalse(roleHistory.onStart(fs, historyPath));
+ String addr = "localhost";
+ NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+ NodeEntry ne1 = instance.getOrCreate(0);
+ ne1.setLastUsed(0xf00d);
+
+ Path history = roleHistory.saveHistory(time++);
+ assertTrue(fs.isFile(history));
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+ RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+
+
+ LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
+ assertTrue(0 < loadedRoleHistory.size());
+ rh2.rebuild(loadedRoleHistory);
+ NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+ assertNotNull(ni2);
+ NodeEntry ne2 = ni2.get(0);
+ assertNotNull(ne2);
+ assertEquals(ne2.getLastUsed(), ne1.getLastUsed());
+ }
+
+ @Test
+ public void testWriteReadActiveData() throws Throwable {
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ roleHistory.onStart(fs, historyPath);
+ String addr = "localhost";
+ String addr2 = "rack1server5";
+ NodeInstance localhost = roleHistory.getOrCreateNodeInstance(addr);
+ NodeEntry orig1 = localhost.getOrCreate(0);
+ orig1.setLastUsed(0x10);
+ NodeInstance rack1server5 = roleHistory.getOrCreateNodeInstance(addr2);
+ NodeEntry orig2 = rack1server5.getOrCreate(1);
+ orig2.setLive(3);
+ assertFalse(orig2.isAvailable());
+ NodeEntry orig3 = localhost.getOrCreate(1);
+ orig3.setLastUsed(0x20);
+ orig3.setLive(1);
+ assertFalse(orig3.isAvailable());
+ orig3.release();
+ assertTrue(orig3.isAvailable());
+ roleHistory.dump();
+
+ long savetime = 0x0001000;
+ Path history = roleHistory.saveHistory(savetime);
+ assertTrue(fs.isFile(history));
+ describe("Loaded");
+ log.info("testWriteReadActiveData in {}", history);
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+ RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+ LoadedRoleHistory loadedRoleHistory = historyWriter.read(fs, history);
+ assertEquals(3, loadedRoleHistory.size());
+ rh2.rebuild(loadedRoleHistory);
+ rh2.dump();
+
+ assertEquals(2, rh2.getClusterSize());
+ NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+ assertNotNull(ni2);
+ NodeEntry loadedNE = ni2.get(0);
+ assertEquals(loadedNE.getLastUsed(), orig1.getLastUsed());
+ NodeInstance ni2b = rh2.getExistingNodeInstance(addr2);
+ assertNotNull(ni2b);
+ NodeEntry loadedNE2 = ni2b.get(1);
+ assertNotNull(loadedNE2);
+ assertEquals(loadedNE2.getLastUsed(), savetime);
+ assertEquals(rh2.getThawedDataTime(), savetime);
+
+ // now start it
+ rh2.buildRecentNodeLists();
+ describe("starting");
+ rh2.dump();
+    List<NodeInstance> available0 = rh2.cloneRecentNodeList(0);
+ assertEquals(1, available0.size());
+
+ NodeInstance entry = available0.get(0);
+ assertEquals(entry.hostname, "localhost");
+ assertEquals(entry, localhost);
+    List<NodeInstance> available1 = rh2.cloneRecentNodeList(1);
+ assertEquals(2, available1.size());
+ //and verify that even if last used was set, the save time is picked up
+ assertEquals(entry.get(1).getLastUsed(), roleHistory.getSaveTime());
+
+ }
+
+ @Test
+ public void testWriteThaw() throws Throwable {
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ assertFalse(roleHistory.onStart(fs, historyPath));
+ String addr = "localhost";
+ NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+ NodeEntry ne1 = instance.getOrCreate(0);
+ ne1.setLastUsed(0xf00d);
+
+ Path history = roleHistory.saveHistory(time++);
+    long savetime = roleHistory.getSaveTime();
+ assertTrue(fs.isFile(history));
+ RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+ assertTrue(rh2.onStart(fs, historyPath));
+ NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+ assertNotNull(ni2);
+ NodeEntry ne2 = ni2.get(0);
+ assertNotNull(ne2);
+ assertEquals(ne2.getLastUsed(), ne1.getLastUsed());
+ assertEquals(rh2.getThawedDataTime(), savetime);
+ }
+
+
+ @Test
+ public void testPurgeOlderEntries() throws Throwable {
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+ time = 1;
+ Path file1 = touch(historyWriter, time++);
+ Path file2 = touch(historyWriter, time++);
+ Path file3 = touch(historyWriter, time++);
+ Path file4 = touch(historyWriter, time++);
+ Path file5 = touch(historyWriter, time++);
+ Path file6 = touch(historyWriter, time++);
+
+ assertEquals(0, historyWriter.purgeOlderHistoryEntries(fs, file1));
+ assertEquals(1, historyWriter.purgeOlderHistoryEntries(fs, file2));
+ assertEquals(0, historyWriter.purgeOlderHistoryEntries(fs, file2));
+ assertEquals(3, historyWriter.purgeOlderHistoryEntries(fs, file5));
+ assertEquals(1, historyWriter.purgeOlderHistoryEntries(fs, file6));
+ try {
+ // make an impossible assertion that will fail if the method
+ // actually completes
+ assertEquals(-1, historyWriter.purgeOlderHistoryEntries(fs, file1));
+ } catch (FileNotFoundException ignored) {
+ // expected
+ }
+
+ }
+
+ public Path touch(RoleHistoryWriter historyWriter, long time)
+ throws IOException {
+ Path path = historyWriter.createHistoryFilename(historyPath, time);
+ FSDataOutputStream out = fs.create(path);
+ out.close();
+ return path;
+ }
+
+ @Test
+ public void testSkipEmptyFileOnRead() throws Throwable {
+ describe("verify that empty histories are skipped on read; old histories " +
+ "purged");
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ roleHistory.onStart(fs, historyPath);
+ time = 0;
+ Path oldhistory = roleHistory.saveHistory(time++);
+
+ String addr = "localhost";
+ NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+ NodeEntry ne1 = instance.getOrCreate(0);
+ ne1.setLastUsed(0xf00d);
+
+ Path goodhistory = roleHistory.saveHistory(time++);
+
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+ Path touched = touch(historyWriter, time++);
+
+ RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+ assertTrue(rh2.onStart(fs, historyPath));
+ NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+ assertNotNull(ni2);
+
+ //and assert the older file got purged
+ assertFalse(fs.exists(oldhistory));
+ assertTrue(fs.exists(goodhistory));
+ assertTrue(fs.exists(touched));
+ }
+
+ @Test
+ public void testSkipBrokenFileOnRead() throws Throwable {
+    describe("verify that broken histories are skipped on read; old " +
+        "histories purged");
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ roleHistory.onStart(fs, historyPath);
+ time = 0;
+ Path oldhistory = roleHistory.saveHistory(time++);
+
+ String addr = "localhost";
+ NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+ NodeEntry ne1 = instance.getOrCreate(0);
+ ne1.setLastUsed(0xf00d);
+
+ Path goodhistory = roleHistory.saveHistory(time++);
+
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+ Path badfile = historyWriter.createHistoryFilename(historyPath, time++);
+ FSDataOutputStream out = fs.create(badfile);
+ out.writeBytes("{broken:true}");
+ out.close();
+
+ RoleHistory rh2 = new MockRoleHistory(MockFactory.ROLES);
+ describe("IGNORE STACK TRACE BELOW");
+
+ assertTrue(rh2.onStart(fs, historyPath));
+
+    describe("IGNORE STACK TRACE ABOVE");
+ NodeInstance ni2 = rh2.getExistingNodeInstance(addr);
+ assertNotNull(ni2);
+
+ //and assert the older file got purged
+ assertFalse(fs.exists(oldhistory));
+ assertTrue(fs.exists(goodhistory));
+ assertTrue(fs.exists(badfile));
+ }
+
+ /**
+ * Test that a v1 JSON file can be read. Here the number of roles
+ * matches the current state.
+ * @throws Throwable
+ */
+ @Test
+ public void testReloadDataV1_3_role() throws Throwable {
+ String source = HISTORY_V1_3_ROLE;
+ RoleHistoryWriter writer = new RoleHistoryWriter();
+
+ LoadedRoleHistory loadedRoleHistory = writer.read(source);
+ assertEquals(4, loadedRoleHistory.size());
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ assertEquals(0, roleHistory.rebuild(loadedRoleHistory));
+ }
+
+ /**
+ * Test that a v1 JSON file can be read. Here more roles than expected
+ * @throws Throwable
+ */
+ @Test
+ public void testReloadDataV1_6_role() throws Throwable {
+ String source = HISTORY_V1_6_ROLE;
+ RoleHistoryWriter writer = new RoleHistoryWriter();
+
+ LoadedRoleHistory loadedRoleHistory = writer.read(source);
+ assertEquals(6, loadedRoleHistory.size());
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ assertEquals(3, roleHistory.rebuild(loadedRoleHistory));
+ }
+
+ /**
+ * Test that a v1 JSON file can be read. Here the number of roles
+ * is less than the current state.
+ * @throws Throwable
+ */
+ @Test
+ public void testReload_less_roles() throws Throwable {
+ String source = HISTORY_V1_3_ROLE;
+ RoleHistoryWriter writer = new RoleHistoryWriter();
+
+ LoadedRoleHistory loadedRoleHistory = writer.read(source);
+ assertEquals(4, loadedRoleHistory.size());
+    List<ProviderRole> expandedRoles = new ArrayList<>(MockFactory.ROLES);
+ expandedRoles.add(PROVIDER_ROLE3);
+ RoleHistory roleHistory = new MockRoleHistory(expandedRoles);
+ assertEquals(0, roleHistory.rebuild(loadedRoleHistory));
+ }
+
+ /**
+ * Test that a v1b JSON file can be read. Here more roles than expected
+ * @throws Throwable
+ */
+ @Test
+ public void testReloadDataV1b_1_role() throws Throwable {
+ String source = HISTORY_V1b_1_ROLE;
+ RoleHistoryWriter writer = new RoleHistoryWriter();
+
+ LoadedRoleHistory loadedRoleHistory = writer.read(source);
+ assertEquals(1, loadedRoleHistory.size());
+ assertEquals(2, loadedRoleHistory.roleMap.size());
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ assertEquals(0, roleHistory.rebuild(loadedRoleHistory));
+
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java
new file mode 100644
index 0000000..05eb4bc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRWOrdering.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.avro.NewerFilesFirst;
+import org.apache.slider.server.avro.RoleHistoryWriter;
+import org.junit.Test;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class TestRoleHistoryRWOrdering extends BaseMockAppStateTest {
+
+  List<Path> paths = pathlist(
+ Arrays.asList(
+ "hdfs://localhost/history-0406c.json",
+ "hdfs://localhost/history-5fffa.json",
+ "hdfs://localhost/history-0001a.json",
+ "hdfs://localhost/history-0001f.json"
+ )
+ );
+ Path h_0406c = paths.get(0);
+ Path h_5fffa = paths.get(1);
+  Path h_0001a = paths.get(2);
+
+ public TestRoleHistoryRWOrdering() throws URISyntaxException {
+ }
+
+
+  List<Path> pathlist(List<String> pathnames) throws URISyntaxException {
+    List<Path> paths = new ArrayList<>();
+ for (String p : pathnames) {
+ paths.add(new Path(new URI(p)));
+ }
+ return paths;
+ }
+
+ @Override
+ public String getTestName() {
+ return "TestHistoryRWOrdering";
+ }
+
+ /**
+ * This tests regexp pattern matching. It uses the current time so isn't
+ * repeatable -but it does test a wider range of values in the process
+ * @throws Throwable
+ */
+ @Test
+ public void testPatternRoundTrip() throws Throwable {
+ describe("test pattern matching of names");
+    long value = System.currentTimeMillis();
+ String name = String.format(SliderKeys.HISTORY_FILENAME_CREATION_PATTERN,
+ value);
+ String matchpattern = SliderKeys.HISTORY_FILENAME_MATCH_PATTERN;
+ Pattern pattern = Pattern.compile(matchpattern);
+ Matcher matcher = pattern.matcher(name);
+ if (!matcher.find()) {
+      throw new Exception("No match for pattern " + matchpattern + " in " + name);
+ }
+ }
+
+ @Test
+ public void testWriteSequenceReadData() throws Throwable {
+ describe("test that if multiple entries are written, the newest is picked" +
+ " up");
+ long time = System.currentTimeMillis();
+
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ assertFalse(roleHistory.onStart(fs, historyPath));
+ String addr = "localhost";
+ NodeInstance instance = roleHistory.getOrCreateNodeInstance(addr);
+ NodeEntry ne1 = instance.getOrCreate(0);
+ ne1.setLastUsed(0xf00d);
+
+ Path history1 = roleHistory.saveHistory(time++);
+ Path history2 = roleHistory.saveHistory(time++);
+ Path history3 = roleHistory.saveHistory(time);
+
+ //inject a later file with a different name
+ sliderFileSystem.cat(new Path(historyPath, "file.json"), true, "hello," +
+ " world");
+
+
+ RoleHistoryWriter historyWriter = new RoleHistoryWriter();
+
+    List<Path> entries = historyWriter.findAllHistoryEntries(
+ fs,
+ historyPath,
+ false);
+ assertEquals(entries.size(), 3);
+ assertEquals(entries.get(0), history3);
+ assertEquals(entries.get(1), history2);
+ assertEquals(entries.get(2), history1);
+ }
+
+ @Test
+ public void testPathStructure() throws Throwable {
+ assertEquals(h_5fffa.getName(), "history-5fffa.json");
+ }
+
+ @Test
+ public void testPathnameComparator() throws Throwable {
+
+ NewerFilesFirst newerName = new NewerFilesFirst();
+
+ log.info("{} name is {}", h_5fffa, h_5fffa.getName());
+ log.info("{} name is {}", h_0406c, h_0406c.getName());
+    assertEquals(newerName.compare(h_5fffa, h_5fffa), 0);
+ assertTrue(newerName.compare(h_5fffa, h_0406c) < 0);
+ assertTrue(newerName.compare(h_5fffa, h_0001a) < 0);
+ assertTrue(newerName.compare(h_0001a, h_5fffa) > 0);
+
+ }
+
+ @Test
+ public void testPathSort() throws Throwable {
+    List<Path> paths2 = new ArrayList<>(paths);
+ RoleHistoryWriter.sortHistoryPaths(paths2);
+ assertListEquals(paths2,
+ Arrays.asList(
+ paths.get(1),
+ paths.get(0),
+ paths.get(3),
+ paths.get(2)
+ ));
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java
new file mode 100644
index 0000000..48a758f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/model/history/TestRoleHistoryRequestTracking.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.model.history;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.model.mock.BaseMockAppStateTest;
+import org.apache.slider.server.appmaster.model.mock.MockContainer;
+import org.apache.slider.server.appmaster.model.mock.MockFactory;
+import org.apache.slider.server.appmaster.model.mock.MockRoleHistory;
+import org.apache.slider.server.appmaster.state.ContainerAllocationOutcome;
+import org.apache.slider.server.appmaster.state.NodeEntry;
+import org.apache.slider.server.appmaster.state.NodeInstance;
+import org.apache.slider.server.appmaster.state.OutstandingRequest;
+import org.apache.slider.server.appmaster.state.RoleHistory;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Test the RH availability list and request tracking: that hosts
+ * get removed and added
+ */
+public class TestRoleHistoryRequestTracking extends BaseMockAppStateTest {
+
+ String roleName = "test";
+
+ NodeInstance age1Active4 = nodeInstance(1, 4, 0, 0);
+ NodeInstance age2Active2 = nodeInstance(2, 2, 0, 1);
+ NodeInstance age2Active0 = nodeInstance(2, 0, 0, 0);
+ NodeInstance age3Active0 = nodeInstance(3, 0, 0, 0);
+ NodeInstance age4Active1 = nodeInstance(4, 1, 0, 0);
+ NodeInstance empty = new NodeInstance("empty", MockFactory.ROLE_COUNT);
+
+  List<NodeInstance> nodes = Arrays.asList(age2Active2, age2Active0,
+ age4Active1, age1Active4, age3Active0);
+ RoleHistory roleHistory = new MockRoleHistory(MockFactory.ROLES);
+ /** 1MB, 1 vcore*/
+ Resource resource = Resource.newInstance(1, 1);
+
+ ProviderRole provRole = new ProviderRole(roleName, 0);
+ RoleStatus roleStatus = new RoleStatus(provRole);
+
+ public TestRoleHistoryRequestTracking() throws BadConfigException {
+ }
+
+ AMRMClient.ContainerRequest requestContainer(RoleStatus roleStatus) {
+ return roleHistory.requestContainerForRole(roleStatus).getIssuedRequest();
+ }
+
+ @Override
+ public String getTestName() {
+ return "TestRoleHistoryAvailableList";
+ }
+
+ @Before
+ public void setupNodeMap() {
+ roleHistory.insert(nodes);
+ roleHistory.buildRecentNodeLists();
+ roleStatus.setResourceRequirements(Resource.newInstance(1, 1));
+ }
+
+ @Test
+ public void testAvailableListBuiltForRoles() throws Throwable {
+    List<NodeInstance> available0 = roleHistory.cloneRecentNodeList(0);
+ assertListEquals(Arrays.asList(age3Active0, age2Active0), available0);
+ }
+
+ @Test
+ public void testRequestedNodeOffList() throws Throwable {
+ NodeInstance ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
+ assertEquals(age3Active0, ni);
+ assertListEquals(Arrays.asList(age2Active0),
+ roleHistory.cloneRecentNodeList(0));
+ roleHistory.requestInstanceOnNode(ni,
+ roleStatus,
+ resource
+ );
+ }
+
+ @Test
+ public void testRequestedNodeOffListWithFailures() throws Throwable {
+ assertEquals(0, roleStatus.getKey());
+ assertFalse(roleHistory.cloneRecentNodeList(0).isEmpty());
+
+ NodeEntry age3role0 = recordAsFailed(age3Active0, 0, 4);
+ assertTrue(age3Active0.isConsideredUnreliable(0, roleStatus
+ .getNodeFailureThreshold()));
+ recordAsFailed(age2Active0, 0, 4);
+ assertTrue(age2Active0.isConsideredUnreliable(0, roleStatus
+ .getNodeFailureThreshold()));
+ // expect to get a null node back
+ NodeInstance ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
+ assertNull(ni);
+
+ // which is translated to a no-location request
+ AMRMClient.ContainerRequest req = roleHistory.requestInstanceOnNode(ni,
+ roleStatus,
+ resource).getIssuedRequest();
+
+ assertNull(req.getNodes());
+
+ log.info("resetting failure count");
+ age3role0.resetFailedRecently();
+ roleHistory.dump();
+ assertEquals(0, age3role0.getFailedRecently());
+ assertFalse(age3Active0.isConsideredUnreliable(0, roleStatus
+ .getNodeFailureThreshold()));
+ assertFalse(roleHistory.cloneRecentNodeList(0).isEmpty());
+ // looking for a node should now find one
+ ni = roleHistory.findRecentNodeForNewInstance(roleStatus);
+ assertEquals(ni, age3Active0);
+ req = roleHistory.requestInstanceOnNode(ni, roleStatus, resource)
+ .getIssuedRequest();
+ assertEquals(1, req.getNodes().size());
+ }
+
+ /**
+ * verify that strict placement policies generate requests for nodes irrespective
+ * of their failed status
+ * @throws Throwable
+ */
+ @Test
+ public void testStrictPlacementIgnoresFailures() throws Throwable {
+
+ RoleStatus targetRole = getRole1Status();
+ final ProviderRole providerRole1 = targetRole.getProviderRole();
+ assertEquals(providerRole1.placementPolicy, PlacementPolicy.STRICT);
+ int key1 = targetRole.getKey();
+ int key0 = getRole0Status().getKey();
+
+    List<NodeInstance> nodes = Arrays.asList(age1Active4, age2Active0,
+ age2Active2, age3Active0, age4Active1);
+ recordAllFailed(key0, 4, nodes);
+ recordAllFailed(key1, 4, nodes);
+
+ // trigger a list rebuild
+ roleHistory.buildRecentNodeLists();
+    List<NodeInstance> recentRole0 = roleHistory.cloneRecentNodeList(key0);
+ assertTrue(recentRole0.indexOf(age3Active0) < recentRole0.indexOf(age2Active0));
+
+ // the non-strict role has no suitable nodes
+ assertNull(roleHistory.findRecentNodeForNewInstance(getRole0Status()));
+
+
+ NodeInstance ni = roleHistory.findRecentNodeForNewInstance(targetRole);
+ assertNotNull(ni);
+
+ NodeInstance ni2 = roleHistory.findRecentNodeForNewInstance(targetRole);
+ assertNotNull(ni2);
+ assertNotEquals(ni, ni2);
+ }
+
+ @Test
+ public void testFindAndRequestNode() throws Throwable {
+ AMRMClient.ContainerRequest req = requestContainer(roleStatus);
+
+ assertEquals(age3Active0.hostname, req.getNodes().get(0));
+ List