diff --git a/fair-scheduler.xml b/fair-scheduler.xml
new file mode 100644
index 00000000000..bbbcfaf9359
--- /dev/null
+++ b/fair-scheduler.xml
@@ -0,0 +1,70 @@
+
+
+
+ 1.0
+ drf
+ alice,bob,joe,john hadoop_users
+ alice,bob,joe,john hadoop_users
+
+ 1.0
+ drf
+
+
+ 1.0
+ drf
+
+ 1.0
+ drf
+ john
+ john
+
+
+ memory-mb=50.0%, vcores=50.0%
+ 3.0
+ false
+ drf
+ joe
+ joe
+
+
+
+ memory-mb=8192, vcores=1
+ 1.0
+ drf
+
+ memory-mb=16384, vcores=4
+ 2
+ 3.0
+ false
+ drf
+ alice
+ alice
+ 0.15
+
+
+ memory-mb=8192, vcores=2
+ 1.0
+ drf
+ bob
+ bob
+ -1.0
+
+
+
+ 23
+ 24
+ 0.12
+ 15
+ fair
+ 0.16
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index b9b7b702c10..5e96ee2fda2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -65,7 +65,7 @@
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index d939ac1525e..694a5b1e891 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -21,6 +21,9 @@
import com.google.common.annotations.VisibleForTesting;
import com.sun.jersey.spi.container.servlet.ServletContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigArgumentHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigConverter;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.Marker;
@@ -136,7 +139,6 @@
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
-import java.net.URLClassLoader;
import java.nio.charset.Charset;
import java.security.PrivilegedExceptionAction;
import java.security.SecureRandom;
@@ -226,6 +228,13 @@
private Configuration conf;
private UserGroupInformation rmLoginUGI;
+ private static FSConfigToCSConfigArgumentHandler
+ fsConfigConversionArgumentHandler;
+
+ static {
+ FSConfigToCSConfigConverter converter = initFSConfigConverter();
+ initFSArgumentHandler(converter);
+ }
public ResourceManager() {
super("ResourceManager");
@@ -1556,6 +1565,14 @@ public static void main(String argv[]) {
} else if (argv[0].equals("-remove-application-from-state-store")
&& argv.length == 2) {
removeApplication(conf, argv[1]);
+ } else if (argv[0].equals("-convert-fs-configuration")) {
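+ // Illustrative invocation (paths below are placeholders, not defaults):
+ //   yarn resourcemanager -convert-fs-configuration
+ //     -y /etc/hadoop/conf/yarn-site.xml
+ //     -f /etc/hadoop/conf/fair-scheduler.xml -o /tmp/fs-cs-output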
+ String[] args = Arrays.copyOfRange(argv, 1, argv.length);
+ try {
+ fsConfigConversionArgumentHandler.parseAndConvert(args);
+ } catch (Throwable t) {
+ LOG.error(FATAL, "Error while starting FS configuration conversion!", t);
+ System.exit(-1);
+ }
} else {
printUsage(System.err);
}
@@ -1666,6 +1683,13 @@ private static void printUsage(PrintStream out) {
out.println("Usage: yarn resourcemanager [-format-state-store]");
out.println(" "
+ "[-remove-application-from-state-store ]" + "\n");
+ out.println("[-convert-fs-configuration ");
+ out.println(" -y|yarnsiteconfig ");
+ out.println(" -f|fsconfig ");
+ out.println(" -r|rulesconfig ");
+ out.println(" -c|console If specified, result will be printed " +
+ "to console instead");
+ out.println("]");
}
protected RMAppLifetimeMonitor createRMAppLifetimeMonitor() {
@@ -1683,4 +1707,15 @@ private void registerMXBean() {
public boolean isSecurityEnabled() {
return UserGroupInformation.isSecurityEnabled();
}
+
+ @VisibleForTesting
+ static void initFSArgumentHandler(FSConfigToCSConfigConverter converter) {
+ ResourceManager.fsConfigConversionArgumentHandler = new FSConfigToCSConfigArgumentHandler(converter);
+ }
+
+ private static FSConfigToCSConfigConverter initFSConfigConverter() {
+ FSConfigToCSConfigRuleHandler ruleHandler = new FSConfigToCSConfigRuleHandler();
+ return new FSConfigToCSConfigConverter(ruleHandler);
+ }
+
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index fe7379a5abe..cf02952fed5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -96,6 +96,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ReleaseContainerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
@@ -182,6 +183,8 @@
protected SchedulingMonitorManager schedulingMonitorManager =
new SchedulingMonitorManager();
+ private boolean migration;
+
/**
* Construct the service.
*
@@ -197,6 +200,8 @@ public AbstractYarnScheduler(String name) {
@Override
public void serviceInit(Configuration conf) throws Exception {
+ migration = conf.getBoolean(FairSchedulerConfiguration.MIGRATION_MODE, false);
+
nmExpireInterval =
conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
@@ -209,7 +214,10 @@ public void serviceInit(Configuration conf) throws Exception {
nodeTracker.setConfiguredMaxAllocationWaitTime(
configuredMaximumAllocationWaitTime);
maxClusterLevelAppPriority = getMaxPriorityFromConf(conf);
- this.releaseCache = new Timer("Pending Container Clear Timer");
+ if (!migration) {
+ this.releaseCache = new Timer("Pending Container Clear Timer");
+ }
+
autoUpdateContainers =
conf.getBoolean(YarnConfiguration.RM_AUTO_UPDATE_CONTAINERS,
YarnConfiguration.DEFAULT_RM_AUTO_UPDATE_CONTAINERS);
@@ -227,11 +235,14 @@ public void serviceInit(Configuration conf) throws Exception {
@Override
protected void serviceStart() throws Exception {
- if (updateThread != null) {
- updateThread.start();
+ if (!migration) {
+ if (updateThread != null) {
+ updateThread.start();
+ }
+ schedulingMonitorManager.startAll();
+ createReleaseCache();
}
- schedulingMonitorManager.startAll();
- createReleaseCache();
+
super.serviceStart();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index 7f065211026..34e05737bf3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -249,12 +249,24 @@ int getQueueMaxApps(String queue) {
return (maxApps == null) ? queueMaxAppsDefault : maxApps;
}
+ int getQueueMaxAppsDefault() {
+ return queueMaxAppsDefault;
+ }
+
+ int getUserMaxAppsDefault() {
+ return userMaxAppsDefault;
+ }
+
@VisibleForTesting
float getQueueMaxAMShare(String queue) {
Float maxAMShare = queueMaxAMShares.get(queue);
return (maxAMShare == null) ? queueMaxAMShareDefault : maxAMShare;
}
+ float getQueueMaxAMShareDefault() {
+ return queueMaxAMShareDefault;
+ }
+
/**
* Get the minimum resource allocation for the given queue.
*
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
index 44049fdf595..798ad34a224 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
@@ -137,4 +137,8 @@ void setPercentage(String name, double value) {
}
}
}
+
+ double[] getPercentages() {
+ return percentages;
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConversionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConversionException.java
new file mode 100644
index 00000000000..ba621d0bb77
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConversionException.java
@@ -0,0 +1,13 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
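+/**
+ * Thrown when a Fair Scheduler configuration setting cannot be converted
+ * to an equivalent Capacity Scheduler setting.
+ */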
+public class ConversionException extends RuntimeException {
+ private static final long serialVersionUID = 4161836727287317835L;
+
+ public ConversionException(String message) {
+ super(message);
+ }
+
+ public ConversionException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigArgumentHandler.java
new file mode 100644
index 00000000000..b85f93160d8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigArgumentHandler.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.MissingArgumentException;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+
+import java.io.File;
+
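+/**
+ * Parses and validates the command-line arguments of the
+ * "-convert-fs-configuration" option and invokes
+ * {@link FSConfigToCSConfigConverter} with the resulting parameters.
+ */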
+public class FSConfigToCSConfigArgumentHandler {
+ private final FSConfigToCSConfigConverter converter;
+
+ public FSConfigToCSConfigArgumentHandler(FSConfigToCSConfigConverter converter) {
+ this.converter = converter;
+ }
+
+ private enum CliOption {
+ YARN_SITE("yarn-site.xml", "y", "yarnsiteconfig",
+ "Path to a valid yarn-site.xml config file", true, true),
+
+ // fair-scheduler.xml is not mandatory
+ // if FairSchedulerConfiguration.ALLOCATION_FILE is defined in yarn-site.xml
+ FAIR_SCHEDULER("fair-scheduler.xml", "f", "fsconfig",
+ "Path to a valid fair-scheduler.xml config file", false, true),
+ CONVERSION_RULES("conversion rules config file", "r", "rulesconfig",
+ "Optional parameter. If given, should specify a valid path to the " +
+ "conversion rules file (property format).", false, true),
+ CONSOLE_MODE("console mode", "p", "print",
+ "If defined, the converted configuration will " +
+ "only be emitted to the console.", false, false),
+ CLUSTER_RESOURCE("cluster resource", "c", "cluster-resource",
+ "Needs to be given if maxResources is defined as percentages " +
+ "for any queue, otherwise this parameter can be omitted.", false, true),
+ OUTPUT_DIR("output directory", "o", "output-directory",
+ "Output directory for yarn-site.xml and capacity-scheduler.xml files." +
+ "Must have write permission for user who is running this script.", true, true);
+
+ private final String name;
+ private final String shortSwitch;
+ private final String longSwitch;
+ private final String description;
+ private final boolean required;
+ private final boolean hasArg;
+
+ CliOption(String name, String shortSwitch, String longSwitch,
+ String description, boolean required, boolean hasArg) {
+ this.name = name;
+ this.shortSwitch = shortSwitch;
+ this.longSwitch = longSwitch;
+ this.description = description;
+ this.required = required;
+ this.hasArg = hasArg;
+ }
+
+ public Option createCommonsCliOption() {
+ Option option = new Option(shortSwitch, longSwitch, hasArg, description);
+ option.setRequired(required);
+ return option;
+ }
+ }
+
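+ /**
+  * Parses the command-line arguments, validates the referenced files and
+  * the output directory, then runs the conversion. Conversion errors are
+  * reported on standard error.
+  */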
+ public void parseAndConvert(String[] args) throws Exception {
+ Options opts = createOptions();
+ CommandLine cliParser;
+ try {
+ cliParser = new GnuParser().parse(opts, args);
+ } catch (MissingArgumentException ex) {
+ throw new IllegalArgumentException("Missing argument for options", ex);
+ }
+
+ checkOptionPresent(cliParser, CliOption.YARN_SITE);
+ checkOptionPresent(cliParser, CliOption.OUTPUT_DIR);
+ FSConfigToCSConfigConverterParams params = validateInputFiles(cliParser);
+ try {
+ converter.convert(params);
+ } catch (UnsupportedPropertyException e) {
+ System.err.println("Fatal error during conversion! " + e.getMessage());
+ } catch (ConversionException e) {
+ System.err.println("Error occurred during property conversion: " + e.getMessage());
+ }
+ }
+
+ private Options createOptions() {
+ Options opts = new Options();
+
+ for (CliOption cliOption : CliOption.values()) {
+ opts.addOption(cliOption.createCommonsCliOption());
+ }
+
+ return opts;
+ }
+
+ private FSConfigToCSConfigConverterParams validateInputFiles(CommandLine cliParser) {
+ String yarnSiteXmlFile =
+ cliParser.getOptionValue(CliOption.YARN_SITE.shortSwitch);
+ String fairSchedulerXmlFile =
+ cliParser.getOptionValue(CliOption.FAIR_SCHEDULER.shortSwitch);
+ String conversionRulesFile =
+ cliParser.getOptionValue(CliOption.CONVERSION_RULES.shortSwitch);
+ String outputDir =
+ cliParser.getOptionValue(CliOption.OUTPUT_DIR.shortSwitch);
+
+ checkFile(CliOption.YARN_SITE, yarnSiteXmlFile);
+ checkFile(CliOption.FAIR_SCHEDULER, fairSchedulerXmlFile);
+ checkFile(CliOption.CONVERSION_RULES, conversionRulesFile);
+ checkDirectory(CliOption.OUTPUT_DIR, outputDir);
+
+ return FSConfigToCSConfigConverterParams.Builder.create()
+ .withYarnSiteXmlConfig(yarnSiteXmlFile)
+ .withFairSchedulerXmlConfig(fairSchedulerXmlFile)
+ .withConversionRulesConfig(conversionRulesFile)
+ .withClusterResource(cliParser.getOptionValue(CliOption.CLUSTER_RESOURCE.shortSwitch))
+ .withConsole(cliParser.hasOption(CliOption.CONSOLE_MODE.shortSwitch))
+ .withOutputDirectory(outputDir)
+ .build();
+ }
+
+ private static void checkOptionPresent(CommandLine cliParser, CliOption cliOption) {
+ if (!cliParser.hasOption(cliOption.shortSwitch)) {
+ throw new IllegalArgumentException(
+ String.format("Missing %s parameter " + "(switch: %s|%s).",
+ cliOption.name, cliOption.shortSwitch, cliOption.longSwitch));
+ }
+ }
+
+ private static void checkFile(CliOption cliOption, String filePath) {
+ checkFileInternal(cliOption, filePath, true);
+ }
+
+ private static void checkDirectory(CliOption cliOption, String dirPath) {
+ checkFileInternal(cliOption, dirPath, false);
+ }
+
+ private static void checkFileInternal(CliOption cliOption, String filePath, boolean isFile) {
+ // Null is safe to ignore here: mandatory options were checked earlier
+ // and the optional files may be absent.
+ if (filePath == null) {
+ return;
+ }
+
+ File file = new File(filePath);
+ if (isFile && file.isDirectory()) {
+ throw new IllegalArgumentException(String.format("Specified path %s is a directory but should be a file " +
+ "(As value of parameter %s)", filePath, cliOption.name));
+ } else if (!isFile && !file.isDirectory()) {
+ throw new IllegalArgumentException(String.format("Specified path %s is not a directory " +
+ "(As value of parameter %s)", filePath, cliOption.name));
+ } else if (!file.exists()) {
+ throw new IllegalArgumentException(String.format("Specified path %s does not exist " +
+ "(As value of parameter %s)", filePath, cliOption.name));
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigConverter.java
new file mode 100644
index 00000000000..4dd47637507
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigConverter.java
@@ -0,0 +1,768 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.lang3.mutable.MutableBoolean;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.AccessType;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy;
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
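+/**
+ * Converts a Fair Scheduler configuration (yarn-site.xml plus
+ * fair-scheduler.xml) into Capacity Scheduler configuration files
+ * (capacity-scheduler.xml and an updated yarn-site.xml), writing the
+ * result either to an output directory or to the console.
+ */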
+public class FSConfigToCSConfigConverter {
+ public static final Logger LOG = LoggerFactory.getLogger(
+ FSConfigToCSConfigConverter.class.getName());
+
+ private Resource clusterResource;
+ private boolean preemptionEnabled = false;
+ private int queueMaxAppsDefault;
+ private float queueMaxAMshareDefault;
+ private boolean autoCreateChildQueues = false;
+ private boolean sizeBasedWeight = false;
+ private boolean userAsDefaultQueue = false;
+ private PlacementManager placementManager;
+ private boolean fifoOrFairSharePolicyUsed = false;
+ private boolean drfPolicyUsedOnQueueLevel = false;
+
+ private Configuration yarnSiteConfig;
+ private Configuration capacacitySchedulerConfig;
+ private FSConfigToCSConfigRuleHandler ruleHandler;
+ private Set<String> leafQueueNames;
+
+ private OutputStream yarnSiteOutputStream;
+ private OutputStream capacacitySchedulerOutputStream;
+ private boolean consoleMode = true;
+
+ private enum OutputFile {
+ YARN_SITE_XML("yarn-site.xml"),
+ CAPACITY_SCHEDULER_XML("capacity-scheduler.xml");
+
+ private final String name;
+
+ OutputFile(String name) {
+ this.name = name;
+ }
+ }
+
+ public FSConfigToCSConfigConverter(FSConfigToCSConfigRuleHandler ruleHandler) {
+ this.ruleHandler = ruleHandler;
+ this.leafQueueNames = new HashSet<>();
+ this.yarnSiteOutputStream = System.out;
+ this.capacacitySchedulerOutputStream = System.out;
+ }
+
+ public void convert(FSConfigToCSConfigConverterParams params) throws Exception {
+ validateParams(params);
+ prepareOutputFiles(params.getOutputDirectory(), params.isConsole());
+ loadConversionRules(params.getConversionRulesConfig());
+ Configuration conf = createConfiguration(params);
+ handleFairSchedulerConfig(params, conf);
+
+ convert(conf, getClusterResource(params));
+ }
+
+ private void prepareOutputFiles(String outputDirectory, boolean console) throws FileNotFoundException {
+ if (console) {
+ LOG.info("Console mode is enabled, yarn-site.xml and capacity-scheduler.xml will be only emitted to the console!");
+ return;
+ }
+ this.consoleMode = false;
+ File yarnSiteXmlOutputFile = new File(outputDirectory, OutputFile.YARN_SITE_XML.name);
+ File schedulerXmlOutput = new File(outputDirectory, OutputFile.CAPACITY_SCHEDULER_XML.name);
+ LOG.info("Output directory for yarn-site.xml and capacity-scheduler.xml is: " + outputDirectory);
+
+ this.yarnSiteOutputStream = new FileOutputStream(yarnSiteXmlOutputFile);
+ this.capacacitySchedulerOutputStream = new FileOutputStream(schedulerXmlOutput);
+ }
+
+ private void validateParams(FSConfigToCSConfigConverterParams params) {
+ if (params.getYarnSiteXmlConfig() == null) {
+ throw new IllegalArgumentException("yarn-site.xml configuration " +
+ "is not defined but it is mandatory!");
+ } else if (params.getOutputDirectory() == null) {
+ throw new IllegalArgumentException("Output directory configuration " +
+ "is not defined but it is mandatory!");
+ }
+ }
+
+ private Resource getClusterResource(FSConfigToCSConfigConverterParams params) {
+ Resource clusterResource = null;
+ if (params.getClusterResource() != null) {
+ ConfigurableResource configurableResource;
+ try {
+ configurableResource = FairSchedulerConfiguration
+ .parseResourceConfigValue(params.getClusterResource());
+ } catch (AllocationConfigurationException e) {
+ throw new ConversionException("Error while parsing resource.", e);
+ }
+ clusterResource = configurableResource.getResource();
+ }
+ return clusterResource;
+ }
+
+ private void loadConversionRules(String rulesFile) throws IOException {
+ if (rulesFile != null) {
+ LOG.info("Reading conversion rules file from: " + rulesFile);
+ this.ruleHandler.loadRulesFromFile(rulesFile);
+ } else {
+ LOG.info("Conversion rules file is not defined, using default conversion config!");
+ }
+ }
+
+ private Configuration createConfiguration(FSConfigToCSConfigConverterParams params) {
+ Configuration conf = new YarnConfiguration();
+ conf.addResource(new Path(params.getYarnSiteXmlConfig()));
+ conf.setBoolean(FairSchedulerConfiguration.MIGRATION_MODE, true);
+ return conf;
+ }
+
+ private void handleFairSchedulerConfig(FSConfigToCSConfigConverterParams params, Configuration conf) {
+ String fairSchedulerXmlConfig = params.getFairSchedulerXmlConfig();
+
+ // Don't override allocation file in conf yet, as it would ruin the second
+ // condition here
+ if (fairSchedulerXmlConfig != null) {
+ LOG.info("Using explicitly defined fair-scheduler.xml");
+ } else if (conf.get(FairSchedulerConfiguration.ALLOCATION_FILE) != null) {
+ LOG.info("Using fair-scheduler.xml defined in yarn-site.xml by key: " + FairSchedulerConfiguration.ALLOCATION_FILE);
+ } else {
+ throw new IllegalArgumentException("fair-scheduler.xml is not defined " +
+ "neither in yarn-site.xml " +
+ "(with property: " + FairSchedulerConfiguration.ALLOCATION_FILE + ") " +
+ "nor directly with its own parameter!");
+ }
+
+ // We can now safely override allocation file in conf
+ if (fairSchedulerXmlConfig != null) {
+ conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, params.getFairSchedulerXmlConfig());
+ }
+ }
+
+ @VisibleForTesting
+ void convert(Configuration conf, Resource clusterResource) throws Exception {
+ // initialize Fair Scheduler
+ RMContext ctx = new RMContextImpl();
+ PlacementManager placementManager = new PlacementManager();
+ ctx.setQueuePlacementManager(placementManager);
+
+ FairScheduler fs = new FairScheduler();
+ fs.setRMContext(ctx);
+ fs.init(conf);
+ this.clusterResource = clusterResource;
+
+ AllocationConfiguration allocConf = fs.getAllocationConfiguration();
+ queueMaxAppsDefault = allocConf.getQueueMaxAppsDefault();
+ queueMaxAMshareDefault = allocConf.getQueueMaxAMShareDefault();
+
+ yarnSiteConfig = new Configuration(false);
+ capacacitySchedulerConfig = new Configuration(false);
+
+ checkUserMaxApps(allocConf);
+ checkUserMaxAppsDefault(allocConf);
+
+ // conversion -- yarn-site.xml
+ convertSiteProperties(conf);
+ checkReservationSystem(conf);
+
+ // conversion -- capacity-scheduler.xml
+ FSParentQueue rootQueue = fs.getQueueManager().getRootQueue();
+ emitDefaultMaxApplications();
+ emitDefaultMaxAMshare();
+ convertQueue(rootQueue);
+ emitACLs(fs);
+
+ if (placementManager.getPlacementRules().size() > 0) {
+ QueuePlacementConverter placementConverter = new QueuePlacementConverter();
+ Map<String, String> properties =
+ placementConverter.convertPlacementPolicy(placementManager,
+ ruleHandler, userAsDefaultQueue);
+ properties.entrySet().forEach(entry ->
+ capacacitySchedulerConfig.set(entry.getKey(), entry.getValue()));
+ }
+
+ // Validate ordering policy
+ if (drfPolicyUsedOnQueueLevel) {
+ if (fifoOrFairSharePolicyUsed) {
+ throw new ConversionException("DRF ordering policy cannot be used together with fifo/fair");
+ } else {
+ capacacitySchedulerConfig.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
+ DominantResourceCalculator.class.getCanonicalName());
+ }
+ }
+
+ // display capacity-scheduler.xml
+ if (consoleMode) {
+ System.out.println("======= capacity-scheduler.xml =======");
+ }
+ capacacitySchedulerConfig.writeXml(capacacitySchedulerOutputStream);
+
+ // display yarn-site.xml
+ if (consoleMode) {
+ System.out.println();
+ System.out.println("======= yarn-site.xml =======");
+ }
+ yarnSiteConfig.writeXml(yarnSiteOutputStream);
+ }
+
+ @VisibleForTesting
+ void setYarnSiteOutputStream(OutputStream out) {
+ this.yarnSiteOutputStream = out;
+ }
+
+ @VisibleForTesting
+ void setCapacitySchedulerConfigOutputStream(OutputStream out) {
+ this.capacacitySchedulerOutputStream = out;
+ }
+
+ private void emitDefaultMaxApplications() {
+ if (queueMaxAppsDefault != Integer.MAX_VALUE) {
+ capacacitySchedulerConfig.set(CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS,
+ String.valueOf(queueMaxAppsDefault));
+ }
+ }
+
+ private void emitDefaultMaxAMshare() {
+ capacacitySchedulerConfig.set(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT,
+ String.valueOf(queueMaxAMshareDefault));
+ }
+
+ @SuppressWarnings("deprecation")
+ private void convertSiteProperties(Configuration conf) {
+ yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
+
+ // TODO: deprecated property, check if necessary
+ if (conf.getBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
+ FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
+ yarnSiteConfig.set(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE,
+ "true");
+ int interval = conf.getInt(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
+ FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
+ yarnSiteConfig.set("yarn.scheduler.capacity.schedule-asynchronously.scheduling-interval-ms",
+ String.valueOf(interval));
+ }
+
+ String mbIncrementAllocation = conf.get("yarn.resource-types.memory-mb.increment-allocation");
+ if (mbIncrementAllocation != null) {
+ yarnSiteConfig.set("yarn.scheduler.minimum-allocation-mb",
+ String.valueOf(mbIncrementAllocation));
+ }
+
+ String vcoreIncrementAllocation = conf.get("yarn.resource-types.vcores.increment-allocation");
+ if (vcoreIncrementAllocation != null) {
+ yarnSiteConfig.set("yarn.scheduler.minimum-allocation-vcores",
+ String.valueOf(vcoreIncrementAllocation));
+ }
+
+ if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
+ FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
+ yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, "true");
+ preemptionEnabled = true;
+
+ int waitTimeBeforeKill = conf.getInt(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
+ FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
+ yarnSiteConfig.set(CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
+ String.valueOf(waitTimeBeforeKill));
+
+ long waitBeforeNextStarvationCheck = conf.getLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
+ FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
+ yarnSiteConfig.set(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
+ String.valueOf(waitBeforeNextStarvationCheck));
+ }
+
+ if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
+ FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
+ yarnSiteConfig.set(CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, "true");
+ } else {
+ yarnSiteConfig.set(CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, "false");
+ }
+
+ int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
+ FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
+ if (maxAssign != -1) {
+ yarnSiteConfig.set(CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT,
+ String.valueOf(maxAssign));
+ }
+
+ float localityThresholdNode = conf.getFloat(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
+ FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
+ if (localityThresholdNode != FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
+ yarnSiteConfig.set(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,
+ String.valueOf(localityThresholdNode));
+ }
+
+ float localityThresholdRack = conf.getFloat(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
+ FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
+ if (localityThresholdRack != FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
+ yarnSiteConfig.set(CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY,
+ String.valueOf(localityThresholdRack));
+ }
+
+ if (conf.getBoolean(FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS,
+ FairSchedulerConfiguration.DEFAULT_ALLOW_UNDECLARED_POOLS)) {
+ autoCreateChildQueues = true;
+ }
+
+ if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
+ FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
+ sizeBasedWeight = true;
+ }
+
+ if (conf.getBoolean(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,
+ FairSchedulerConfiguration.DEFAULT_USER_AS_DEFAULT_QUEUE)) {
+ userAsDefaultQueue = true;
+ }
+ }
+
+ private void convertQueue(FSQueue queue) {
+ List<FSQueue> children = queue.getChildQueues();
+ final String queueName = queue.getName();
+
+ if (queue instanceof FSLeafQueue) {
+ String shortName = getQueueShortName(queueName);
+ if (!leafQueueNames.add(shortName)) {
+ throw new ConversionException(
+ "Leaf queues must be unique, "
+ + shortName + " is defined at least twice");
+ }
+ }
+
+ // generates yarn.scheduler.capacity.<queue-path>.queues
+ emitChildQueues(queueName, children);
+
+ // ==> yarn.scheduler.capacity.<queue-path>.maximum-am-resource-percent
+ emitMaxAMshare(queueName, queue);
+
+ // ==> yarn.scheduler.capacity.<queue-path>.maximum-applications
+ emitMaxRunningApps(queueName, queue);
+
+ // ==> yarn.scheduler.capacity.<queue-path>.maximum-allocation-mb/vcores
+ emitMaxAllocations(queueName, queue);
+
+ // ==> yarn.scheduler.capacity.<queue-path>.disable_preemption
+ emitPreemptionDisabled(queueName, queue);
+
+ // TODO: COULD BE incorrect! Needs further clarifications
+ // weight + minResources ==> yarn.scheduler.capacity.<queue-path>.capacity
+ emitChildCapacity(queue);
+
+ // ==> yarn.scheduler.capacity.<queue-path>.maximum-capacity
+ emitMaximumCapacity(queueName, queue);
+
+ // yarn.scheduler.fair.allow-undeclared-pools ==> yarn.scheduler.capacity.<queue-path>.auto-create-child-queue.enabled
+ emitAutoCreateChildQueue(queueName);
+
+ // yarn.scheduler.fair.sizebasedweight ==> yarn.scheduler.capacity.<queue-path>.ordering-policy.fair.enable-size-based-weight
+ emitSizeBasedWeight(queueName);
+
+ // ==> yarn.scheduler.capacity.<queue-path>.ordering-policy
+ emitOrderingPolicy(queueName, queue);
+
+ // missing feature, "leaf-queue-template.capacity" only accepts a single pct value
+ checkMaxChildCapacitySetting(queueName, queue);
+
+ for (FSQueue childQueue : children) {
+ convertQueue(childQueue);
+ }
+ }
+
+ private void emitChildQueues(String queueName, List<FSQueue> children) {
+ StringBuilder childQueues = new StringBuilder();
+ ruleHandler.handleChildQueueCount(queueName, children.size());
+
+ children.stream().forEach(child -> {
+ if (childQueues.length() != 0) {
+ childQueues.append(",");
+ }
+
+ childQueues.append(getQueueShortName(child.getName()));
+ }
+ );
+
+ if (childQueues.length() > 0) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".queues",
+ childQueues.toString());
+ }
+ }
+
+ private void emitMaxAMshare(String queueName, FSQueue queue) {
+ float queueMaxAmShare = queue.getMaxAMShare();
+
+ if (queueMaxAmShare != 0.0f
+ && queueMaxAmShare != queueMaxAMshareDefault
+ && queueMaxAmShare != -1.0f) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".maximum-am-resource-percent",
+ String.valueOf(queueMaxAmShare));
+ }
+
+ if (queueMaxAmShare == -1.0f) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".maximum-am-resource-percent",
+ "1.0");
+ }
+ }
+
+ private void emitMaxRunningApps(String queueName, FSQueue queue) {
+ if (queue.getMaxRunningApps() != Integer.MAX_VALUE
+ && queue.getMaxRunningApps() != queueMaxAppsDefault) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".maximum-applications",
+ String.valueOf(queue.getMaxRunningApps()));
+ }
+ }
+
+ private void emitMaximumCapacity(String queueName, FSQueue queue) {
+ ConfigurableResource rawMaxShare = queue.getRawMaxShare();
+ final Resource maxResource = rawMaxShare.getResource();
+
+ long memSize = 0;
+ long vCores = 0;
+ boolean defined = false;
+
+ if (maxResource == null) {
+ if (rawMaxShare.getPercentages() != null) {
+ if (clusterResource == null) {
+ throw new ConversionException(
+ String.format(" defined in percentages for queue %s " +
+ "but cluster resource parameter is not defined via CLI!", queueName));
+ }
+
+ ruleHandler.handleMaxCapacityPercentage(queueName);
+
+ double[] percentages = rawMaxShare.getPercentages();
+ int memIndex = ResourceUtils.getResourceTypeIndex().get("memory-mb");
+ int vcoreIndex = ResourceUtils.getResourceTypeIndex().get("vcores");
+
+ memSize = (long) (percentages[memIndex] * clusterResource.getMemorySize());
+ vCores = (long) (percentages[vcoreIndex] * clusterResource.getVirtualCores());
+ defined = true;
+ } else {
+ throw new IllegalArgumentException("Illegal ConfigurableResource = " + rawMaxShare);
+ }
+ } else if (Resources.unbounded().compareTo(maxResource) != 0) {
+ memSize = maxResource.getMemorySize();
+ vCores = maxResource.getVirtualCores();
+ defined = true;
+ }
+
+ if (defined) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".maximum-capacity",
+ String.format("[memory=%d, vcores=%d]", memSize, vCores));
+ }
+ }
+
+ private void emitMaxAllocations(String queueName, FSQueue queue) {
+ Resource maxAllocation = queue.getMaximumContainerAllocation();
+
+ if (Resources.unbounded().compareTo(maxAllocation) != 0) {
+ long parentMaxVcores = Integer.MIN_VALUE;
+ long parentMaxMemory = Integer.MIN_VALUE;
+
+ if (queue.getParent() != null) {
+ FSQueue parent = queue.getParent();
+ Resource parentMaxAllocation = parent.getMaximumContainerAllocation();
+ if (Resources.unbounded().compareTo(parentMaxAllocation) != 0) {
+ parentMaxVcores = parentMaxAllocation.getVirtualCores();
+ parentMaxMemory = parentMaxAllocation.getMemorySize();
+ }
+ }
+
+ long maxVcores = maxAllocation.getVirtualCores();
+ long maxMemory = maxAllocation.getMemorySize();
+
+ // only emit max allocation if it differs from the parent's setting
+ if (maxVcores != parentMaxVcores || maxMemory != parentMaxMemory) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".maximum-allocation-mb",
+ String.valueOf(maxMemory));
+
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".maximum-allocation-vcores",
+ String.valueOf(maxVcores));
+ }
+ }
+ }
+
+ private void emitPreemptionDisabled(String queueName, FSQueue queue) {
+ if (preemptionEnabled && !queue.isPreemptable()) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".disable_preemption",
+ "true");
+ }
+ }
+
+ private void emitAutoCreateChildQueue(String queueName) {
+ if (autoCreateChildQueues) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".auto-create-child-queue.enabled",
+ "true");
+ }
+ }
+
+ private void emitSizeBasedWeight(String queueName) {
+ if (sizeBasedWeight) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName + ".ordering-policy.fair.enable-size-based-weight",
+ "true");
+ }
+ }
+
+ private void emitOrderingPolicy(String queueName, FSQueue queue) {
+ String policy = queue.getPolicy().getName();
+
+ switch (policy) {
+ case FairSharePolicy.NAME:
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName
+ + ".ordering-policy", FairSharePolicy.NAME);
+ fifoOrFairSharePolicyUsed = true;
+ break;
+ case FifoPolicy.NAME:
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queueName
+ + ".ordering-policy", FifoPolicy.NAME);
+ fifoOrFairSharePolicyUsed = true;
+ break;
+ case DominantResourceFairnessPolicy.NAME:
+ // DRF is not supported on a queue level,
+ // it has to be global
+ drfPolicyUsedOnQueueLevel = true;
+ break;
+ default:
+ throw new IllegalArgumentException("Unexpected policy: " + policy);
+ }
+ }
+
+ private void emitChildCapacity(FSQueue queue) {
+ List<FSQueue> children = queue.getChildQueues();
+
+ int totalWeight = getTotalWeight(children);
+ Map<String, Capacity> capacities = getCapacities(totalWeight, children);
+ capacities.entrySet().stream()
+ .forEach(entry -> {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + entry.getKey() + ".capacity",
+ entry.getValue().toString());
+ });
+ }
+
+ private void emitACLs(FairScheduler fs) {
+ fs.getAllocationConfiguration().getQueueAcls()
+ .entrySet()
+ .stream()
+ .forEach(entry -> generateQueueAcl(entry.getKey(),
+ entry.getValue()));
+ }
+
+ private void generateQueueAcl(String queue, Map<AccessType, AccessControlList> access) {
+ AccessControlList submitAcls = access.get(AccessType.SUBMIT_APP);
+ AccessControlList adminAcls = access.get(AccessType.ADMINISTER_QUEUE);
+
+ if (!submitAcls.getGroups().isEmpty() || !submitAcls.getUsers().isEmpty()) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queue + ".acl_submit_applications",
+ submitAcls.getAclString());
+ }
+
+ if (!adminAcls.getGroups().isEmpty() || !adminAcls.getUsers().isEmpty()) {
+ capacacitySchedulerConfig.set("yarn.scheduler.capacity." + queue + ".acl_administer_queue",
+ adminAcls.getAclString());
+ }
+ }
+
+ PlacementManager getPlacementManager() {
+ return placementManager;
+ }
+
+ private String getQueueShortName(String queueName) {
+ int lastDot = queueName.lastIndexOf(".");
+ return queueName.substring(lastDot + 1, queueName.length());
+ }
+
+ private int getTotalWeight(List<FSQueue> children) {
+ double sum = children
+ .stream()
+ .mapToDouble(c -> c.getWeight())
+ .sum();
+ return (int) sum;
+ }
+
+ private Map<String, Capacity> getCapacities(int totalWeight, List<FSQueue> children) {
+ Map<String, Capacity> capacities = new HashMap<>();
+ Map<String, BigDecimal> bdCapacities = new HashMap<>();
+ final BigDecimal HUNDRED = new BigDecimal(100).setScale(3);
+
+ MutableBoolean needVerifySum = new MutableBoolean(true);
+ children
+ .stream()
+ .forEach(queue -> {
+ BigDecimal total = new BigDecimal(totalWeight);
+ BigDecimal weight = new BigDecimal(queue.getWeight());
+ BigDecimal pct = weight
+ .setScale(5)
+ .divide(total, RoundingMode.HALF_UP)
+ .multiply(HUNDRED)
+ .setScale(3);
+
+ // defined?
+ if (Resources.none().compareTo(queue.getMinShare()) != 0) {
+ needVerifySum.setFalse();
+
+ /* TODO: Needs discussion.
+ *
+ * Probably it's not entirely correct this way!
+ * Eg. root.queue1 in FS translates to 33%
+ * capacity, but minResources is defined as 1vcore,8GB
+ * which is less than 33%.
+ *
+ * Therefore max(calculatedCapacity, minResource) is
+ * more sound.
+ */
+ Resource minShare = queue.getMinShare();
+ String capacity = String.format("[memory=%d,vcores=%d]",
+ minShare.getMemorySize(), minShare.getVirtualCores());
+ capacities.put(queue.getName(), Capacity.newCapacity(capacity));
+ } else {
+ capacities.put(queue.getName(), Capacity.newCapacity(pct));
+ bdCapacities.put(queue.getName(), pct);
+ }
+ });
+
+ if (needVerifySum.isTrue()) {
+ BigDecimal totalPct = new BigDecimal(0);
+ for (Map.Entry<String, BigDecimal> entry : bdCapacities.entrySet()) {
+ totalPct = totalPct.add(entry.getValue());
+ }
+
+ // fix last value if total != 100.000
+ if (!totalPct.equals(HUNDRED)) {
+ if (children.size() > 1) {
+ BigDecimal tmp = new BigDecimal(0);
+ for (int i = 0; i < children.size() - 1; i++) {
+ tmp = tmp.add(bdCapacities.get(children.get(i).getName()));
+ }
+
+ String lastQueue = children.get(children.size() - 1).getName();
+ BigDecimal corrected = HUNDRED.subtract(tmp);
+ capacities.put(lastQueue, Capacity.newCapacity(corrected));
+ } else if (children.size() == 1) {
+ // just a single element, probably this branch should
+ // not be reached
+ String queueName = children.get(0).getName();
+ capacities.put(queueName, Capacity.newCapacity(HUNDRED));
+ }
+ }
+ }
+
+ return capacities;
+ }
+
+ private void checkMaxChildCapacitySetting(String queueName, FSQueue queue) {
+ if (queue.getMaxChildQueueResource() != null) {
+ Resource resource = queue.getMaxChildQueueResource().getResource();
+
+ if ((resource != null && Resources.unbounded().compareTo(resource) != 0)
+ || queue.getMaxChildQueueResource().getPercentages() != null) {
+ // Maximum child resource is defined
+ ruleHandler.handleMaxChildCapacity();
+ }
+ }
+ }
+
+ private void checkReservationSystem(Configuration conf) {
+ if (conf.getBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE,
+ YarnConfiguration.DEFAULT_RM_RESERVATION_SYSTEM_ENABLE)) {
+ ruleHandler.handleReservationSystem();
+ }
+ }
+
+ private void checkUserMaxApps(AllocationConfiguration allocConf) {
+ if (allocConf.userMaxApps != null
+ && allocConf.userMaxApps.size() > 0) {
+ ruleHandler.handleUserMaxApps();
+ }
+ }
+
+ private void checkUserMaxAppsDefault(AllocationConfiguration allocConf) {
+ if (allocConf.getUserMaxAppsDefault() > 0) {
+ ruleHandler.handleUserMaxAppsDefault();
+ }
+ }
+
+ @VisibleForTesting
+ Resource getClusterResource() {
+ return clusterResource;
+ }
+
+ @VisibleForTesting
+ FSConfigToCSConfigRuleHandler getRuleHandler() {
+ return ruleHandler;
+ }
+
+ /*
+ * Represents a queue capacity in either percentage
+ * or in absolute resources
+ */
+ private static class Capacity {
+ BigDecimal percentage;
+ String absoluteResource;
+
+ public static Capacity newCapacity(BigDecimal pct) {
+ Capacity capacity = new Capacity();
+ capacity.percentage = pct;
+ capacity.absoluteResource = null;
+
+ return capacity;
+ }
+
+ public static Capacity newCapacity(String absoluteResource) {
+ Capacity capacity = new Capacity();
+ capacity.percentage = null;
+ capacity.absoluteResource = absoluteResource;
+
+ return capacity;
+ }
+
+ @Override
+ public String toString() {
+ if (percentage != null) {
+ return percentage.toString();
+ } else {
+ return absoluteResource;
+ }
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigConverterParams.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigConverterParams.java
new file mode 100644
index 00000000000..db13a43c3eb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigConverterParams.java
@@ -0,0 +1,125 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
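+/**
+ * Parameter holder for {@link FSConfigToCSConfigConverter}; instances are
+ * created through the nested {@link Builder}.
+ */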
+public class FSConfigToCSConfigConverterParams {
+ private String yarnSiteXmlConfig;
+ private String fairSchedulerXmlConfig;
+ private String conversionRulesConfig;
+ private boolean console;
+ private String clusterResource;
+ private String outputDirectory;
+
+ private FSConfigToCSConfigConverterParams() {
+ //must use builder
+ }
+
+ public String getFairSchedulerXmlConfig() {
+ return fairSchedulerXmlConfig;
+ }
+
+ public String getYarnSiteXmlConfig() {
+ return yarnSiteXmlConfig;
+ }
+
+ public String getConversionRulesConfig() {
+ return conversionRulesConfig;
+ }
+
+ public String getClusterResource() {
+ return clusterResource;
+ }
+
+ public boolean isConsole() {
+ return console;
+ }
+
+ public String getOutputDirectory() {
+ return outputDirectory;
+ }
+
+ @Override
+ public String toString() {
+ return "FSConfigToCSConfigConverterParams{" +
+ "yarnSiteXmlConfig='" + yarnSiteXmlConfig + '\'' +
+ ", fairSchedulerXmlConfig='" + fairSchedulerXmlConfig + '\'' +
+ ", conversionRulesConfig='" + conversionRulesConfig + '\'' +
+ ", clusterResource='" + clusterResource + '\'' +
+ ", console=" + console +
+ '}';
+ }
+
+ public static final class Builder {
+ private String yarnSiteXmlConfig;
+ private String fairSchedulerXmlConfig;
+ private String conversionRulesConfig;
+ private boolean console;
+ private String clusterResource;
+ private String outputDirectory;
+
+ private Builder() {
+ }
+
+ public static Builder create() {
+ return new Builder();
+ }
+
+ public Builder withYarnSiteXmlConfig(String yarnSiteXmlConfig) {
+ this.yarnSiteXmlConfig = yarnSiteXmlConfig;
+ return this;
+ }
+
+ public Builder withFairSchedulerXmlConfig(String fairSchedulerXmlConfig) {
+ this.fairSchedulerXmlConfig = fairSchedulerXmlConfig;
+ return this;
+ }
+
+ public Builder withConversionRulesConfig(String conversionRulesConfig) {
+ this.conversionRulesConfig = conversionRulesConfig;
+ return this;
+ }
+
+ public Builder withClusterResource(String res) {
+ this.clusterResource = res;
+ return this;
+ }
+
+ public Builder withConsole(boolean console) {
+ this.console = console;
+ return this;
+ }
+
+ public Builder withOutputDirectory(String outputDir) {
+ this.outputDirectory = outputDir;
+ return this;
+ }
+
+ public FSConfigToCSConfigConverterParams build() {
+ FSConfigToCSConfigConverterParams params =
+ new FSConfigToCSConfigConverterParams();
+ params.clusterResource = this.clusterResource;
+ params.console = this.console;
+ params.fairSchedulerXmlConfig = this.fairSchedulerXmlConfig;
+ params.yarnSiteXmlConfig = this.yarnSiteXmlConfig;
+ params.conversionRulesConfig = this.conversionRulesConfig;
+ params.outputDirectory = this.outputDirectory;
+ return params;
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigRuleHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigRuleHandler.java
new file mode 100644
index 00000000000..36fd0bd33ed
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSConfigToCSConfigRuleHandler.java
@@ -0,0 +1,216 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import static java.lang.String.format;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.commons.lang3.StringUtils;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
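+/**
+ * Decides, based on an optional rules file (property format), whether a
+ * Fair Scheduler setting that has no Capacity Scheduler equivalent should
+ * abort the conversion or only emit a warning.
+ */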
+public class FSConfigToCSConfigRuleHandler {
+ private static final Logger LOG = LoggerFactory.getLogger(FSConfigToCSConfigRuleHandler.class);
+
+
+ public static final String MAX_CHILD_QUEUE_LIMIT =
+ "maxChildQueue.limit";
+
+ public static final String MAX_CAPACITY_PERCENTAGE =
+ "maxCapacityPercentage.action";
+
+ public static final String MAX_CHILD_CAPACITY =
+ "maxChildCapacity.action";
+
+ public static final String USER_MAX_RUNNING_APPS =
+ "userMaxRunningApps.action";
+
+ public static final String USER_MAX_APPS_DEFAULT =
+ "userMaxAppsDefault.action";
+
+ public static final String DYNAMIC_MAX_ASSIGN =
+ "dynamicMaxAssign.action";
+
+ public static final String SPECIFIED_NOT_FIRST =
+ "specifiedNotFirstRule.action";
+
+ public static final String RESERVATION_SYSTEM =
+ "reservationSystem.action";
+
+ public static final String QUEUE_AUTO_CREATE =
+ "queueAutoCreate.action";
+
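+ /*
+  * Illustrative example of a conversion rules file (property format),
+  * using the keys defined above:
+  *
+  *   maxCapacityPercentage.action=warning
+  *   reservationSystem.action=abort
+  *   maxChildQueue.limit=3
+  *
+  * Unknown or missing actions default to WARNING.
+  */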
+ @VisibleForTesting
+ enum RuleAction {
+ WARNING,
+ ABORT
+ }
+
+ private Map<String, RuleAction> actions;
+ private Properties properties;
+
+ void loadRulesFromFile(String ruleFile) throws IOException {
+ if (ruleFile == null) {
+ throw new IllegalArgumentException("Rule file cannot be null!");
+ }
+
+ properties = new Properties();
+ File rules = new File(ruleFile);
+ properties.load(new FileInputStream(rules));
+ actions = new HashMap<>();
+ initPropertyActions();
+ }
+
+ public FSConfigToCSConfigRuleHandler() {
+ properties = new Properties();
+ actions = new HashMap<>();
+ }
+
+ @VisibleForTesting
+ FSConfigToCSConfigRuleHandler(Properties props) throws IOException {
+ properties = props;
+ actions = new HashMap<>();
+ initPropertyActions();
+ }
+
+ private void initPropertyActions() {
+ setActionForProperty(MAX_CAPACITY_PERCENTAGE);
+ setActionForProperty(MAX_CHILD_CAPACITY);
+ setActionForProperty(USER_MAX_RUNNING_APPS);
+ setActionForProperty(USER_MAX_APPS_DEFAULT);
+ setActionForProperty(DYNAMIC_MAX_ASSIGN);
+ setActionForProperty(SPECIFIED_NOT_FIRST);
+ setActionForProperty(RESERVATION_SYSTEM);
+ setActionForProperty(QUEUE_AUTO_CREATE);
+ }
+
+ public void handleMaxCapacityPercentage(String queueName) {
+ handle(MAX_CAPACITY_PERCENTAGE, null,
+ format(" defined in percentages for queue %s",
+ queueName));
+ }
+
+ public void handleMaxChildCapacity() {
+ handle(MAX_CHILD_CAPACITY, "", null);
+ }
+
+ public void handleChildQueueCount(String queue, int count) {
+ String value = properties.getProperty(MAX_CHILD_QUEUE_LIMIT);
+ if (value != null) {
+ if (StringUtils.isNumeric(value)) {
+ int maxChildQueue = Integer.parseInt(value);
+ if (count > maxChildQueue) {
+ throw new ConversionException(
+ format("Queue %s has too many children: %d", queue, count));
+ }
+ } else {
+ throw new IllegalArgumentException("maxChildQueue.limit is not an integer");
+ }
+ }
+ }
+
+ public void handleUserMaxApps() {
+ handle(USER_MAX_RUNNING_APPS, "", null);
+ }
+
+ public void handleUserMaxAppsDefault() {
+ handle(USER_MAX_APPS_DEFAULT, "", null);
+ }
+
+ public void handleDynamicMaxAssign() {
+ handle(DYNAMIC_MAX_ASSIGN,
+ FairSchedulerConfiguration.DYNAMIC_MAX_ASSIGN, null);
+ }
+
+ public void handleSecifiedNotFirstRule() {
+ handle(SPECIFIED_NOT_FIRST,
+ null,
+ "The tag is not the first placement rule, this cannot be"
+ + " converted properly");
+ }
+
+ public void handleReservationSystem() {
+ handle(RESERVATION_SYSTEM,
+ null,
+ "Conversion of reservation system is not supported");
+ }
+
+ public void handleQueueAutoCreate(String placementRule) {
+ handle(QUEUE_AUTO_CREATE,
+ null,
+ format(
+ "Placement rules: queue auto-create is not supported (type: %s)",
+ placementRule));
+ }
+
+ private void handle(String actionName, String fsSetting, String message) {
+ RuleAction action = actions.get(actionName);
+
+ if (action != null) {
+ switch (action) {
+ case ABORT:
+ String exceptionMessage;
+ if (message != null) {
+ exceptionMessage = message;
+ } else {
+ exceptionMessage = format("Setting %s is not supported", fsSetting);
+ }
+ throw new UnsupportedPropertyException(exceptionMessage);
+ case WARNING:
+ if (message != null) {
+ LOG.warn(message);
+ } else {
+ LOG.warn("Setting {} is not supported, ignoring conversion",
+ fsSetting);
+ }
+ break;
+ }
+ }
+ }
+
+ private void setActionForProperty(String property) {
+ String action = properties.getProperty(property);
+
+ if (action == null) {
+ LOG.info("No rule set for {}, defaulting to WARNING", property);
+ actions.put(property, RuleAction.WARNING);
+ } else if (action.equalsIgnoreCase("warning")) {
+ actions.put(property, RuleAction.WARNING);
+ } else if (action.equalsIgnoreCase("abort")) {
+ actions.put(property, RuleAction.ABORT);
+ } else {
+ LOG.warn("Unknown action {} set for rule {}, defaulting to WARNING",
+ action, property);
+ actions.put(property, RuleAction.WARNING);
+ }
+ }
+
+ @VisibleForTesting
+ public Map<String, RuleAction> getActions() {
+ return actions;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index c22fdb05c22..1b428e8766d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -184,6 +184,10 @@ public Resource getMaxShare() {
return result;
}
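+ /**
+ * Returns the maximum share exactly as configured, without resolving
+ * percentage based values against the cluster resource.
+ */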
+ ConfigurableResource getRawMaxShare() {
+ return maxShare;
+ }
+
public Resource getReservedResource() {
reservedResource.setMemorySize(metrics.getReservedMB());
reservedResource.setVirtualCores(metrics.getReservedVirtualCores());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 151a7ab0867..d8a304db662 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -212,6 +212,8 @@
// Container size threshold for making a reservation.
@VisibleForTesting
Resource reservationThreshold;
+
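+ // True when the Fair Scheduler is instantiated only for FS-to-CS
+ // configuration conversion; in this mode the preemption, continuous
+ // scheduling and monitoring threads are not started.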
+ private boolean migration;
public FairScheduler() {
super(FairScheduler.class.getName());
@@ -1428,7 +1430,7 @@ private void initScheduler(Configuration conf) throws IOException {
allocConf = new AllocationConfiguration(this);
queueMgr.initialize();
- if (continuousSchedulingEnabled) {
+ if (continuousSchedulingEnabled && !migration) {
// Continuous scheduling is deprecated log it on startup
LOG.warn("Continuous scheduling is turned ON. It is deprecated " +
"because it can cause scheduler slowness due to locking issues. " +
@@ -1441,7 +1443,7 @@ private void initScheduler(Configuration conf) throws IOException {
schedulingThread.setDaemon(true);
}
- if (this.conf.getPreemptionEnabled()) {
+ if (this.conf.getPreemptionEnabled() && !migration) {
createPreemptionThread();
}
} finally {
@@ -1495,11 +1497,14 @@ private void startSchedulerThreads() {
@Override
public void serviceInit(Configuration conf) throws Exception {
+ migration = conf.getBoolean(FairSchedulerConfiguration.MIGRATION_MODE, false);
initScheduler(conf);
super.serviceInit(conf);
- // Initialize SchedulingMonitorManager
- schedulingMonitorManager.initialize(rmContext, conf);
+ if (!migration) {
+ // Initialize SchedulingMonitorManager
+ schedulingMonitorManager.initialize(rmContext, conf);
+ }
}
@Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index cfe07c9ef48..c595d8b7645 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -85,6 +85,8 @@
private static final String CONF_PREFIX = "yarn.scheduler.fair.";
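+ /** Internal switch that lets the FS-to-CS converter initialize the Fair
+  * Scheduler without starting its background threads. */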
+ public static final String MIGRATION_MODE = CONF_PREFIX + "migration.mode";
+
public static final String ALLOCATION_FILE = CONF_PREFIX + "allocation.file";
protected static final String DEFAULT_ALLOCATION_FILE = "fair-scheduler.xml";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementConverter.java
new file mode 100644
index 00000000000..f5f3c9dbce3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueuePlacementConverter.java
@@ -0,0 +1,112 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.server.resourcemanager.placement.DefaultPlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.FSPlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PrimaryGroupPlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.SecondaryGroupExistingPlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.SpecifiedPlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserPlacementRule;
+
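+/**
+ * Translates the Fair Scheduler placement policy (the ordered list of
+ * {@link PlacementRule}s) into Capacity Scheduler queue-mapping properties.
+ */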
+class QueuePlacementConverter {
+
+ Map<String, String> convertPlacementPolicy(PlacementManager placementManager,
+ FSConfigToCSConfigRuleHandler rulehandler, boolean userAsDefaultQueue) {
+ StringBuilder mapping = new StringBuilder();
+ Map<String, String> properties = new HashMap<>();
+
+ if (userAsDefaultQueue) {
+ mapping.append("u:%user:%user");
+ }
+
+ int ruleCount = 0;
+ for (PlacementRule rule : placementManager.getPlacementRules()) {
+ if (((FSPlacementRule)rule).getCreateFlag()) {
+ rulehandler.handleQueueAutoCreate(rule.getName());
+ }
+
+ ruleCount++;
+ if (rule instanceof UserPlacementRule) {
+ UserPlacementRule userRule = (UserPlacementRule) rule;
+ if (mapping.length() > 0) {
+ mapping.append(";");
+ }
+
+ // nested rule
+ if (userRule.getParentRule() != null) {
+ PlacementRule pr = userRule.getParentRule();
+ if (pr instanceof PrimaryGroupPlacementRule) {
+ // TODO: wait for YARN-9841
+ mapping.append("u:%user:%primary_group.%user");
+ } else if (pr instanceof SecondaryGroupExistingPlacementRule) {
+ // TODO: wait for YARN-9865
+ mapping.append("u:%user:%secondary_group.%user");
+ } else if (pr instanceof DefaultPlacementRule) {
+ DefaultPlacementRule defaultRule = (DefaultPlacementRule) pr;
+ mapping.append("u:%user:").append(defaultRule.defaultQueueName).append(".%user");
+ } else {
+ throw new UnsupportedOperationException("Unsupported nested rule: " + pr.getClass().getCanonicalName());
+ }
+ } else {
+ if (!userAsDefaultQueue) {
+ mapping.append("u:%user:%user");
+ }
+ }
+ }
+
+ if (rule instanceof SpecifiedPlacementRule) {
+ if (ruleCount > 1) {
+ rulehandler.handleSecifiedNotFirstRule();
+ }
+ properties.put("yarn.scheduler.capacity.queue-mappings-override.enable", "false");
+ }
+
+ if (rule instanceof PrimaryGroupPlacementRule) {
+ if (mapping.length() > 0) {
+ mapping.append(";");
+ }
+ mapping.append("u:%user:%primary_group");
+ }
+
+ if (rule instanceof DefaultPlacementRule) {
+ DefaultPlacementRule defaultRule = (DefaultPlacementRule) rule;
+ if (mapping.length() > 0) {
+ mapping.append(";");
+ }
+ mapping.append("u:%user:").append(defaultRule.defaultQueueName);
+ }
+
+ if (rule instanceof SecondaryGroupExistingPlacementRule) {
+ // TODO: wait for YARN-9840
+ if (mapping.length() > 0) {
+ mapping.append(";");
+ }
+ mapping.append("u:%user:%secondary_group");
+ }
+ }
+
+ if (mapping.length() > 0) {
+ properties.put("yarn.scheduler.capacity.queue-mappings", mapping.toString());
+ }
+
+ return properties;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/UnsupportedPropertyException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/UnsupportedPropertyException.java
new file mode 100644
index 00000000000..f0a2112afc3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/UnsupportedPropertyException.java
@@ -0,0 +1,9 @@
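+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+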
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
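+/**
+ * Thrown by the FS-to-CS configuration converter when a Fair Scheduler
+ * setting has no Capacity Scheduler equivalent and the conversion rules
+ * request an abort instead of a warning.
+ */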
+public class UnsupportedPropertyException extends RuntimeException {
+ private static final long serialVersionUID = 5468104026818355871L;
+
+ public UnsupportedPropertyException(String message) {
+ super(message);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/FSConfigConverterTestCommons.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/FSConfigConverterTestCommons.java
new file mode 100644
index 00000000000..54cd3a4e729
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/FSConfigConverterTestCommons.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+
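+/**
+ * Shared helpers for the FS-to-CS converter tests: writes temporary
+ * fair-scheduler.xml, yarn-site.xml and conversion rules files and captures
+ * System.out / System.err so their content can be asserted on.
+ */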
+public class FSConfigConverterTestCommons {
+ protected final static String TEST_DIR =
+ new File(System.getProperty("test.build.data", "/tmp")).getAbsolutePath();
+ public final static String FS_ALLOC_FILE =
+ new File(TEST_DIR, "test-fair-scheduler.xml").getAbsolutePath();
+ public final static String YARN_SITE_XML =
+ new File(TEST_DIR, "test-yarn-site.xml").getAbsolutePath();
+ public final static String CONVERSION_RULES_FILE =
+ new File(TEST_DIR, "test-conversion-rules.properties").getAbsolutePath();
+ public final static String OUTPUT_DIR = new File(TEST_DIR, "conversion-output").getAbsolutePath();
+
+ private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
+ private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
+
+ static {
+ new File(TEST_DIR, "conversion-output").mkdirs();
+ }
+
+ public static void tearDown() {
+ deleteTestFiles();
+ }
+
+ public FSConfigConverterTestCommons() {
+ setUpStreams();
+ }
+
+ private static final PrintStream ORIGINAL_OUT = System.out;
+ private static final PrintStream ORIGINAL_ERR = System.err;
+
+ private void setUpStreams() {
+ System.setOut(new PrintStream(outContent));
+ System.setErr(new PrintStream(errContent));
+ }
+
+ public static void restoreStreams() {
+ // Put back the original streams saved before redirection; calling
+ // System.setOut(System.out) would be a no-op.
+ System.setOut(ORIGINAL_OUT);
+ System.setErr(ORIGINAL_ERR);
+ }
+
+ public ByteArrayOutputStream getErrContent() {
+ return errContent;
+ }
+
+ private static void deleteTestFiles() {
+ //Files may not be created so we are not strict here!
+ deleteFile(FS_ALLOC_FILE, false);
+ deleteFile(YARN_SITE_XML, false);
+ deleteFile(CONVERSION_RULES_FILE, false);
+ deleteFile(OUTPUT_DIR, false);
+ }
+
+ private static void deleteFile(String f, boolean strict) {
+ boolean delete = new File(f).delete();
+ if (strict && !delete) {
+ throw new RuntimeException("Can't delete test file: " + f);
+ }
+ }
+
+ static void setupFSConfigConversionFiles() throws IOException {
+ configureFairSchedulerXml();
+ configureYarnSiteXmlWithFsAllocFileDefined();
+ configureDummyConversionRulesFile();
+ }
+
+ public static void configureFairSchedulerXml() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(FS_ALLOC_FILE));
+ out.println("");
+ out.println("");
+ out.println("-1.0");
+ out.println("fair");
+ addQueue(out, "");
+ out.println("");
+ out.close();
+ }
+
+ private static void addQueue(PrintWriter out, String additionalConfig) {
+ out.println("");
+ out.println(" fair");
+ out.println(" 1.0");
+ out.println(" 100");
+ out.println(" 120");
+ out.println(" .5");
+
+ if (StringUtils.isNotEmpty(additionalConfig)) {
+ out.println(additionalConfig);
+ }
+ out.println("");
+ }
+
+ public static void configureInvalidFairSchedulerXml() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(FS_ALLOC_FILE));
+ out.println("");
+ out.println("");
+ out.println("");
+ out.println("");
+ out.close();
+ }
+
+ public static void configureEmptyFairSchedulerXml() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(FS_ALLOC_FILE));
+ out.println("");
+ out.println("");
+ out.close();
+ }
+
+ public static void configureYarnSiteXmlWithFsAllocFileDefined() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(YARN_SITE_XML));
+ out.println("");
+ out.println("");
+ out.println("" + FairSchedulerConfiguration.ALLOCATION_FILE + "");
+ out.println("" + FS_ALLOC_FILE + "");
+ out.println("");
+ out.close();
+ }
+
+ public static void configureInvalidYarnSiteXml() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(YARN_SITE_XML));
+ out.println("");
+ out.println("");
+ out.println("" + FairSchedulerConfiguration.ALLOCATION_FILE);
+ out.println("");
+ out.close();
+ }
+
+ public static void configureEmptyYarnSiteXml() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(YARN_SITE_XML));
+ out.println("");
+ out.println("");
+ out.close();
+ }
+
+ public static void configureDummyConversionRulesFile() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(CONVERSION_RULES_FILE));
+ out.println("dummy_key=dummy_value");
+ out.close();
+ }
+
+ public static void configureAbortingConversionRulesFile() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(CONVERSION_RULES_FILE));
+ out.println("maxCapacityPercentage.action=abort");
+ out.println("maxChildCapacity.action=ABORT");
+ out.println("userMaxRunningApps.action=abort");
+ out.println("userMaxAppsDefault.action=ABORT");
+ out.println("dynamicMaxAssign.action=abort");
+ out.println("specifiedNotFirstRule.action=ABORT");
+ out.println("reservationSystem.action=abort");
+ out.println("queueAutoCreate.action=ABORT");
+ out.close();
+ }
+
+ public static void configureInvalidConversionRulesFile() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(CONVERSION_RULES_FILE));
+ out.println("bla");
+ out.close();
+ }
+
+ public static void configureEmptyConversionRulesFile() throws IOException {
+ PrintWriter out = new PrintWriter(new FileWriter(CONVERSION_RULES_FILE));
+ out.close();
+ }
+
+
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index f2073aeb55b..48bc0daffc2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -18,17 +18,8 @@
package org.apache.hadoop.yarn.server.resourcemanager;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.http.lib.StaticUserWebFilter;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
@@ -47,6 +38,8 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigConverter;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigConverterParams;
import org.apache.hadoop.yarn.server.security.http.RMAuthenticationFilterInitializer;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.After;
@@ -55,11 +48,28 @@
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
+import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.CONVERSION_RULES_FILE;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.FS_ALLOC_FILE;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.OUTPUT_DIR;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.YARN_SITE_XML;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.setupFSConfigConversionFiles;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
public class TestResourceManager {
private static final Logger LOG =
LoggerFactory.getLogger(TestResourceManager.class);
-
+
private ResourceManager resourceManager = null;
@Rule
@@ -67,7 +77,7 @@
@Before
public void setUp() throws Exception {
- Configuration conf = new YarnConfiguration();
+ YarnConfiguration conf = new YarnConfiguration();
UserGroupInformation.setConfiguration(conf);
resourceManager = new ResourceManager();
resourceManager.init(conf);
@@ -78,6 +88,7 @@ public void setUp() throws Exception {
@After
public void tearDown() throws Exception {
resourceManager.stop();
+ FSConfigConverterTestCommons.tearDown();
}
private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
@@ -357,4 +368,65 @@ public void testUserProvidedUGIConf() throws Exception {
}
}
+ /**
+ * Example command:
+ * /opt/hadoop/bin/yarn resourcemanager -convert-fs-configuration
+ * -o /tmp/output
+ * -y /opt/hadoop/etc/hadoop/yarn-site.xml
+ * -f /opt/hadoop/etc/hadoop/fair-scheduler.xml
+ * -r /home/systest/sample-rules-config.properties
+ */
+ @Test
+ public void testResourceManagerConvertFSConfigurationDefaults() throws Exception {
+ setupFSConfigConversionFiles();
+
+ ArgumentCaptor<FSConfigToCSConfigConverterParams> conversionParams =
+ ArgumentCaptor.forClass(FSConfigToCSConfigConverterParams.class);
+
+ final String mainSwitch = "-convert-fs-configuration";
+ FSConfigToCSConfigConverter mockConverter =
+ mock(FSConfigToCSConfigConverter.class);
+
+ ResourceManager.initFSArgumentHandler(mockConverter);
+ ResourceManager.main(new String[] {mainSwitch, "-o", OUTPUT_DIR,
+ "-y", YARN_SITE_XML, "-f", FS_ALLOC_FILE, "-r", CONVERSION_RULES_FILE});
+
+ // validate params
+ verify(mockConverter).convert(conversionParams.capture());
+ FSConfigToCSConfigConverterParams params = conversionParams.getValue();
+ LOG.info("FS config converter parameters: " + params);
+
+ assertThat(params.getYarnSiteXmlConfig()).isEqualTo(YARN_SITE_XML);
+ assertThat(params.getFairSchedulerXmlConfig()).isEqualTo(FS_ALLOC_FILE);
+ assertThat(params.getConversionRulesConfig()).isEqualTo(CONVERSION_RULES_FILE);
+ assertThat(params.isConsole()).isEqualTo(false);
+ }
+
+ @Test
+ public void testResourceManagerConvertFSConfigurationWithConsoleParam()
+ throws Exception {
+ setupFSConfigConversionFiles();
+
+ ArgumentCaptor<FSConfigToCSConfigConverterParams> conversionParams =
+ ArgumentCaptor.forClass(FSConfigToCSConfigConverterParams.class);
+
+ final String mainSwitch = "-convert-fs-configuration";
+ FSConfigToCSConfigConverter mockConverter =
+ mock(FSConfigToCSConfigConverter.class);
+
+ ResourceManager.initFSArgumentHandler(mockConverter);
+ ResourceManager.main(new String[] {mainSwitch, "-o", OUTPUT_DIR, "-p", "-y",
+ YARN_SITE_XML, "-f", FS_ALLOC_FILE, "-r", CONVERSION_RULES_FILE});
+
+ // validate params
+ verify(mockConverter).convert(conversionParams.capture());
+ FSConfigToCSConfigConverterParams params = conversionParams.getValue();
+ LOG.info("FS config converter parameters: " + params);
+
+ assertThat(params.getYarnSiteXmlConfig()).isEqualTo(YARN_SITE_XML);
+ assertThat(params.getFairSchedulerXmlConfig()).isEqualTo(FS_ALLOC_FILE);
+ assertThat(params.getConversionRulesConfig()).isEqualTo(CONVERSION_RULES_FILE);
+ assertThat(params.isConsole()).isEqualTo(true);
+ }
+
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigArgumentHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigArgumentHandler.java
new file mode 100644
index 00000000000..fcae92d60b7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigArgumentHandler.java
@@ -0,0 +1,340 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.cli.MissingOptionException;
+import org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.mockito.ArgumentCaptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.CONVERSION_RULES_FILE;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.FS_ALLOC_FILE;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.OUTPUT_DIR;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.YARN_SITE_XML;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.configureDummyConversionRulesFile;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.configureEmptyConversionRulesFile;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.configureEmptyFairSchedulerXml;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.configureEmptyYarnSiteXml;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.configureFairSchedulerXml;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.configureInvalidConversionRulesFile;
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.configureYarnSiteXmlWithFsAllocFileDefined;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+
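+/**
+ * Unit tests for {@link FSConfigToCSConfigArgumentHandler}: command line
+ * parsing, validation of the referenced files and error handling.
+ */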
+public class TestFSConfigToCSConfigArgumentHandler {
+ private static final Logger LOG = LoggerFactory.getLogger(TestFSConfigToCSConfigArgumentHandler.class);
+
+ @Rule
+ public ExpectedException expectedException = ExpectedException.none();
+
+ private FSConfigToCSConfigConverter mockConverter;
+ private FSConfigConverterTestCommons fsTestCommons;
+
+ @Before
+ public void setUp() {
+ fsTestCommons = new FSConfigConverterTestCommons();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ FSConfigConverterTestCommons.tearDown();
+ }
+
+
+ private void setupFSConfigConversionFiles(boolean defineAllocationFile) throws IOException {
+ configureFairSchedulerXml();
+
+ if (defineAllocationFile) {
+ configureYarnSiteXmlWithFsAllocFileDefined();
+ } else {
+ configureEmptyYarnSiteXml();
+ }
+ configureDummyConversionRulesFile();
+ }
+
+
+ private FSConfigToCSConfigArgumentHandler createArgumentHandler() {
+ mockConverter = mock(FSConfigToCSConfigConverter.class);
+ return new FSConfigToCSConfigArgumentHandler(mockConverter);
+ }
+
+ private static String[] getDefaultArgumentsAsArray() {
+ List<String> args = getDefaultArguments();
+ return args.toArray(new String[0]);
+ }
+
+ private static List<String> getDefaultArguments() {
+ return Lists.newArrayList("-y", YARN_SITE_XML, "-o", OUTPUT_DIR);
+ }
+
+ private String[] getArgumentsAsArrayWithDefaults(String... args) {
+ List<String> result = getDefaultArguments();
+ result.addAll(Arrays.asList(args));
+ return result.toArray(new String[0]);
+ }
+
+ private String[] getArgumentsAsArray(String... args) {
+ List<String> result = Lists.newArrayList();
+ result.addAll(Arrays.asList(args));
+ return result.toArray(new String[0]);
+ }
+
+ @Test
+ public void testMissingYarnSiteXmlArgument() throws Exception {
+ setupFSConfigConversionFiles(true);
+
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ String[] args = new String[] {"-o", OUTPUT_DIR};
+
+ expectedException.expect(MissingOptionException.class);
+ expectedException.expectMessage("Missing required option: y");
+
+ argumentHandler.parseAndConvert(args);
+ }
+
+
+ @Test
+ public void testMissingFairSchedulerXmlArgument() throws Exception {
+ setupFSConfigConversionFiles(true);
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ argumentHandler.parseAndConvert(getDefaultArgumentsAsArray());
+ }
+
+ @Test
+ public void testMissingOutputDirArgument() throws Exception {
+ setupFSConfigConversionFiles(true);
+
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ String[] args = new String[] {"-y", YARN_SITE_XML};
+
+ expectedException.expect(MissingOptionException.class);
+ expectedException.expectMessage("Missing required option: o");
+
+ argumentHandler.parseAndConvert(args);
+ }
+
+ @Test
+ public void testMissingRulesConfiguration() throws Exception {
+ setupFSConfigConversionFiles(true);
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ argumentHandler.parseAndConvert(getDefaultArgumentsAsArray());
+ }
+
+ @Test
+ public void testInvalidRulesConfigFile() throws Exception {
+ configureYarnSiteXmlWithFsAllocFileDefined();
+ configureFairSchedulerXml();
+ configureInvalidConversionRulesFile();
+
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ String[] args = getArgumentsAsArrayWithDefaults();
+ argumentHandler.parseAndConvert(args);
+ }
+
+ @Test
+ public void testInvalidOutputDir() throws Exception {
+ configureYarnSiteXmlWithFsAllocFileDefined();
+ configureFairSchedulerXml();
+ configureDummyConversionRulesFile();
+
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ String[] args = getArgumentsAsArray("-y", YARN_SITE_XML, "-o", YARN_SITE_XML);
+
+ expectedException.expect(IllegalArgumentException.class);
+ expectedException.expectMessage("is not a directory");
+ argumentHandler.parseAndConvert(args);
+ }
+
+ @Test
+ public void testFairSchedulerXmlIsNotDefinedIfItsDefinedInYarnSiteXml() throws Exception {
+ setupFSConfigConversionFiles(true);
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+ argumentHandler.parseAndConvert(getDefaultArgumentsAsArray());
+ }
+
+ @Test
+ public void testEmptyYarnSiteXmlSpecified() throws Exception {
+ configureFairSchedulerXml();
+ configureEmptyYarnSiteXml();
+ configureDummyConversionRulesFile();
+
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ String[] args = getArgumentsAsArrayWithDefaults("-f", FS_ALLOC_FILE);
+ argumentHandler.parseAndConvert(args);
+ }
+
+ @Test
+ public void testEmptyFairSchedulerXmlSpecified() throws Exception {
+ configureEmptyFairSchedulerXml();
+ configureEmptyYarnSiteXml();
+ configureDummyConversionRulesFile();
+
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ String[] args = getArgumentsAsArrayWithDefaults("-f", FS_ALLOC_FILE);
+ argumentHandler.parseAndConvert(args);
+ }
+
+ @Test
+ public void testEmptyRulesConfigurationSpecified() throws Exception {
+ configureEmptyFairSchedulerXml();
+ configureEmptyYarnSiteXml();
+ configureEmptyConversionRulesFile();
+
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ String[] args = getArgumentsAsArrayWithDefaults("-f", FS_ALLOC_FILE,
+ "-r", CONVERSION_RULES_FILE);
+ argumentHandler.parseAndConvert(args);
+ }
+
+ @Test
+ public void testConvertFSConfigurationDefaults() throws Exception {
+ setupFSConfigConversionFiles(true);
+
+ ArgumentCaptor<FSConfigToCSConfigConverterParams> conversionParams =
+ ArgumentCaptor.forClass(FSConfigToCSConfigConverterParams.class);
+
+ FSConfigToCSConfigConverter mockConverter =
+ mock(FSConfigToCSConfigConverter.class);
+
+ FSConfigToCSConfigArgumentHandler argumentHandler =
+ new FSConfigToCSConfigArgumentHandler(mockConverter);
+
+ String[] args = getArgumentsAsArrayWithDefaults("-f", FS_ALLOC_FILE,
+ "-r", CONVERSION_RULES_FILE);
+ argumentHandler.parseAndConvert(args);
+
+ // validate params
+ verify(mockConverter).convert(conversionParams.capture());
+ FSConfigToCSConfigConverterParams params = conversionParams.getValue();
+ LOG.info("FS config converter parameters: " + params);
+
+ assertThat(params.getYarnSiteXmlConfig()).isEqualTo(YARN_SITE_XML);
+ assertThat(params.getFairSchedulerXmlConfig()).isEqualTo(FS_ALLOC_FILE);
+ assertThat(params.getConversionRulesConfig()).isEqualTo(CONVERSION_RULES_FILE);
+ assertThat(params.isConsole()).isEqualTo(false);
+ }
+
+ @Test
+ public void testConvertFSConfigurationWithConsoleParam() throws Exception {
+ setupFSConfigConversionFiles(true);
+
+ ArgumentCaptor<FSConfigToCSConfigConverterParams> conversionParams =
+ ArgumentCaptor.forClass(FSConfigToCSConfigConverterParams.class);
+
+ FSConfigToCSConfigConverter mockConverter =
+ mock(FSConfigToCSConfigConverter.class);
+
+ FSConfigToCSConfigArgumentHandler argumentHandler =
+ new FSConfigToCSConfigArgumentHandler(mockConverter);
+
+ String[] args = getArgumentsAsArrayWithDefaults("-f", FS_ALLOC_FILE,
+ "-r", CONVERSION_RULES_FILE, "-p");
+ argumentHandler.parseAndConvert(args);
+
+ // validate params
+ verify(mockConverter).convert(conversionParams.capture());
+ FSConfigToCSConfigConverterParams params = conversionParams.getValue();
+ LOG.info("FS config converter parameters: " + params);
+
+ assertThat(params.getYarnSiteXmlConfig()).isEqualTo(YARN_SITE_XML);
+ assertThat(params.getFairSchedulerXmlConfig()).isEqualTo(FS_ALLOC_FILE);
+ assertThat(params.getConversionRulesConfig()).isEqualTo(CONVERSION_RULES_FILE);
+ assertThat(params.isConsole()).isEqualTo(true);
+ }
+
+ @Test
+ public void testConvertFSConfigurationClusterResource() throws Exception {
+ setupFSConfigConversionFiles(true);
+
+ ArgumentCaptor<FSConfigToCSConfigConverterParams> conversionParams =
+ ArgumentCaptor.forClass(FSConfigToCSConfigConverterParams.class);
+
+ FSConfigToCSConfigConverter mockConverter =
+ mock(FSConfigToCSConfigConverter.class);
+
+ FSConfigToCSConfigArgumentHandler argumentHandler =
+ new FSConfigToCSConfigArgumentHandler(mockConverter);
+
+ String[] args = getArgumentsAsArrayWithDefaults("-f", FS_ALLOC_FILE,
+ "-r", CONVERSION_RULES_FILE, "-p", "-c", "vcores=20, memory-mb=240");
+ argumentHandler.parseAndConvert(args);
+
+ // validate params
+ verify(mockConverter).convert(conversionParams.capture());
+ FSConfigToCSConfigConverterParams params = conversionParams.getValue();
+ LOG.info("FS config converter parameters: " + params);
+
+ assertThat(params.getYarnSiteXmlConfig()).isEqualTo(YARN_SITE_XML);
+ assertThat(params.getFairSchedulerXmlConfig()).isEqualTo(FS_ALLOC_FILE);
+ assertThat(params.getConversionRulesConfig()).isEqualTo(CONVERSION_RULES_FILE);
+ assertThat(params.getClusterResource()).isEqualTo("vcores=20, memory-mb=240");
+ assertThat(params.isConsole()).isEqualTo(true);
+ }
+
+ @Test
+ public void testConvertFSConfigurationErrorHandling() throws Exception {
+ setupFSConfigConversionFiles(true);
+
+ String[] args = getArgumentsAsArrayWithDefaults("-f", FS_ALLOC_FILE,
+ "-r", CONVERSION_RULES_FILE, "-p");
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ doThrow(UnsupportedPropertyException.class).when(mockConverter).convert(any(FSConfigToCSConfigConverterParams.class));
+ argumentHandler.parseAndConvert(args);
+ assertThat(fsTestCommons.getErrContent().toString()).contains("Fatal error during conversion!");
+ }
+
+ @Test
+ public void testConvertFSConfigurationErrorHandling2() throws Exception {
+ setupFSConfigConversionFiles(true);
+
+ String[] args = getArgumentsAsArrayWithDefaults("-f", FS_ALLOC_FILE,
+ "-r", CONVERSION_RULES_FILE, "-p");
+ FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();
+
+ doThrow(ConversionException.class).when(mockConverter).convert(any(FSConfigToCSConfigConverterParams.class));
+ argumentHandler.parseAndConvert(args);
+ assertThat(fsTestCommons.getErrContent().toString()).contains("Error occurred during property conversion");
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigConverter.java
new file mode 100644
index 00000000000..105133bced7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigConverter.java
@@ -0,0 +1,791 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.FSConfigConverterTestCommons.OUTPUT_DIR;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.DYNAMIC_MAX_ASSIGN;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.MAX_CAPACITY_PERCENTAGE;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.MAX_CHILD_CAPACITY;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.QUEUE_AUTO_CREATE;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.RESERVATION_SYSTEM;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.SPECIFIED_NOT_FIRST;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.USER_MAX_APPS_DEFAULT;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.USER_MAX_RUNNING_APPS;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.RuleAction.ABORT;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doThrow;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import com.google.common.collect.Sets;
+
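+/**
+ * Unit tests for {@link FSConfigToCSConfigConverter}: queue hierarchy, ACL,
+ * capacity and yarn-site property conversion.
+ */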
+@RunWith(MockitoJUnitRunner.class)
+public class TestFSConfigToCSConfigConverter {
+ @Mock
+ private FSConfigToCSConfigRuleHandler ruleHandler;
+
+ private FSConfigToCSConfigConverter converter;
+ private Configuration config;
+ private final Resource CLUSTER_RESOURCE = Resource.newInstance(16384, 16);
+ private ByteArrayOutputStream yarnSiteOut;
+ private ByteArrayOutputStream csConfigOut;
+
+ @Rule
+ public ExpectedException expectedException = ExpectedException.none();
+
+ private static final Set<String> ALL_QUEUES =
+ Sets.newHashSet("root",
+ "root.default",
+ "root.admins",
+ "root.users",
+ "root.admins.alice",
+ "root.admins.bob",
+ "root.users.joe",
+ "root.users.john");
+
+ private static final String FILE_PREFIX = "file:";
+ private static final String FAIR_SCHEDULER_XML = prepareFileName("fair-scheduler.xml");
+
+ private static String prepareFileName(String f) {
+ return FILE_PREFIX + new File("src/test/resources/" + f).getAbsolutePath();
+ }
+
+ private static final String FAIR_SCHEDULER_XML_INVALID = prepareFileName("fair-scheduler-invalid.xml");
+ private static final String YARN_SITE_XML = prepareFileName("yarn-site-with-allocation-file-ref.xml");
+ private static final String YARN_SITE_XML_NO_REF_TO_FS_XML = prepareFileName("yarn-site.xml");
+ private static final String YARN_SITE_XML_INVALID = prepareFileName("yarn-site-with-invalid-allocation-file-ref.xml");
+ private static final String CONVERSION_RULES_FILE = new File("src/test/resources/conversion-rules.properties").getAbsolutePath();
+
+ @Before
+ public void setup() {
+ config = new Configuration(false);
+ config.set(FairSchedulerConfiguration.ALLOCATION_FILE, FAIR_SCHEDULER_XML);
+ createConverter(ruleHandler);
+ }
+
+ private void createConverter(FSConfigToCSConfigRuleHandler ruleHandler) {
+ converter = new FSConfigToCSConfigConverter(ruleHandler);
+ yarnSiteOut = new ByteArrayOutputStream();
+ csConfigOut = new ByteArrayOutputStream();
+
+ converter.setCapacitySchedulerConfigOutputStream(csConfigOut);
+ converter.setYarnSiteOutputStream(yarnSiteOut);
+ }
+
+ private FSConfigToCSConfigConverterParams.Builder createDefaultParamsBuilder() {
+ return FSConfigToCSConfigConverterParams.Builder.create()
+ .withYarnSiteXmlConfig(YARN_SITE_XML)
+ .withOutputDirectory(OUTPUT_DIR);
+ }
+
+ private FSConfigToCSConfigConverterParams.Builder createParamsBuilder(String yarnSiteConfig) {
+ return FSConfigToCSConfigConverterParams.Builder.create()
+ .withYarnSiteXmlConfig(yarnSiteConfig)
+ .withOutputDirectory(OUTPUT_DIR);
+ }
+
+ @Test
+ public void testDefaultMaxApplications() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+ int maxApps =
+ conf.getInt(CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS, -1);
+
+ assertEquals("Default max apps", 15, maxApps);
+ }
+
+ @Test
+ public void testDefaultMaxAMShare() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+ String maxAmShare =
+ conf.get(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT);
+
+ assertEquals("Default max AM share", "0.16", maxAmShare);
+ }
+
+ @Test
+ public void testConvertACLs() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ // root
+ assertEquals("root submit ACL", "alice,bob,joe,john hadoop_users",
+ conf.get(PREFIX + "root.acl_submit_applications"));
+ assertEquals("root admin ACL", "alice,bob,joe,john hadoop_users",
+ conf.get(PREFIX + "root.acl_administer_queue"));
+
+ // root.admins.bob
+ assertEquals("root.admis.bob submit ACL", "bob ",
+ conf.get(PREFIX + "root.admins.bob.acl_submit_applications"));
+ assertEquals("root.admins.bob admin ACL", "bob ",
+ conf.get(PREFIX + "root.admins.bob.acl_administer_queue"));
+
+ // root.admins.alice
+ assertEquals("root.admis.alice submit ACL", "alice ",
+ conf.get(PREFIX + "root.admins.alice.acl_submit_applications"));
+ assertEquals("root.admins.alice admin ACL", "alice ",
+ conf.get(PREFIX + "root.admins.alice.acl_administer_queue"));
+
+ // root.users.john
+ assertEquals("root.users.john submit ACL", "john ",
+ conf.get(PREFIX + "root.users.john.acl_submit_applications"));
+ assertEquals("root.users.john admin ACL", "john ",
+ conf.get(PREFIX + "root.users.john.acl_administer_queue"));
+
+ // root.users.joe
+ assertEquals("root.users.joe submit ACL", "joe ",
+ conf.get(PREFIX + "root.users.joe.acl_submit_applications"));
+ assertEquals("root.users.joe admin ACL", "joe ",
+ conf.get(PREFIX + "root.users.joe.acl_administer_queue"));
+ }
+
+ @Test
+ public void testConvertQueueHierarchy() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ // root children
+ assertEquals("root children", "default,admins,users",
+ conf.get(PREFIX + "root.queues"));
+
+ // root.admins children
+ assertEquals("root.admins children", "bob,alice",
+ conf.get(PREFIX + "root.admins.queues"));
+
+ // root.default children - none
+ assertNull("root.default children", conf.get(PREFIX + "root.default" +
+ ".queues"));
+
+ // root.users children
+ assertEquals("root.users children", "john,joe",
+ conf.get(PREFIX + "root.users.queues"));
+
+ Set<String> leafs = Sets.difference(ALL_QUEUES,
+ Sets.newHashSet("root",
+ "root.default",
+ "root.admins",
+ "root.users"));
+
+ assertNoValueForQueues(leafs, ".queues", conf);
+ }
+
+ @Test
+ public void testConvertQueueHierarchyWithSameLeafQueues() throws Exception {
+ expectedException.expect(ConversionException.class);
+ expectedException.expectMessage("Leaf queues must be unique");
+
+ String absolutePath =
+ new File("src/test/resources/fair-scheduler-sameleafqueue.xml")
+ .getAbsolutePath();
+
+ config.set(FairSchedulerConfiguration.ALLOCATION_FILE,
+ FILE_PREFIX + absolutePath);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+ }
+
+ @Test
+ public void testQueueMaxAMShare() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ // default setting
+ assertEquals("Default AM share", "0.16",
+ conf.get(PREFIX + "maximum-am-resource-percent"));
+
+ // root.admins.bob
+ assertEquals("root.admins.bob AM share", "1.0",
+ conf.get(PREFIX + "root.admins.bob.maximum-am-resource-percent"));
+
+ // root.admins.alice
+ assertEquals("root.admins.alice AM share", "0.15",
+ conf.get(PREFIX + "root.admins.alice.maximum-am-resource-percent"));
+
+ Set<String> remaining = Sets.difference(ALL_QUEUES,
+ Sets.newHashSet("root.admins.bob", "root.admins.alice"));
+ assertNoValueForQueues(remaining, ".maximum-am-resource-percent", conf);
+ }
+
+ @Test
+ public void testQueueMaxRunningApps() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ // default setting
+ assertEquals("Default max apps", 15,
+ conf.getInt(PREFIX + "maximum-applications", -1));
+
+ // root.admins.alice
+ assertEquals("root.admins.alice max apps", 2,
+ conf.getInt(PREFIX + "root.admins.alice.maximum-applications", -1));
+
+ Set<String> remaining = Sets.difference(ALL_QUEUES,
+ Sets.newHashSet("root.admins.alice"));
+ assertNoValueForQueues(remaining, ".maximum-applications", conf);
+ }
+
+ @Test
+ public void testQueueMaxAllocations() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ // root.admins vcores + mb
+ assertEquals("root.admins max vcores", 3,
+ conf.getInt(PREFIX + "root.admins.maximum-allocation-vcores", -1));
+ assertEquals("root.admins max memory", 4096,
+ conf.getInt(PREFIX + "root.admins.maximum-allocation-mb", -1));
+
+ // root.users.john max vcores + mb
+ assertEquals("root.users.john max vcores", 2,
+ conf.getInt(PREFIX + "root.users.john.maximum-allocation-vcores", -1));
+ assertEquals("root.users.john max memory", 8192,
+ conf.getInt(PREFIX + "root.users.john.maximum-allocation-mb", -1));
+
+ Set<String> remaining = Sets.difference(ALL_QUEUES,
+ Sets.newHashSet("root.admins", "root.users.john"));
+ assertNoValueForQueues(remaining, ".maximum-allocation-vcores", conf);
+ assertNoValueForQueues(remaining, ".maximum-allocation-mb", conf);
+ }
+
+ @Test
+ public void testQueuePreemptionDisabled() throws Exception {
+ config.setBoolean(FairSchedulerConfiguration.PREEMPTION, true);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ assertEquals("root.admins.alice preemption setting", true,
+ conf.getBoolean(PREFIX + "root.admins.alice.disable_preemption",
+ false));
+ assertEquals("root.users.joe preemption setting", true,
+ conf.getBoolean(PREFIX + "root.users.joe.disable_preemption", false));
+
+ Set<String> remaining = Sets.difference(ALL_QUEUES,
+ Sets.newHashSet("root.admins.alice", "root.users.joe"));
+ assertNoValueForQueues(remaining, ".disable_preemption", conf);
+ }
+
+ @Test
+ public void testQueuePreemptionDisabledWhenGlobalPreemptionDisabled()
+ throws Exception {
+ config.setBoolean(FairSchedulerConfiguration.PREEMPTION, false);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ assertNoValueForQueues(ALL_QUEUES, ".disable_preemption", conf);
+ }
+
+ @Test
+ public void testChildCapacity() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ // root
+ assertEquals("root.default capacity", "33.333",
+ conf.get(PREFIX + "root.default.capacity"));
+ assertEquals("root.admins capacity", "33.333",
+ conf.get(PREFIX + "root.admins.capacity"));
+ assertEquals("root.users capacity", "66.667",
+ conf.get(PREFIX + "root.users.capacity"));
+
+ // root.users
+ assertEquals("root.users.john capacity", "25.000",
+ conf.get(PREFIX + "root.users.john.capacity"));
+ assertEquals("root.users.joe capacity", "75.000",
+ conf.get(PREFIX + "root.users.joe.capacity"));
+
+ // root.admins
+ assertEquals("root.admins.alice capacity", "75.000",
+ conf.get(PREFIX + "root.admins.alice.capacity"));
+ assertEquals("root.admins.bob capacity", "25.000",
+ conf.get(PREFIX + "root.admins.bob.capacity"));
+ }
+
+ @Test
+ public void testQueueMaximumCapacity() throws Exception {
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ assertEquals("root.users.joe maximum capacity", "[memory=8192, vcores=8]",
+ conf.get(PREFIX + "root.users.joe.maximum-capacity"));
+ assertEquals("root.admins.bob maximum capacity", "[memory=8192, vcores=2]",
+ conf.get(PREFIX + "root.admins.bob.maximum-capacity"));
+ assertEquals("root.admins.alice maximum capacity",
+ "[memory=16384, vcores=4]",
+ conf.get(PREFIX + "root.admins.alice.maximum-capacity"));
+
+ Set<String> remaining = Sets.difference(ALL_QUEUES,
+ Sets.newHashSet("root.users.joe",
+ "root.admins.bob",
+ "root.admins.alice"));
+ assertNoValueForQueues(remaining, ".maximum-capacity", conf);
+ }
+
+ @Test
+ public void testQueueAutoCreateChildQueue() throws Exception {
+ config.setBoolean(FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS, true);
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ assertTrueForQueues(ALL_QUEUES, ".auto-create-child-queue.enabled", conf);
+ }
+
+ @Test
+ public void testQueueSizeBasedWeightEnabled() throws Exception {
+ config.setBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT, true);
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ assertTrueForQueues(ALL_QUEUES,
+ ".ordering-policy.fair.enable-size-based-weight", conf);
+ }
+
+ @Test
+ public void testQueueSizeBasedWeightDisabled() throws Exception {
+ config.setBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT, false);
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ assertNoValueForQueues(ALL_QUEUES,
+ ".ordering-policy.fair.enable-size-based-weight", conf);
+ }
+
+ @Test
+ public void testQueueOrderingPolicy() throws Exception {
+ String absolutePath =
+ new File("src/test/resources/fair-scheduler-orderingpolicy.xml").getAbsolutePath();
+ config.set(FairSchedulerConfiguration.ALLOCATION_FILE,
+ FILE_PREFIX + absolutePath);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedCSconfig();
+
+ // root
+ assertEquals("root ordering policy", "fair",
+ conf.get(PREFIX + "root.ordering-policy"));
+ assertEquals("root.default ordering policy", "fair",
+ conf.get(PREFIX + "root.default.ordering-policy"));
+ assertEquals("root.admins ordering policy", "fair",
+ conf.get(PREFIX + "root.admins.ordering-policy"));
+ assertEquals("root.users ordering policy", "fair",
+ conf.get(PREFIX + "root.users.ordering-policy"));
+
+ // root.users
+ assertEquals("root.users.joe ordering policy", "fair",
+ conf.get(PREFIX + "root.users.joe.ordering-policy"));
+ assertEquals("root.users.john ordering policy", "FIFO",
+ conf.get(PREFIX + "root.users.john.ordering-policy"));
+
+ // root.admins
+ assertEquals("root.admins.alice ordering policy", "FIFO",
+ conf.get(PREFIX + "root.admins.alice.ordering-policy"));
+ assertEquals("root.admins.bob ordering policy", "fair",
+ conf.get(PREFIX + "root.admins.bob.ordering-policy"));
+ }
+
+ @Test
+ public void testMixedQueueOrderingPolicy() throws Exception {
+ expectedException.expect(ConversionException.class);
+ expectedException.expectMessage(
+ "DRF ordering policy cannot be used together with fifo/fair");
+ String absolutePath =
+ new File("src/test/resources/fair-scheduler-orderingpolicy-mixed.xml").getAbsolutePath();
+ config.set(FairSchedulerConfiguration.ALLOCATION_FILE,
+ FILE_PREFIX + absolutePath);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+ }
+
+ @Test
+ public void testQueueMaxChildCapacityNotSupported() throws Exception {
+ expectedException.expect(UnsupportedPropertyException.class);
+ expectedException.expectMessage("test");
+
+ doThrow(new UnsupportedPropertyException("test"))
+ .when(ruleHandler).handleMaxChildCapacity();
+
+ converter.convert(config, CLUSTER_RESOURCE);
+ }
+
+ @Test
+ public void testReservationSystemNotSupported() throws Exception {
+ expectedException.expect(UnsupportedPropertyException.class);
+ expectedException.expectMessage("maxCapacity");
+
+ doThrow(new UnsupportedPropertyException("maxCapacity"))
+ .when(ruleHandler).handleMaxChildCapacity();
+ config.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+ }
+
+ @Test
+ public void testUserMaxAppsNotSupported() throws Exception {
+ expectedException.expect(UnsupportedPropertyException.class);
+ expectedException.expectMessage("userMaxApps");
+
+ doThrow(new UnsupportedPropertyException("userMaxApps"))
+ .when(ruleHandler).handleUserMaxApps();
+
+ converter.convert(config, CLUSTER_RESOURCE);
+ }
+
+ @Test
+ public void testUserMaxAppsDefaultNotSupported() throws Exception {
+ expectedException.expect(UnsupportedPropertyException.class);
+ expectedException.expectMessage("userMaxAppsDefault");
+
+ doThrow(new UnsupportedPropertyException("userMaxAppsDefault"))
+ .when(ruleHandler).handleUserMaxAppsDefault();
+
+ converter.convert(config, CLUSTER_RESOURCE);
+ }
+
+ @SuppressWarnings("deprecation")
+ @Test
+ public void testSiteContinuousSchedulingConversion() throws Exception {
+ config.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
+ true);
+ config.setInt(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
+ 666);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedYarnSiteConfig();
+ assertEquals("Cont. scheduling", true, conf.getBoolean(
+ CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, false));
+ assertEquals("Scheduling interval", 666,
+ conf.getInt(
+ "yarn.scheduler.capacity.schedule-asynchronously.scheduling-interval-ms", -1));
+ }
+
+ @Test
+ public void testSiteMinimumAllocationIncrementConversion() throws Exception {
+ config.setInt("yarn.resource-types.memory-mb.increment-allocation", 11);
+ config.setInt("yarn.resource-types.vcores.increment-allocation", 5);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedYarnSiteConfig();
+ assertEquals("Memory alloc increment", 11,
+ conf.getInt("yarn.scheduler.minimum-allocation-mb", -1));
+ assertEquals("Vcore increment", 5,
+ conf.getInt("yarn.scheduler.minimum-allocation-vcores", -1));
+ }
+
+ @Test
+ public void testSitePreemptionConversion() throws Exception {
+ config.setBoolean(FairSchedulerConfiguration.PREEMPTION, true);
+ config.setInt(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 123);
+ config.setInt(
+ FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
+ 321);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedYarnSiteConfig();
+ assertEquals("Preemption enabled", true,
+ conf.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
+ false));
+ assertEquals("Wait time before kill", 123,
+ conf.getInt(
+ CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
+ -1));
+ assertEquals("Starvation check wait time", 321,
+ conf.getInt(
+ CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
+ -1));
+ }
+
+ @Test
+ public void testSiteAssignMultipleConversion() throws Exception {
+ config.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedYarnSiteConfig();
+ assertEquals("Assign multiple", true,
+ conf.getBoolean(CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED,
+ false));
+ }
+
+ @Test
+ public void testSiteMaxAssignConversion() throws Exception {
+ config.setInt(FairSchedulerConfiguration.MAX_ASSIGN, 111);
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedYarnSiteConfig();
+ assertEquals("Max assign", 111,
+ conf.getInt(CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT,
+ -1));
+ }
+
+ @Test
+ public void testSiteLocalityThresholdConversion() throws Exception {
+ config.set(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE, "123.123");
+ config.set(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK, "321.321");
+
+ converter.convert(config, CLUSTER_RESOURCE);
+
+ Configuration conf = getConvertedYarnSiteConfig();
+ assertEquals("Locality threshold node", "123.123",
+ conf.get(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY));
+ assertEquals("Locality threshold rack", "321.321",
+ conf.get(
+ CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY));
+ }
+
+ @Test
+ public void testConvertFSConfigurationClusterResource() throws Exception {
+ FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder()
+ .withClusterResource("vcores=20, memory-mb=240")
+ .build();
+ converter.convert(params);
+ assertThat(converter.getClusterResource()).isEqualTo(Resource.newInstance(240, 20));
+ }
+
+ @Test
+ public void testConvertFSConfigurationPercentageModeIsUsedAndClusterResourceDefined() throws Exception {
+ FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder()
+ .withClusterResource("vcores=20, memory-mb=240")
+ .build();
+ converter.convert(params);
+ assertThat(converter.getClusterResource()).isEqualTo(Resource.newInstance(240, 20));
+ }
+
+ @Test
+ public void testConvertFSConfigurationPercentageModeIsUsedAndClusterResourceNotDefined() throws Exception {
+ FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder()
+ .build();
+
+ expectedException.expect(ConversionException.class);
+ expectedException.expectMessage("cluster resource parameter is not defined via CLI");
+
+ converter.convert(params);
+ }
+
+ @Test
+ public void testConvertFSConfigurationClusterResourceInvalid() throws Exception {
+ FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder()
+ .withClusterResource("vcores=20, memory-mb=240G")
+ .build();
+
+ expectedException.expect(ConversionException.class);
+ expectedException.expectMessage("Error while parsing resource");
+
+ converter.convert(params);
+ }
+
+ @Test
+ public void testConvertFSConfigurationClusterResourceInvalid2() throws Exception {
+ FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder()
+ .withClusterResource("vcores=20, memmmm=240")
+ .build();
+
+ expectedException.expect(ConversionException.class);
+ expectedException.expectMessage("Error while parsing resource");
+
+ converter.convert(params);
+ }
+
+ @Test
+ public void testConvertFSConfigurationRulesFile() throws Exception {
+ FSConfigToCSConfigRuleHandler ruleHandler =
+ new FSConfigToCSConfigRuleHandler();
+ createConverter(ruleHandler);
+
+ FSConfigToCSConfigConverterParams params =
+ createDefaultParamsBuilder()
+ .withConversionRulesConfig(CONVERSION_RULES_FILE)
+ .withClusterResource("vcores=20, memory-mb=2400")
+ .build();
+
+ try {
+ converter.convert(params);
+ fail("Should have thrown UnsupportedPropertyException!");
+ } catch (UnsupportedPropertyException e) {
+ // need to catch the exception so that the rule actions can be checked below
+ }
+
+ ruleHandler = converter.getRuleHandler();
+ Map<String, FSConfigToCSConfigRuleHandler.RuleAction> actions =
+ ruleHandler.getActions();
+
+ assertThat(actions.get(MAX_CAPACITY_PERCENTAGE)).isEqualTo(ABORT);
+ assertThat(actions.get(MAX_CHILD_CAPACITY)).isEqualTo(ABORT);
+ assertThat(actions.get(USER_MAX_RUNNING_APPS)).isEqualTo(ABORT);
+ assertThat(actions.get(USER_MAX_APPS_DEFAULT)).isEqualTo(ABORT);
+ assertThat(actions.get(DYNAMIC_MAX_ASSIGN)).isEqualTo(ABORT);
+ assertThat(actions.get(SPECIFIED_NOT_FIRST)).isEqualTo(ABORT);
+ assertThat(actions.get(RESERVATION_SYSTEM)).isEqualTo(ABORT);
+ assertThat(actions.get(QUEUE_AUTO_CREATE)).isEqualTo(ABORT);
+ }
+
+ @Test
+ public void testConvertFSConfigurationUndefinedYarnSiteConfig() throws Exception {
+ FSConfigToCSConfigConverterParams params =
+ FSConfigToCSConfigConverterParams.Builder.create()
+ .withYarnSiteXmlConfig(null)
+ .withOutputDirectory(OUTPUT_DIR)
+ .build();
+
+ expectedException.expect(IllegalArgumentException.class);
+ expectedException.expectMessage("yarn-site.xml configuration is not defined");
+
+ converter.convert(params);
+ }
+
+ @Test
+ public void testConvertCheckOutputDir() throws Exception {
+ FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder()
+ .withClusterResource("vcores=20, memory-mb=240")
+ .build();
+
+ converter.convert(params);
+
+ Configuration conf = getConvertedCSconfig(OUTPUT_DIR);
+
+ File capacityFile = new File(OUTPUT_DIR, "capacity-scheduler.xml");
+ assertThat(capacityFile.exists()).isTrue();
+ assertThat(capacityFile.length()).isGreaterThan(0L);
+ assertThat(conf.size()).isGreaterThan(0);
+
+ File yarnSiteFile = new File(OUTPUT_DIR, "yarn-site.xml");
+ assertThat(yarnSiteFile.exists()).isTrue();
+ assertThat(yarnSiteFile.length()).isGreaterThan(0L);
+ }
+
+ @Test
+ public void testFairSchedulerXmlIsNotDefinedNeitherDirectlyNorInYarnSiteXml() throws Exception {
+ FSConfigToCSConfigConverterParams params = createParamsBuilder(YARN_SITE_XML_NO_REF_TO_FS_XML)
+ .withClusterResource("vcores=20, memory-mb=240")
+ .build();
+
+ expectedException.expect(IllegalArgumentException.class);
+ expectedException.expectMessage("fair-scheduler.xml is not defined");
+ converter.convert(params);
+ }
+
+ @Test
+ public void testInvalidFairSchedulerXml() throws Exception {
+ FSConfigToCSConfigConverterParams params = createDefaultParamsBuilder()
+ .withClusterResource("vcores=20, memory-mb=240")
+ .withFairSchedulerXmlConfig(FAIR_SCHEDULER_XML_INVALID)
+ .build();
+
+ expectedException.expect(RuntimeException.class);
+ converter.convert(params);
+ }
+
+ @Test
+ public void testInvalidYarnSiteXml() throws Exception {
+ FSConfigToCSConfigConverterParams params = createParamsBuilder(YARN_SITE_XML_INVALID)
+ .withClusterResource("vcores=20, memory-mb=240")
+ .build();
+
+ expectedException.expect(RuntimeException.class);
+ converter.convert(params);
+ }
+
+ private void assertNoValueForQueues(Set<String> queues, String postfix,
+ Configuration csConfig) {
+ for (String queue : queues) {
+ String key = PREFIX + queue + postfix;
+ assertNull("Key " + key + " has value, but it should be null",
+ csConfig.get(key));
+ }
+ }
+
+ private void assertTrueForQueues(Set<String> queues, String postfix,
+ Configuration csConfig) {
+ for (String queue : queues) {
+ String key = PREFIX + queue + postfix;
+ assertTrue("Key " + key + " is false, should be true",
+ csConfig.getBoolean(key, false));
+ }
+ }
+
+ private Configuration getConvertedYarnSiteConfig() {
+ ByteArrayInputStream input =
+ new ByteArrayInputStream(yarnSiteOut.toByteArray());
+ Configuration conf = new Configuration(false);
+ conf.addResource(input);
+
+ return conf;
+ }
+
+ private Configuration getConvertedCSconfig() {
+ ByteArrayInputStream input =
+ new ByteArrayInputStream(csConfigOut.toByteArray());
+ Configuration conf = new Configuration(false);
+ conf.addResource(input);
+
+ return conf;
+ }
+
+ private Configuration getConvertedCSconfig(String dir) throws IOException {
+ File capacityFile = new File(dir, "capacity-scheduler.xml");
+ ByteArrayInputStream input = new ByteArrayInputStream(FileUtils.readFileToByteArray(capacityFile));
+ Configuration conf = new Configuration(false);
+ conf.addResource(input);
+
+ return conf;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigRuleHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigRuleHandler.java
new file mode 100644
index 00000000000..386dc62bd75
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSConfigToCSConfigRuleHandler.java
@@ -0,0 +1,126 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.DYNAMIC_MAX_ASSIGN;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.MAX_CAPACITY_PERCENTAGE;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.MAX_CHILD_CAPACITY;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.MAX_CHILD_QUEUE_LIMIT;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.QUEUE_AUTO_CREATE;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.RESERVATION_SYSTEM;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.SPECIFIED_NOT_FIRST;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.USER_MAX_APPS_DEFAULT;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSConfigToCSConfigRuleHandler.USER_MAX_RUNNING_APPS;
+
+import java.io.IOException;
+import java.util.Properties;
+
+import org.junit.Test;
+
+public class TestFSConfigToCSConfigRuleHandler {
+ private static final String ABORT = "abort";
+ private static final String WARNING = "warning";
+
+ private FSConfigToCSConfigRuleHandler ruleHandler;
+
+ @Test
+ public void testInitPropertyActionsToWarning() throws IOException {
+ ruleHandler = new FSConfigToCSConfigRuleHandler(new Properties());
+
+ ruleHandler.handleChildQueueCount("test", 1);
+ ruleHandler.handleDynamicMaxAssign();
+ ruleHandler.handleMaxCapacityPercentage("test");
+ ruleHandler.handleMaxChildCapacity();
+ ruleHandler.handleQueueAutoCreate("test");
+ ruleHandler.handleReservationSystem();
+ ruleHandler.handleSecifiedNotFirstRule();
+ ruleHandler.handleUserMaxApps();
+ ruleHandler.handleUserMaxAppsDefault();
+ }
+
+ @Test
+ public void testAllRulesWarning() throws IOException {
+ Properties rules = new Properties();
+ rules.put(DYNAMIC_MAX_ASSIGN, WARNING);
+ rules.put(MAX_CAPACITY_PERCENTAGE, WARNING);
+ rules.put(MAX_CHILD_CAPACITY, WARNING);
+ rules.put(QUEUE_AUTO_CREATE, WARNING);
+ rules.put(RESERVATION_SYSTEM, WARNING);
+ rules.put(SPECIFIED_NOT_FIRST, WARNING);
+ rules.put(USER_MAX_APPS_DEFAULT, WARNING);
+ rules.put(USER_MAX_RUNNING_APPS, WARNING);
+
+ ruleHandler = new FSConfigToCSConfigRuleHandler(rules);
+
+ ruleHandler.handleDynamicMaxAssign();
+ ruleHandler.handleMaxCapacityPercentage("test");
+ ruleHandler.handleMaxChildCapacity();
+ ruleHandler.handleQueueAutoCreate("test");
+ ruleHandler.handleReservationSystem();
+ ruleHandler.handleSecifiedNotFirstRule();
+ ruleHandler.handleUserMaxApps();
+ ruleHandler.handleUserMaxAppsDefault();
+ }
+
+ @Test
+ public void testAllRulesAbort() throws IOException {
+ Properties rules = new Properties();
+ rules.put(DYNAMIC_MAX_ASSIGN, ABORT);
+ rules.put(MAX_CAPACITY_PERCENTAGE, ABORT);
+ rules.put(MAX_CHILD_CAPACITY, ABORT);
+ rules.put(QUEUE_AUTO_CREATE, ABORT);
+ rules.put(RESERVATION_SYSTEM, ABORT);
+ rules.put(SPECIFIED_NOT_FIRST, ABORT);
+ rules.put(USER_MAX_APPS_DEFAULT, ABORT);
+ rules.put(USER_MAX_RUNNING_APPS, ABORT);
+ rules.put(MAX_CHILD_QUEUE_LIMIT, "1");
+
+ ruleHandler = new FSConfigToCSConfigRuleHandler(rules);
+
+ expectAbort(() -> ruleHandler.handleChildQueueCount("test", 2),
+ ConversionException.class);
+ expectAbort(() -> ruleHandler.handleDynamicMaxAssign());
+ expectAbort(() -> ruleHandler.handleMaxCapacityPercentage("test"));
+ expectAbort(() -> ruleHandler.handleMaxChildCapacity());
+ expectAbort(() -> ruleHandler.handleQueueAutoCreate("test"));
+ expectAbort(() -> ruleHandler.handleReservationSystem());
+ expectAbort(() -> ruleHandler.handleSecifiedNotFirstRule());
+ expectAbort(() -> ruleHandler.handleUserMaxApps());
+ expectAbort(() -> ruleHandler.handleUserMaxAppsDefault());
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testMaxChildQueueCountNotInteger() throws IOException {
+ Properties rules = new Properties();
+ rules.put(MAX_CHILD_QUEUE_LIMIT, "abc");
+
+ ruleHandler = new FSConfigToCSConfigRuleHandler(rules);
+
+ ruleHandler.handleChildQueueCount("test", 1);
+ }
+
+ private void expectAbort(VoidCall call) {
+ expectAbort(call, UnsupportedPropertyException.class);
+ }
+
+ private void expectAbort(VoidCall call, Class<?> exceptionClass) {
+ boolean exceptionThrown = false;
+ Throwable thrown = null;
+
+ try {
+ call.apply();
+ } catch (Throwable t) {
+ thrown = t;
+ exceptionThrown = true;
+ }
+
+ assertTrue("Exception was not thrown", exceptionThrown);
+ assertEquals("Unexpected exception", exceptionClass, thrown.getClass());
+ }
+
+ @FunctionalInterface
+ private interface VoidCall {
+ void apply();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/conversion-rules.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/conversion-rules.properties
new file mode 100644
index 00000000000..b3e421d8d73
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/conversion-rules.properties
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License" you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+maxCapacityPercentage.action=abort
+maxChildCapacity.action=ABORT
+userMaxRunningApps.action=abort
+userMaxAppsDefault.action=ABORT
+dynamicMaxAssign.action=abort
+specifiedNotFirstRule.action=ABORT
+reservationSystem.action=abort
+queueAutoCreate.action=ABORT
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-invalid.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-invalid.xml
new file mode 100644
index 00000000000..c7a0b0b5598
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-invalid.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-max-resources-percentage.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-max-resources-percentage.xml
new file mode 100644
index 00000000000..855e9fa36f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-max-resources-percentage.xml
@@ -0,0 +1,73 @@
+
+
+
+ 1.0
+ drf
+ alice,bob,joe,john hadoop_users
+ alice,bob,joe,john hadoop_users
+
+ 1.0
+ drf
+
+
+ 1.0
+ drf
+
+ 1.0
+ drf
+ john
+ john
+ vcores=2,memory-mb=8192
+
+
+ memory-mb=50.0%, vcores=50.0%
+ 3.0
+ false
+ drf
+ joe
+ joe
+
+
+
+ memory-mb=8192, vcores=1
+ 1.0
+ drf
+ vcores=3,memory-mb=4096
+
+ memory-mb=16384, vcores=4
+ 2
+ 3.0
+ false
+ drf
+ alice
+ alice
+ 0.15
+ memory-mb=16384, vcores=4
+
+
+ memory-mb=8192, vcores=2
+ 1.0
+ drf
+ bob
+ bob
+ -1.0
+
+
+
+ 23
+ 24
+ 0.12
+ 15
+ fair
+ 0.16
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-orderingpolicy-mixed.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-orderingpolicy-mixed.xml
new file mode 100644
index 00000000000..9c13360cd8c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-orderingpolicy-mixed.xml
@@ -0,0 +1,72 @@
+
+
+
+ 1.0
+ drf
+ alice,bob,joe,john hadoop_users
+ alice,bob,joe,john hadoop_users
+
+ 1.0
+ fair
+
+
+ 1.0
+ drf
+
+ 1.0
+ drf
+ john
+ john
+ vcores=2,memory-mb=8192
+
+
+ memory-mb=50.0%, vcores=50.0%
+ 3.0
+ false
+ drf
+ joe
+ joe
+
+
+
+ memory-mb=8192, vcores=1
+ 1.0
+ fair
+ vcores=3,memory-mb=4096
+
+ memory-mb=16384, vcores=4
+ 2
+ 3.0
+ false
+ drf
+ alice
+ alice
+ 0.15
+
+
+ memory-mb=8192, vcores=2
+ 1.0
+ drf
+ bob
+ bob
+ -1.0
+
+
+
+ 23
+ 24
+ 0.12
+ 15
+ drf
+ 0.16
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-orderingpolicy.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-orderingpolicy.xml
new file mode 100644
index 00000000000..9d0ef4fbe17
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-orderingpolicy.xml
@@ -0,0 +1,72 @@
+
+
+
+ 1.0
+ fair
+ alice,bob,joe,john hadoop_users
+ alice,bob,joe,john hadoop_users
+
+ 1.0
+ fair
+
+
+ 1.0
+ fair
+
+ 1.0
+ fifo
+ john
+ john
+ vcores=2,memory-mb=8192
+
+
+ memory-mb=50.0%, vcores=50.0%
+ 3.0
+ false
+ fair
+ joe
+ joe
+
+
+
+ memory-mb=8192, vcores=1
+ 1.0
+ fair
+ vcores=3,memory-mb=4096
+
+ memory-mb=16384, vcores=4
+ 2
+ 3.0
+ false
+ fifo
+ alice
+ alice
+ 0.15
+
+
+ memory-mb=8192, vcores=2
+ 1.0
+ fair
+ bob
+ bob
+ -1.0
+
+
+
+ 23
+ 24
+ 0.12
+ 15
+ fair
+ 0.16
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-sameleafqueue.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-sameleafqueue.xml
new file mode 100644
index 00000000000..5505f845690
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler-sameleafqueue.xml
@@ -0,0 +1,73 @@
+
+
+
+ 1.0
+ drf
+ alice,bob,joe,john hadoop_users
+ alice,bob,joe,john hadoop_users
+
+ 1.0
+ drf
+
+
+ 1.0
+ drf
+
+ 1.0
+ drf
+ john
+ john
+ vcores=2,memory-mb=8192
+
+
+ memory-mb=50.0%, vcores=50.0%
+ 3.0
+ false
+ drf
+ joe
+ joe
+
+
+
+ memory-mb=8192, vcores=1
+ 1.0
+ drf
+ vcores=3,memory-mb=4096
+
+ memory-mb=16384, vcores=4
+ 2
+ 3.0
+ false
+ drf
+ alice
+ alice
+ 0.15
+ memory-mb=16384, vcores=4
+
+
+ memory-mb=8192, vcores=2
+ 1.0
+ drf
+ bob
+ bob
+ -1.0
+
+
+
+ 23
+ 24
+ 0.12
+ 15
+ fair
+ 0.16
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler.xml
new file mode 100644
index 00000000000..be9d08343c3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/fair-scheduler.xml
@@ -0,0 +1,77 @@
+
+
+
+ 1.0
+ drf
+ alice,bob,joe,john hadoop_users
+ alice,bob,joe,john hadoop_users
+
+ 1.0
+ drf
+
+
+ 1.0
+ drf
+
+ 1.0
+ drf
+ john
+ john
+ vcores=2,memory-mb=8192
+
+
+ memory-mb=50.0%, vcores=50.0%
+ 3.0
+ false
+ drf
+ joe
+ joe
+
+
+
+ memory-mb=8192, vcores=1
+ 1.0
+ drf
+ vcores=3,memory-mb=4096
+
+ memory-mb=16384, vcores=4
+ 2
+ 3.0
+ false
+ drf
+ alice
+ alice
+ 0.15
+ memory-mb=16384, vcores=4
+
+
+ memory-mb=8192, vcores=2
+ 1.0
+ drf
+ bob
+ bob
+ -1.0
+
+
+
+
+ 30
+
+ 10
+ 23
+ 24
+ 0.12
+ 15
+ fair
+ 0.16
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/yarn-site-with-allocation-file-ref.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/yarn-site-with-allocation-file-ref.xml
new file mode 100644
index 00000000000..fcce7f55a86
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/yarn-site-with-allocation-file-ref.xml
@@ -0,0 +1,23 @@
+
+
+
+
+
+ yarn.scheduler.fair.allocation.file
+ fair-scheduler.xml
+
+
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/yarn-site-with-invalid-allocation-file-ref.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/yarn-site-with-invalid-allocation-file-ref.xml
new file mode 100644
index 00000000000..248040d928c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/yarn-site-with-invalid-allocation-file-ref.xml
@@ -0,0 +1,37 @@
+
+
+
+
+
+
+
+
+ yarn.scheduler.fair.allocation.file
+
+
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index e17538ccdfc..39ee965df97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -193,6 +193,8 @@ Usage: `yarn resourcemanager [-format-state-store]`
|:---- |:---- |
| -format-state-store | Formats the RMStateStore. This will clear the RMStateStore and is useful if past applications are no longer needed. This should be run only when the ResourceManager is not running. |
| -remove-application-from-state-store \<appId\> | Remove the application from RMStateStore. This should be run only when the ResourceManager is not running. |
+| -convert-fs-configuration [-y\|yarnsiteconfig] [-f\|fsconfig] [-r\|rulesconfig] [-c\|console] | Converts the given Fair Scheduler configuration to a Capacity Scheduler configuration. Two input files are mandatory: the yarn-site.xml, specified as [-y\|yarnsiteconfig \<path\>], and the fair-scheduler.xml, specified as [-f\|fsconfig \<path\>]. An optional conversion rules file (a Java properties file) can be specified as [-r\|rulesconfig \<path\>]. The optional [-c\|console] switch emits the converted configuration to the console instead of writing it to disk. In normal operation, the output of this command is the path to the generated yarn-site.xml and capacity-scheduler.xml files. |
+
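+ The following is a minimal usage sketch. The flag spellings are taken from the synopsis above; the input paths are placeholders for illustration only:
+
+ ```
+ yarn resourcemanager -convert-fs-configuration \
+   -y /path/to/yarn-site.xml \
+   -f /path/to/fair-scheduler.xml \
+   -r /path/to/conversion-rules.properties
+ ```
+
+ Adding `-c` prints the converted configuration to the console instead of writing the files to disk. Each entry in the rules file maps a Fair Scheduler property to an action, for example `maxChildCapacity.action=abort`, as shown in the conversion-rules.properties test resource in this patch.
+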
Start the ResourceManager