diff --git hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java index c3fe3a39f5bf4ce831032323231f8da731b1376f..2e4d8b1c1baf316beceaf608f330b836a062335a 100644 --- hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java +++ hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java @@ -147,6 +147,12 @@ private Set xmlFieldsMissingInConfiguration = null; /** + * Member variable for debugging base class operation + */ + protected boolean configDebug = false; + protected boolean xmlDebug = false; + + /** * Abstract method to be used by subclasses for initializing base * members. */ @@ -168,13 +174,16 @@ HashMap retVal = new HashMap(); // Setup regexp for valid properties - String propRegex = "^[A-Za-z_-]+(\\.[A-Za-z_-]+)+$"; + String propRegex = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z0-9_-]+)+$"; Pattern p = Pattern.compile(propRegex); // Iterate through class member variables int totalFields = 0; String value; for (Field f : fields) { + if (configDebug) { + System.out.println("Field: " + f); + } // Filter out anything that isn't "public static final" if (!Modifier.isStatic(f.getModifiers()) || !Modifier.isPublic(f.getModifiers()) || @@ -192,6 +201,9 @@ } catch (IllegalAccessException iaException) { continue; } + if (configDebug) { + System.out.println(" Value: " + value); + } // Special Case: Detect and ignore partial properties (ending in x) // or file properties (ending in .xml) if (value.endsWith(".xml") || @@ -221,11 +233,23 @@ // something like: blah.blah2(.blah3.blah4...) Matcher m = p.matcher(value); if (!m.find()) { + if (configDebug) { + System.out.println(" Passes Regex: false"); + } continue; } + if (configDebug) { + System.out.println(" Passes Regex: true"); + } // Save member variable/value as hash - retVal.put(value,f.getName()); + if (!retVal.containsKey(value)) { + retVal.put(value,f.getName()); + } else { + if (configDebug) { + System.out.println("ERROR: Already found key for property " + value); + } + } } return retVal; @@ -256,6 +280,9 @@ // Ignore known xml props if (xmlPropsToSkipCompare != null) { if (xmlPropsToSkipCompare.contains(key)) { + if (xmlDebug) { + System.out.println(" Skipping Full Key: " + key); + } continue; } } @@ -270,14 +297,23 @@ } } if (skipPrefix) { + if (xmlDebug) { + System.out.println(" Skipping Prefix Key: " + key); + } continue; } if (conf.onlyKeyExists(key)) { retVal.put(key,null); + if (xmlDebug) { + System.out.println(" XML Key,Null Value: " + key); + } } else { String value = conf.get(key); if (value!=null) { retVal.put(key,entry.getValue()); + if (xmlDebug) { + System.out.println(" XML Key,Valid Value: " + key); + } } } kvItr.remove(); @@ -312,6 +348,10 @@ public void setupTestConfigurationFields() throws Exception { // Create class member/value map configurationMemberVariables = new HashMap(); + if (configDebug) { + System.out.println("Reading configuration classes"); + System.out.println(""); + } for (Class c : configurationClasses) { Field[] fields = c.getDeclaredFields(); Map memberMap = @@ -320,9 +360,23 @@ public void setupTestConfigurationFields() throws Exception { configurationMemberVariables.putAll(memberMap); } } + if (configDebug) { + System.out.println(""); + System.out.println("====="); + System.out.println(""); + } // Create XML key/value map + if (xmlDebug) { + 
System.out.println("Reading XML property files");
+      System.out.println("");
+    }
     xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
+    if (xmlDebug) {
+      System.out.println("");
+      System.out.println("=====");
+      System.out.println("");
+    }

     // Find class members not in the XML file
     configurationFieldsMissingInXmlFile = compareConfigurationToXmlFields
diff --git hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
new file mode 100644
index 0000000000000000000000000000000000000000..0e75d817d6a77d3d4a7eaf064fbd7a695d4080cd
--- /dev/null
+++ hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.tools;
+
+import java.util.HashSet;
+
+import org.apache.hadoop.conf.TestConfigurationFieldsBase;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
+/**
+ * Unit test class to compare the following HDFS configuration classes:
+ *
+ * {@link org.apache.hadoop.hdfs.DFSConfigKeys}
+ *
+ * against hdfs-default.xml for missing properties. Currently only
+ * throws an error if the class is missing a property.
+ *
+ * Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase} + * for how this class works. + */ +public class TestHdfsConfigFields extends TestConfigurationFieldsBase { + + @Override + public void initializeMemberVariables() { + xmlFilename = new String("hdfs-default.xml"); + configurationClasses = new Class[] { DFSConfigKeys.class }; + + // Set error modes + errorIfMissingConfigProps = true; + errorIfMissingXmlProps = false; + + // Allocate + xmlPropsToSkipCompare = new HashSet(); + xmlPrefixToSkipCompare = new HashSet(); + + // Used in native code fuse_connect.c + xmlPropsToSkipCompare.add("hadoop.fuse.timer.period"); + xmlPropsToSkipCompare.add("hadoop.fuse.connection.timeout"); + + // Used dynamically as part of DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + xmlPropsToSkipCompare.add("dfs.namenode.edits.journal-plugin.qjournal"); + + // Example (not real) property in hdfs-default.xml + xmlPropsToSkipCompare.add("dfs.ha.namenodes.EXAMPLENAMESERVICE"); + + // Defined in org.apache.hadoop.fs.CommonConfigurationKeys + xmlPropsToSkipCompare.add("hadoop.user.group.metrics.percentiles.intervals"); + + // Used oddly by DataNode to create new config String + xmlPropsToSkipCompare.add("hadoop.hdfs.configuration.version"); + + // Kept in the NfsConfiguration class in the hadoop-hdfs-nfs module + xmlPrefixToSkipCompare.add("nfs"); + + // Not a hardcoded property. Used by SaslRpcClient + xmlPrefixToSkipCompare.add("dfs.namenode.kerberos.principal.pattern"); + + // Skip comparing in branch-2. Removed in trunk with HDFS-7985. + xmlPropsToSkipCompare.add("dfs.webhdfs.enabled"); + + // Some properties have moved to HdfsClientConfigKeys + xmlPropsToSkipCompare.add("dfs.client.short.circuit.replica.stale.threshold.ms"); + } +} diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java new file mode 100644 index 0000000000000000000000000000000000000000..7f187147c011f7878d973cecaa0bb32be1c4a165 --- /dev/null +++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.mapreduce;
+
+import java.util.HashSet;
+
+import org.apache.hadoop.conf.TestConfigurationFieldsBase;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
+
+/**
+ * Unit test class to compare the following MR Configuration classes:
+ *
+ * {@link org.apache.hadoop.mapreduce.MRJobConfig}
+ * {@link org.apache.hadoop.mapreduce.MRConfig}
+ * {@link org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig}
+ * {@link org.apache.hadoop.mapred.ShuffleHandler}
+ * {@link org.apache.hadoop.mapreduce.lib.output.FileOutputFormat}
+ * {@link org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter}
+ * {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
+ * {@link org.apache.hadoop.mapreduce.Job}
+ * {@link org.apache.hadoop.mapreduce.lib.input.NLineInputFormat}
+ * {@link org.apache.hadoop.mapred.JobConf}
+ *
+ * against mapred-default.xml for missing properties. Currently only
+ * throws an error if the class is missing a property.
+ *
+ * Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase} + * for how this class works. + */ +public class TestMapreduceConfigFields extends TestConfigurationFieldsBase { + + @SuppressWarnings("deprecation") + @Override + public void initializeMemberVariables() { + xmlFilename = new String("mapred-default.xml"); + configurationClasses = new Class[] { MRJobConfig.class, MRConfig.class, + JHAdminConfig.class, ShuffleHandler.class, FileOutputFormat.class, + FileInputFormat.class, Job.class, NLineInputFormat.class, + JobConf.class, FileOutputCommitter.class }; + + // Initialize used variables + configurationPropsToSkipCompare = new HashSet(); + + // Set error modes + errorIfMissingConfigProps = true; + errorIfMissingXmlProps = false; + + // Ignore deprecated MR1 properties in JobConf + configurationPropsToSkipCompare + .add(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY); + configurationPropsToSkipCompare + .add(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY); + } + +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 66400c8831f5fc5b83c1d6b49d07b12b1513cf4d..6398df1a06b5375e51eb4db544cadd0488362853 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -24,7 +24,8 @@ - + + Factory to create client IPC classes. yarn.ipc.client.factory.class @@ -46,7 +47,8 @@ org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC - + + The hostname of the RM. yarn.resourcemanager.hostname @@ -123,6 +125,32 @@ + + The Kerberos keytab file to be used for spnego filter for the RM web + interface. + + yarn.resourcemanager.webapp.spnego-keytab-file + + + + + + The Kerberos principal to be used for spnego filter for the RM web + interface. + + yarn.resourcemanager.webapp.spnego-principal + + + + + + Add button to kill application in the RM Application view. + + yarn.resourcemanager.webapp.ui-actions.enabled + true + + + yarn.resourcemanager.resource-tracker.address ${yarn.resourcemanager.hostname}:8031 @@ -260,7 +288,16 @@ - Enable RM to recover state after starting. If true, then + + Used by node labels. If set to true, the port should be included in the + node name. Only usable if your scheduler supports node labels. + + yarn.scheduler.include-port-in-node-name + false + + + + Enable RM to recover state after starting. If true, then yarn.resourcemanager.store.class must be specified. yarn.resourcemanager.recovery.enabled false @@ -653,6 +690,16 @@ + + The value specifies the file system (e.g. HDFS) path where ResourceManager + loads configuration if yarn.resourcemanager.configuration.provider-class + is set to org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider. + + yarn.resourcemanager.configuration.file-system-based-store + /yarn/conf + + + The setting that controls whether yarn system metrics is published on the timeline server or not by RM. 
yarn.resourcemanager.system-metrics-publisher.enabled @@ -666,7 +713,108 @@ 10 - + + + RM DelegationTokenRenewer thread count + + yarn.resourcemanager.delegation-token-renewer.thread-count + 50 + + + + + RM secret key update interval in ms + + yarn.resourcemanager.delegation.key.update-interval + 86400000 + + + + + RM delegation token maximum lifetime + + yarn.resourcemanager.delegation.token.max-lifetime + 604800000 + + + + + RM delegation token update interval in ms + + yarn.resourcemanager.delegation.token.renew-interval + 86400000 + + + + + Thread pool size for RMApplicationHistoryWriter. + + yarn.resourcemanager.history-writer.multi-threaded-dispatcher.pool-size + 10 + + + + + Comma-separated list of values (in minutes) for schedule queue related + metrics. + + yarn.resourcemanager.metrics.runtime.buckets + 60,300,1440 + + + + + Interval for the roll over for the master key used to generate + NodeManager tokens. It is expected to be set to a value much larger + than yarn.nm.liveness-monitor.expiry-interval-ms. + + yarn.resourcemanager.nm-tokens.master-key-rolling-interval-secs + + + + + + Flag to enable the ResourceManager reservation system. + + yarn.resourcemanager.reservation-system.enable + false + + + + + The Java class to use as the ResourceManager reservation system. + + yarn.resourcemanager.reservation-system.class + + + + + + The plan follower policy class name to use for the ResourceManager + reservation system. + + yarn.resourcemanager.reservation-system.plan.follower + + + + + + Step size of the reservation system in ms + + yarn.resourcemanager.reservation-system.planfollower.time-step + 1000 + + + + + The expiry interval for a container + + yarn.resourcemanager.rm.container-allocation.expiry-interval-ms + 600000 + + + + The hostname of the NM. yarn.nodemanager.hostname @@ -852,6 +1000,17 @@ + + How long for ResourceManager to wait for NodeManager to report its + log aggregation status. If waiting time of which the log aggregation + status is reported from NodeManager exceeds the configured value, RM + will report log aggregation status for this NodeManager as TIME_OUT + + yarn.log-aggregation-status.time-out.ms + 60000 + + + Time in seconds to retain user logs. Only applicable if log aggregation is disabled @@ -929,6 +1088,32 @@ + + The https adddress of the NM web application. + + yarn.nodemanager.webapp.https.address + + + + + + The Kerberos keytab file to be used for spnego filter for the NM web + interface. + + yarn.nodemanager.webapp.spnego-keytab-file + + + + + + The Kerberos principal to be used for spnego filter for the NM web + interface. + + yarn.nodemanager.webapp.spnego-principal + + + + How often to monitor containers. yarn.nodemanager.container-monitor.interval-ms 3000 @@ -1036,6 +1221,12 @@ + Delay in ms between attempts to remove linux cgroup + yarn.nodemanager.linux-container-executor.cgroups.delete-delay-ms + 20> + + + This determines which of the two modes that LCE should use on a non-secure cluster. If this value is set to true, then all containers will be launched as the user specified in @@ -1089,6 +1280,23 @@ + + Interval of time the linux container executor should try cleaning up + cgroups entry when cleaning up a container. + + yarn.nodemanager.linux-container-executor.cgroups.delete-timeout-ms + + + + + + The UNIX group that the linux-container-executor should run as. + + yarn.nodemanager.linux-container-executor.group + + + + T-file compression types used to compress aggregated logs. 
yarn.nodemanager.log-aggregation.compression-type none @@ -1184,7 +1392,72 @@ ${hadoop.tmp.dir}/yarn-nm-recovery - + + + Adjustment to the container OS scheduling priority. In Linux, passed + directly to the nice command. + + yarn.nodemanager.container-executor.os.sched.priority.adjustment + 0 + + + + + Flag to enable container metrics + + yarn.nodemanager.container-metrics.enable + true + + + + + Container metrics flush period in ms. Set to -1 for flush on completion. + + yarn.nodemanager.container-metrics.period-ms + -1 + + + + + Class used to calculate current container resource utilization. + + yarn.nodemanager.container-monitor.process-tree.class + + + + + + Flag to enable NodeManager disk health checker + + yarn.nodemanager.disk-health-checker.enable + true + + + + + Number of threads to use in NM log cleanup. Used when log aggregation + is disabled. + + yarn.nodemanager.log.deletion-threads-count + 4 + + + + + + yarn.nodemanager.user-home-dir + + + + + + The Windows group that the windows-container-executor should run as. + + yarn.nodemanager.windows-secure-container-executor.group + + + + yarn.nodemanager.docker-container-executor.exec-name @@ -1194,24 +1467,23 @@ - - yarn.nodemanager.aux-services.mapreduce_shuffle.class - org.apache.hadoop.mapred.ShuffleHandler + + The Docker image name to use for DockerContainerExecutor + + yarn.nodemanager.docker-container-executor.image-name + - - mapreduce.job.jar - - + - mapreduce.job.hdfs-servers - ${fs.defaultFS} + yarn.nodemanager.aux-services.mapreduce_shuffle.class + org.apache.hadoop.mapred.ShuffleHandler - - + + The kerberos principal for the proxy, if the proxy is not running as part of the RM. @@ -1232,8 +1504,8 @@ - - + + CLASSPATH for YARN applications. A comma-separated list @@ -1260,7 +1532,7 @@ - + Indicate to clients whether timeline service is enabled or not. @@ -1435,6 +1707,7 @@ + Whether the shared cache is enabled yarn.sharedcache.enabled @@ -1576,7 +1849,93 @@ 20 - + + + ACL protocol for use in the Timeline server. + + security.applicationhistory.protocol.acl + + + + + + + + Set to true for MiniYARNCluster unit tests + + yarn.is.minicluster + false + + + + + Set for MiniYARNCluster unit tests to control resource monitoring + + yarn.minicluster.control-resource-monitoring + false + + + + + Set to false in order to allow MiniYARNCluster to run tests without + port conflicts. + + yarn.minicluster.fixed.ports + false + + + + + Set to false in order to allow the NodeManager in MiniYARNCluster to + use RPC to talk to the RM. + + yarn.minicluster.use-rpc + false + + + + + As yarn.nodemanager.resource.memory-mb property but for the NodeManager + in a MiniYARNCluster. + + yarn.minicluster.yarn.nodemanager.resource.memory-mb + + + + + + + + Enable node labels feature + + yarn.node-labels.enabled + false + + + + + + yarn.node-labels.fs-store.retry-policy-spec + + + + + + URI for NodeLabelManager + + yarn.node-labels.fs-store.root-dir + + + + + + + yarn.node-labels.configuration-type + centralized + + + + The interval that the yarn client library uses to poll the completion status of the asynchronous API of application client protocol. @@ -1586,7 +1945,25 @@ - RSS usage of a process computed via + + The duration (in ms) the YARN client waits for an expected state change + to occur. -1 means unlimited wait time. + + yarn.client.application-client-protocol.poll-timeout-ms + -1 + + + + + Deprecated in favor of + yarn.client.application-client-protocol.poll-timeout-ms. 
+ + yarn.client.app-submission.poll-interval + + + + + RSS usage of a process computed via /proc/pid/stat is not very accurate as it includes shared pages of a process. /proc/pid/smaps provides useful information like Private_Dirty, Private_Clean, Shared_Dirty, Shared_Clean which can be used @@ -1599,6 +1976,157 @@ + + URL for log aggregation server + + yarn.log.server.url + + + + + + RM Application Tracking URL + + yarn.tracking.url.generator + + + + + + Class to be used for YarnAuthorizationProvider + + yarn.authorization-provider + + + + + + + + Is the registry enabled: does the RM start it up, + create the user and system paths, and purge + service records when containers, application attempts + and applications complete + + hadoop.registry.rm.enabled + false + + + + + Root path in the ZK tree for the registry + + hadoop.registry.zk.root + /registry + + + + + Zookeeper session timeout in milliseconds + + hadoop.registry.zk.session.timeout.ms + 60000 + + + + + Zookeeper session timeout in milliseconds + + hadoop.registry.zk.connection.timeout.ms + 15000 + + + + + Zookeeper connection retry count before failing + + hadoop.registry.zk.retry.times + 5 + + + + + Zookeeper connect interval in milliseconds + + hadoop.registry.zk.retry.interval.ms + 1000 + + + + + Zookeeper retry limit in milliseconds, during + exponential backoff: {@value} + + This places a limit even + if the retry times and interval limit, combined + with the backoff policy, result in a long retry + period + + hadoop.registry.zk.retry.ceiling.ms + 60000 + + + + + List of hostname:port pairs defining the + zookeeper quorum binding for the registry + + hadoop.registry.zk.quorum + localhost:2181 + + + + + Key to set if the registry is secure. Turning it on + changes the permissions policy from "open access" + to restrictions on kerberos with the option of + a user adding one or more auth key pairs down their + own tree. + + hadoop.registry.secure + false + + + + + A comma separated list of Zookeeper ACL identifiers with + system access to the registry in a secure cluster. + + These are given full access to all entries. + + If there is an "@" at the end of a SASL entry it + instructs the registry client to append the default kerberos domain. + + hadoop.registry.system.acls + sasl:yarn@, sasl:mapred@, sasl:mapred@hdfs@ + + + + + The kerberos realm: used to set the realm of + system principals which do not declare their realm, + and any other accounts that need the value. + + If empty, the default realm of the running process + is used. + + If neither are known and the realm is needed, then the registry + service/client will fail. + + hadoop.registry.kerberos.realm + + + + + + Key to define the JAAS context. Used in secure + mode + + hadoop.registry.jaas.context + Client + + + Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded when the application is finished. 
By setting this configure, logs can be uploaded @@ -1608,4 +2136,5 @@ yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds -1 + diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java index 9075d9f6dd3e589399b1c6d46ab40f77f181fa3b..d20c626af810b9177736327480894c8120fd16ce 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java @@ -37,14 +37,14 @@ public void initializeMemberVariables() { xmlFilename = new String("yarn-default.xml"); configurationClasses = new Class[] { YarnConfiguration.class }; - // Allocate for usage configurationPropsToSkipCompare = new HashSet(); + configurationPrefixToSkipCompare = new HashSet(); // Set error modes errorIfMissingConfigProps = true; - errorIfMissingXmlProps = false; + errorIfMissingXmlProps = true; // Specific properties to skip configurationPropsToSkipCompare @@ -79,6 +79,30 @@ public void initializeMemberVariables() { configurationPropsToSkipCompare .add(YarnConfiguration .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL); + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_SCM_STORE_CLASS); + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_SCM_APP_CHECKER_CLASS); + configurationPropsToSkipCompare + .add(YarnConfiguration.DEFAULT_SHARED_CACHE_CHECKSUM_ALGO_IMPL); + + // Ignore all YARN Application Timeline Service (version 1) properties + configurationPrefixToSkipCompare.add("yarn.timeline-service."); + + // Used as Java command line properties, not XML + configurationPrefixToSkipCompare.add("yarn.app.container"); + + // Ignore NodeManager "work in progress" variables + configurationPrefixToSkipCompare + .add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED); + configurationPrefixToSkipCompare + .add(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE); + configurationPrefixToSkipCompare + .add(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT); + configurationPrefixToSkipCompare + .add(YarnConfiguration.NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT); + configurationPrefixToSkipCompare + .add(YarnConfiguration.NM_DISK_RESOURCE_ENABLED); // Allocate for usage xmlPropsToSkipCompare = new HashSet(); @@ -94,13 +118,11 @@ public void initializeMemberVariables() { // Used in the XML file as a variable reference internal to the XML file xmlPropsToSkipCompare.add("yarn.nodemanager.hostname"); - xmlPropsToSkipCompare.add("yarn.timeline-service.hostname"); - // Currently defined in TimelineAuthenticationFilterInitializer - xmlPrefixToSkipCompare.add("yarn.timeline-service.http-authentication"); + // Ignore all YARN Application Timeline Service (version 1) properties + xmlPrefixToSkipCompare.add("yarn.timeline-service"); // Currently defined in RegistryConstants xmlPrefixToSkipCompare.add("hadoop.registry"); } - }
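As a quick illustration of the tightened property-name regex introduced in TestConfigurationFieldsBase, ^[A-Za-z][A-Za-z0-9_-]+(\.[A-Za-z0-9_-]+)+$, the small standalone sketch below runs the same Pattern against a few sample strings. The sample names are illustrative only; note that values ending in ".xml" are filtered out by the separate endsWith check earlier in extractMemberVariables, not by the regex itself.

import java.util.regex.Pattern;

/** Standalone demo of the property-name regex used by the base class. */
public class PropertyRegexDemo {
  public static void main(String[] args) {
    // Must start with a letter, later characters may include digits, and at
    // least one "."-separated segment must follow.
    Pattern p = Pattern.compile("^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z0-9_-]+)+$");

    String[] samples = {
        "yarn.ipc.client.factory.class",   // valid property name -> matches
        "fs.s3a.access.key",               // digits are allowed after the first char -> matches
        "dfs",                             // single token, no dot segment -> no match
        "8020",                            // must start with a letter -> no match
        "yarn.nodemanager.aux-services."   // partial prefix with trailing dot -> no match
    };
    for (String s : samples) {
      // Mirrors the base class, which calls find() on an anchored pattern
      System.out.println(s + " -> " + p.matcher(s).find());
    }
  }
}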
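For context, the three new test classes above all follow the same pattern. Below is a minimal sketch of how another module could plug into TestConfigurationFieldsBase; TestExampleConfigFields, ExampleConfigKeys, example-default.xml, and the example.* property names are hypothetical placeholders, not real Hadoop classes or keys.

import java.util.HashSet;

import org.apache.hadoop.conf.TestConfigurationFieldsBase;

public class TestExampleConfigFields extends TestConfigurationFieldsBase {

  /** Hypothetical config-keys holder; a real module would point at its own. */
  public static class ExampleConfigKeys {
    public static final String EXAMPLE_BUFFER_SIZE_KEY = "example.io.buffer.size";
  }

  @Override
  public void initializeMemberVariables() {
    // XML file shipped with the hypothetical module
    xmlFilename = new String("example-default.xml");

    // Classes whose public static final String members hold property names
    configurationClasses = new Class[] { ExampleConfigKeys.class };

    // Fail when a class constant is missing from the XML file, but stay
    // lenient about XML entries that have no matching constant yet
    errorIfMissingConfigProps = true;
    errorIfMissingXmlProps = false;

    // Debug switches added to the base class by this patch
    configDebug = false;
    xmlDebug = false;

    // Known exceptions, mirroring the skip lists used above
    xmlPropsToSkipCompare = new HashSet();
    xmlPropsToSkipCompare.add("example.generated.at.runtime");

    xmlPrefixToSkipCompare = new HashSet();
    xmlPrefixToSkipCompare.add("example.internal");
  }
}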
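The yarn-default.xml entries documented in this patch are read at runtime through YarnConfiguration, which layers yarn-site.xml over yarn-default.xml. A minimal sketch, assuming yarn-default.xml is on the classpath, reading two of the newly documented defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

/** Reads two of the yarn-default.xml entries documented in this patch. */
public class ReadYarnDefaults {
  public static void main(String[] args) {
    // YarnConfiguration loads yarn-default.xml and yarn-site.xml resources
    Configuration conf = new YarnConfiguration();

    // Fallback values match the defaults shown above: 604800000 ms, 50 threads
    long maxLifetime = conf.getLong(
        "yarn.resourcemanager.delegation.token.max-lifetime", 604800000L);
    int renewerThreads = conf.getInt(
        "yarn.resourcemanager.delegation-token-renewer.thread-count", 50);

    System.out.println("token max lifetime (ms): " + maxLifetime);
    System.out.println("renewer thread count: " + renewerThreads);
  }
}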