diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index 4ef4d8d..6542cb7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -128,9 +128,7 @@ static ApplicationId applicationId; static { - applicationId = recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setClusterTimestamp(0); - applicationId.setId(0); + applicationId = ApplicationId.createApplicationId(0, 0); } public MRApp(int maps, int reduces, boolean autoComplete, String testName, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index 0e20d6f..30bd199 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -789,9 +789,7 @@ public MyAppMaster(Clock clock) { private final Map allJobs; MyAppContext(int numberMaps, int numberReduces) { - myApplicationID = recordFactory.newRecordInstance(ApplicationId.class); - myApplicationID.setClusterTimestamp(clock.getTime()); - myApplicationID.setId(1); + myApplicationID = ApplicationId.createApplicationId(clock.getTime(), 1); myAppAttemptID = 
recordFactory .newRecordInstance(ApplicationAttemptId.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index 10b79ab..bb48dc7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -83,9 +83,7 @@ public void testDeletionofStaging() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(0); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(System.currentTimeMillis(), 0); attemptId.setApplicationId(appId); JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); @@ -111,9 +109,7 @@ public void testNoDeletionofStagingOnReboot() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(0); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(System.currentTimeMillis(), 0); attemptId.setApplicationId(appId); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, @@ -138,9 +134,7 @@ public void testDeletionofStagingOnReboot() throws IOException { ApplicationAttemptId 
attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(1); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(System.currentTimeMillis(), 0); attemptId.setApplicationId(appId); ContainerAllocator mockAlloc = mock(ContainerAllocator.class); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, @@ -166,9 +160,7 @@ public void testDeletionofStagingOnKill() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(0); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(System.currentTimeMillis(), 0); attemptId.setApplicationId(appId); JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); @@ -194,9 +186,7 @@ public void testDeletionofStagingOnKillLastTry() throws IOException { ApplicationAttemptId attemptId = recordFactory.newRecordInstance( ApplicationAttemptId.class); attemptId.setAttemptId(1); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(System.currentTimeMillis(), 0); attemptId.setApplicationId(appId); JobId jobid = recordFactory.newRecordInstance(JobId.class); jobid.setAppId(appId); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java index 9fd0fb8..16d07d6 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java @@ -224,9 +224,7 @@ public void setup() { metrics = mock(MRAppMetrics.class); dataLocations = new String[1]; - appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(1); + appId = ApplicationId.createApplicationId(System.currentTimeMillis(), 1); jobId = Records.newRecord(JobId.class); jobId.setId(1); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java index ff38ff3..5d12aab 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java @@ -76,9 +76,8 @@ public static JobId toYarn(org.apache.hadoop.mapreduce.JobID id) { JobId jobId = recordFactory.newRecordInstance(JobId.class); jobId.setId(id.getId()); //currently there is 1-1 mapping between appid and jobid - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setId(id.getId()); - appId.setClusterTimestamp(toClusterTimeStamp(id.getJtIdentifier())); + ApplicationId appId = ApplicationId.createApplicationId( + toClusterTimeStamp(id.getJtIdentifier()), id.getId()); jobId.setAppId(appId); return jobId; } diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java index e172be5..1a9d8ec 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java @@ -18,29 +18,28 @@ package org.apache.hadoop.mapreduce; -import org.junit.Test; -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.fs.LocalDirAllocator; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.mapred.Task.CombineOutputCollector; +import org.apache.hadoop.fs.LocalDirAllocator; import org.apache.hadoop.io.compress.CompressionCodec; -import org.apache.hadoop.util.Progress; -import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.mapreduce.task.reduce.Shuffle; -import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.Counters.Counter; -import org.apache.hadoop.mapred.MapOutputFile; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.Task; +import org.apache.hadoop.mapred.MapOutputFile; +import org.apache.hadoop.mapred.RawKeyValueIterator; import org.apache.hadoop.mapred.ReduceTask; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.ShuffleConsumerPlugin; +import org.apache.hadoop.mapred.Task; +import org.apache.hadoop.mapred.Task.CombineOutputCollector; import 
org.apache.hadoop.mapred.TaskStatus; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; -import org.apache.hadoop.mapred.ShuffleConsumerPlugin; -import org.apache.hadoop.mapred.RawKeyValueIterator; -import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapreduce.task.reduce.Shuffle; +import org.apache.hadoop.util.Progress; +import org.apache.hadoop.util.ReflectionUtils; +import org.junit.Test; /** * A JUnit for testing availability and accessibility of shuffle related API. @@ -181,10 +180,6 @@ public void testConsumerApi() { * AuxiliaryService(s) which are "Shuffle-Providers" (ShuffleHandler and 3rd party plugins) */ public void testProviderApi() { - - ApplicationId mockApplicationId = mock(ApplicationId.class); - mockApplicationId.setClusterTimestamp(new Long(10)); - mockApplicationId.setId(mock(JobID.class).getId()); LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class); JobConf mockJobConf = mock(JobConf.class); try { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index 601268a..3f16996 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -140,9 +140,7 @@ public ApplicationSubmissionContext answer(InvocationOnMock invocation) ).when(yarnRunner).createApplicationSubmissionContext(any(Configuration.class), any(String.class), any(Credentials.class)); - appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(System.currentTimeMillis()); - appId.setId(1); + appId = 
ApplicationId.createApplicationId(System.currentTimeMillis(), 1); jobId = TypeConverter.fromYarn(appId); if (testWorkDir.exists()) { FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java index 0beb430..1e2db2e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java @@ -60,7 +60,6 @@ import org.apache.hadoop.io.ReadaheadPool; import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.security.SecureShuffleUtils; -import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier; import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader; @@ -71,6 +70,7 @@ import org.apache.hadoop.metrics2.lib.MutableCounterInt; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -78,7 +78,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.util.ConverterUtils; -import org.apache.hadoop.yarn.util.Records; import org.jboss.netty.bootstrap.ServerBootstrap; import 
org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.channel.Channel; @@ -542,9 +541,8 @@ protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch, // $x/$user/appcache/$appId/output/$mapId // TODO: Once Shuffle is out of NM, this can use MR APIs to convert between App and Job JobID jobID = JobID.forName(jobId); - ApplicationId appID = Records.newRecord(ApplicationId.class); - appID.setClusterTimestamp(Long.parseLong(jobID.getJtIdentifier())); - appID.setId(jobID.getId()); + ApplicationId appID = ApplicationId.createApplicationId( + Long.parseLong(jobID.getJtIdentifier()), jobID.getId()); final String base = ContainerLocalizer.USERCACHE + "/" + user + "/" + ContainerLocalizer.APPCACHE + "/" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/YarnException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/YarnException.java new file mode 100644 index 0000000..b12fc81 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/YarnException.java @@ -0,0 +1,29 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn; + +/** Base Yarn Exception. 
+ */ +public class YarnException extends RuntimeException { + public YarnException(Throwable cause) { super(cause); } + public YarnException(String message) { super(message); } + public YarnException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java index 097a533..21e9e2e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java @@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; /** *

ApplicationId represents the globally unique @@ -52,7 +53,7 @@ @Private @Unstable - public abstract void setId(int id); + protected abstract void setId(int id); /** * Get the start time of the ResourceManager which is @@ -63,9 +64,21 @@ @Private @Unstable - public abstract void setClusterTimestamp(long clusterTimestamp); + protected abstract void setClusterTimestamp(long clusterTimestamp); + /** + * Meant to build the serialized form once all setters have been called. + */ + protected abstract void build(); + + public static ApplicationId createApplicationId(long clusterTimestamp, int id) { + ApplicationId appId = Records.newRecord(ApplicationId.class); + appId.setClusterTimestamp(clusterTimestamp); + appId.setId(id); + appId.build(); + return appId; + } static final ThreadLocal appIdFormat = new ThreadLocal() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java index ad5c778..a0cb4a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java @@ -19,9 +19,9 @@ package org.apache.hadoop.yarn.api.records.impl.pb; +import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProtoOrBuilder; @@ -32,6 +32,7 @@ public ApplicationIdPBImpl() { builder = ApplicationIdProto.newBuilder(); + viaProto = false; } public ApplicationIdPBImpl(ApplicationIdProto proto) { @@ -39,40 +40,39 @@ public ApplicationIdPBImpl(ApplicationIdProto proto) { viaProto = true; } - public 
synchronized ApplicationIdProto getProto() { + public ApplicationIdProto getProto() { proto = viaProto ? proto : builder.build(); viaProto = true; return proto; } - private synchronized void maybeInitBuilder() { - if (viaProto || builder == null) { - builder = ApplicationIdProto.newBuilder(proto); - } - viaProto = false; - } - - @Override - public synchronized int getId() { - ApplicationIdProtoOrBuilder p = viaProto ? proto : builder; - return (p.getId()); + public int getId() { + return proto.getId(); } @Override - public synchronized void setId(int id) { - maybeInitBuilder(); + protected void setId(int id) { + if (viaProto) { + throw new YarnException("FIXME to throw a better exception - cannot be set if the proto is already being used"); + } builder.setId((id)); } @Override - public synchronized long getClusterTimestamp() { - ApplicationIdProtoOrBuilder p = viaProto ? proto : builder; - return (p.getClusterTimestamp()); + public long getClusterTimestamp() { + return proto.getClusterTimestamp(); } @Override - public synchronized void setClusterTimestamp(long clusterTimestamp) { - maybeInitBuilder(); + protected void setClusterTimestamp(long clusterTimestamp) { + if (viaProto) { + throw new YarnException("FIXME to throw a better exception - cannot be set if the proto is already being used"); + } builder.setClusterTimestamp((clusterTimestamp)); } + + @Override + protected void build() { + getProto(); // TODO + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java new file mode 100644 index 0000000..de8b6d1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -0,0 +1,757 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license 
agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.conf; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.Arrays; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.api.ApplicationConstants; + +import com.google.common.base.Joiner; + +public class YarnConfiguration extends Configuration { + + private static final Joiner JOINER = Joiner.on(""); + + private static final String YARN_DEFAULT_XML_FILE = "yarn-default.xml"; + private static final String YARN_SITE_XML_FILE = "yarn-site.xml"; + + static { + Configuration.addDefaultResource(YARN_DEFAULT_XML_FILE); + Configuration.addDefaultResource(YARN_SITE_XML_FILE); + } + + //Configurations + + public static final String YARN_PREFIX = "yarn."; + + /** Delay before deleting resource to ease debugging of NM issues */ + public static final String DEBUG_NM_DELETE_DELAY_SEC = + YarnConfiguration.NM_PREFIX + "delete.debug-delay-sec"; + + //////////////////////////////// + // IPC Configs + //////////////////////////////// + public static final String IPC_PREFIX = YARN_PREFIX + "ipc."; + + /** Factory to create client IPC classes.*/ + public static final String 
IPC_CLIENT_FACTORY = + IPC_PREFIX + "client.factory.class"; + + /** Type of serialization to use.*/ + public static final String IPC_SERIALIZER_TYPE = + IPC_PREFIX + "serializer.type"; + public static final String DEFAULT_IPC_SERIALIZER_TYPE = "protocolbuffers"; + + /** Factory to create server IPC classes.*/ + public static final String IPC_SERVER_FACTORY = + IPC_PREFIX + "server.factory.class"; + + /** Factory to create IPC exceptions.*/ + public static final String IPC_EXCEPTION_FACTORY = + IPC_PREFIX + "exception.factory.class"; + + /** Factory to create serializeable records.*/ + public static final String IPC_RECORD_FACTORY_CLASS = + IPC_PREFIX + "record.factory.class"; + public static final String DEFAULT_IPC_RECORD_FACTORY_CLASS = + "org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl"; + + /** RPC class implementation*/ + public static final String IPC_RPC_IMPL = + IPC_PREFIX + "rpc.class"; + public static final String DEFAULT_IPC_RPC_IMPL = + "org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC"; + + //////////////////////////////// + // Resource Manager Configs + //////////////////////////////// + public static final String RM_PREFIX = "yarn.resourcemanager."; + + /** The address of the applications manager interface in the RM.*/ + public static final String RM_ADDRESS = + RM_PREFIX + "address"; + public static final int DEFAULT_RM_PORT = 8032; + public static final String DEFAULT_RM_ADDRESS = + "0.0.0.0:" + DEFAULT_RM_PORT; + + /** The number of threads used to handle applications manager requests.*/ + public static final String RM_CLIENT_THREAD_COUNT = + RM_PREFIX + "client.thread-count"; + public static final int DEFAULT_RM_CLIENT_THREAD_COUNT = 50; + + /** The Kerberos principal for the resource manager.*/ + public static final String RM_PRINCIPAL = + RM_PREFIX + "principal"; + + /** The address of the scheduler interface.*/ + public static final String RM_SCHEDULER_ADDRESS = + RM_PREFIX + "scheduler.address"; + public static final int 
DEFAULT_RM_SCHEDULER_PORT = 8030; + public static final String DEFAULT_RM_SCHEDULER_ADDRESS = "0.0.0.0:" + + DEFAULT_RM_SCHEDULER_PORT; + + /** Miniumum request grant-able by the RM scheduler. */ + public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB = + YARN_PREFIX + "scheduler.minimum-allocation-mb"; + public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024; + public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = + YARN_PREFIX + "scheduler.minimum-allocation-vcores"; + public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = 1; + + /** Maximum request grant-able by the RM scheduler. */ + public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = + YARN_PREFIX + "scheduler.maximum-allocation-mb"; + public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = 8192; + public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES = + YARN_PREFIX + "scheduler.maximum-allocation-vcores"; + public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES = 4; + + /** Number of threads to handle scheduler interface.*/ + public static final String RM_SCHEDULER_CLIENT_THREAD_COUNT = + RM_PREFIX + "scheduler.client.thread-count"; + public static final int DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT = 50; + + /** The address of the RM web application.*/ + public static final String RM_WEBAPP_ADDRESS = + RM_PREFIX + "webapp.address"; + + public static final int DEFAULT_RM_WEBAPP_PORT = 8088; + public static final String DEFAULT_RM_WEBAPP_ADDRESS = "0.0.0.0:" + + DEFAULT_RM_WEBAPP_PORT; + + public static final String RM_RESOURCE_TRACKER_ADDRESS = + RM_PREFIX + "resource-tracker.address"; + public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031; + public static final String DEFAULT_RM_RESOURCE_TRACKER_ADDRESS = + "0.0.0.0:" + DEFAULT_RM_RESOURCE_TRACKER_PORT; + + /** The expiry interval for application master reporting.*/ + public static final String RM_AM_EXPIRY_INTERVAL_MS = + YARN_PREFIX + 
"am.liveness-monitor.expiry-interval-ms"; + public static final int DEFAULT_RM_AM_EXPIRY_INTERVAL_MS = 600000; + + /** How long to wait until a node manager is considered dead.*/ + public static final String RM_NM_EXPIRY_INTERVAL_MS = + YARN_PREFIX + "nm.liveness-monitor.expiry-interval-ms"; + public static final int DEFAULT_RM_NM_EXPIRY_INTERVAL_MS = 600000; + + /** Are acls enabled.*/ + public static final String YARN_ACL_ENABLE = + YARN_PREFIX + "acl.enable"; + public static final boolean DEFAULT_YARN_ACL_ENABLE = false; + + /** ACL of who can be admin of YARN cluster.*/ + public static final String YARN_ADMIN_ACL = + YARN_PREFIX + "admin.acl"; + public static final String DEFAULT_YARN_ADMIN_ACL = "*"; + + /** ACL used in case none is found. Allows nothing. */ + public static final String DEFAULT_YARN_APP_ACL = " "; + + /** The address of the RM admin interface.*/ + public static final String RM_ADMIN_ADDRESS = + RM_PREFIX + "admin.address"; + public static final int DEFAULT_RM_ADMIN_PORT = 8033; + public static final String DEFAULT_RM_ADMIN_ADDRESS = "0.0.0.0:" + + DEFAULT_RM_ADMIN_PORT; + + /**Number of threads used to handle RM admin interface.*/ + public static final String RM_ADMIN_CLIENT_THREAD_COUNT = + RM_PREFIX + "admin.client.thread-count"; + public static final int DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT = 1; + + /** + * The maximum number of application attempts. + * It's a global setting for all application masters. 
+ */ + public static final String RM_AM_MAX_ATTEMPTS = + RM_PREFIX + "am.max-attempts"; + public static final int DEFAULT_RM_AM_MAX_ATTEMPTS = 2; + + /** The keytab for the resource manager.*/ + public static final String RM_KEYTAB = + RM_PREFIX + "keytab"; + + /** How long to wait until a container is considered dead.*/ + public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = + RM_PREFIX + "rm.container-allocation.expiry-interval-ms"; + public static final int DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = 600000; + + /** Path to file with nodes to include.*/ + public static final String RM_NODES_INCLUDE_FILE_PATH = + RM_PREFIX + "nodes.include-path"; + public static final String DEFAULT_RM_NODES_INCLUDE_FILE_PATH = ""; + + /** Path to file with nodes to exclude.*/ + public static final String RM_NODES_EXCLUDE_FILE_PATH = + RM_PREFIX + "nodes.exclude-path"; + public static final String DEFAULT_RM_NODES_EXCLUDE_FILE_PATH = ""; + + /** Number of threads to handle resource tracker calls.*/ + public static final String RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT = + RM_PREFIX + "resource-tracker.client.thread-count"; + public static final int DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT = 50; + + /** The class to use as the resource scheduler.*/ + public static final String RM_SCHEDULER = + RM_PREFIX + "scheduler.class"; + + public static final String DEFAULT_RM_SCHEDULER = + "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler"; + + /** RM set next Heartbeat interval for NM */ + public static final String RM_NM_HEARTBEAT_INTERVAL_MS = + RM_PREFIX + "nodemanagers.heartbeat-interval-ms"; + public static final long DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS = 1000; + + //Delegation token related keys + public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY = + RM_PREFIX + "delegation.key.update-interval"; + public static final long DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT = + 24*60*60*1000; // 1 day + public static final String 
DELEGATION_TOKEN_RENEW_INTERVAL_KEY = + RM_PREFIX + "delegation.token.renew-interval"; + public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT = + 24*60*60*1000; // 1 day + public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY = + RM_PREFIX + "delegation.token.max-lifetime"; + public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT = + 7*24*60*60*1000; // 7 days + + public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled"; + public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false; + + /** The class to use as the persistent store.*/ + public static final String RM_STORE = RM_PREFIX + "store.class"; + + /** URI for FileSystemRMStateStore */ + public static final String FS_RM_STATE_STORE_URI = + RM_PREFIX + "fs.rm-state-store.uri"; + + /** The maximum number of completed applications RM keeps. */ + public static final String RM_MAX_COMPLETED_APPLICATIONS = + RM_PREFIX + "max-completed-applications"; + public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 10000; + + /** Default application name */ + public static final String DEFAULT_APPLICATION_NAME = "N/A"; + + /** Default queue name */ + public static final String DEFAULT_QUEUE_NAME = "default"; + + /** + * Buckets (in minutes) for the number of apps running in each queue. + */ + public static final String RM_METRICS_RUNTIME_BUCKETS = + RM_PREFIX + "metrics.runtime.buckets"; + + /** + * Default sizes of the runtime metric buckets in minutes. 
+ */ + public static final String DEFAULT_RM_METRICS_RUNTIME_BUCKETS = + "60,300,1440"; + + public static final String RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = RM_PREFIX + + "application-tokens.master-key-rolling-interval-secs"; + + public static final long DEFAULT_RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = + 24 * 60 * 60; + + public static final String RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = + RM_PREFIX + "container-tokens.master-key-rolling-interval-secs"; + + public static final long DEFAULT_RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = + 24 * 60 * 60; + + //////////////////////////////// + // Node Manager Configs + //////////////////////////////// + + /** Prefix for all node manager configs.*/ + public static final String NM_PREFIX = "yarn.nodemanager."; + + /** Environment variables that will be sent to containers.*/ + public static final String NM_ADMIN_USER_ENV = NM_PREFIX + "admin-env"; + public static final String DEFAULT_NM_ADMIN_USER_ENV = "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX"; + + /** Environment variables that containers may override rather than use NodeManager's default.*/ + public static final String NM_ENV_WHITELIST = NM_PREFIX + "env-whitelist"; + public static final String DEFAULT_NM_ENV_WHITELIST = StringUtils.join(",", + Arrays.asList(ApplicationConstants.Environment.JAVA_HOME.key(), + ApplicationConstants.Environment.HADOOP_COMMON_HOME.key(), + ApplicationConstants.Environment.HADOOP_HDFS_HOME.key(), + ApplicationConstants.Environment.HADOOP_CONF_DIR.key(), + ApplicationConstants.Environment.HADOOP_YARN_HOME.key())); + + /** address of node manager IPC.*/ + public static final String NM_ADDRESS = NM_PREFIX + "address"; + public static final int DEFAULT_NM_PORT = 0; + public static final String DEFAULT_NM_ADDRESS = "0.0.0.0:" + + DEFAULT_NM_PORT; + + /** who will execute(launch) the containers.*/ + public static final String NM_CONTAINER_EXECUTOR = + NM_PREFIX + "container-executor.class"; + + /** + * Adjustment 
to make to the container os scheduling priority. + * The valid values for this could vary depending on the platform. + * On Linux, higher values mean run the containers at a less + * favorable priority than the NM. + * The value specified is an int. + */ + public static final String NM_CONTAINER_EXECUTOR_SCHED_PRIORITY = + NM_PREFIX + "container-executor.os.sched.priority.adjustment"; + public static final int DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY = 0; + + /** Number of threads container manager uses.*/ + public static final String NM_CONTAINER_MGR_THREAD_COUNT = + NM_PREFIX + "container-manager.thread-count"; + public static final int DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT = 20; + + /** Number of threads used in cleanup.*/ + public static final String NM_DELETE_THREAD_COUNT = + NM_PREFIX + "delete.thread-count"; + public static final int DEFAULT_NM_DELETE_THREAD_COUNT = 4; + + /** Keytab for NM.*/ + public static final String NM_KEYTAB = NM_PREFIX + "keytab"; + + /**List of directories to store localized files in.*/ + public static final String NM_LOCAL_DIRS = NM_PREFIX + "local-dirs"; + public static final String DEFAULT_NM_LOCAL_DIRS = "/tmp/nm-local-dir"; + + /** + * Number of files in each localized directories + * Avoid tuning this too low. 
+ */ + public static final String NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY = + NM_PREFIX + "local-cache.max-files-per-directory"; + public static final int DEFAULT_NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY = 8192; + + /** Address where the localizer IPC is.*/ + public static final String NM_LOCALIZER_ADDRESS = + NM_PREFIX + "localizer.address"; + public static final int DEFAULT_NM_LOCALIZER_PORT = 8040; + public static final String DEFAULT_NM_LOCALIZER_ADDRESS = "0.0.0.0:" + + DEFAULT_NM_LOCALIZER_PORT; + + /** Interval in between cache cleanups.*/ + public static final String NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS = + NM_PREFIX + "localizer.cache.cleanup.interval-ms"; + public static final long DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS = + 10 * 60 * 1000; + + /** Target size of localizer cache in MB, per local directory.*/ + public static final String NM_LOCALIZER_CACHE_TARGET_SIZE_MB = + NM_PREFIX + "localizer.cache.target-size-mb"; + public static final long DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB = 10 * 1024; + + /** Number of threads to handle localization requests.*/ + public static final String NM_LOCALIZER_CLIENT_THREAD_COUNT = + NM_PREFIX + "localizer.client.thread-count"; + public static final int DEFAULT_NM_LOCALIZER_CLIENT_THREAD_COUNT = 5; + + /** Number of threads to use for localization fetching.*/ + public static final String NM_LOCALIZER_FETCH_THREAD_COUNT = + NM_PREFIX + "localizer.fetch.thread-count"; + public static final int DEFAULT_NM_LOCALIZER_FETCH_THREAD_COUNT = 4; + + /** Where to store container logs.*/ + public static final String NM_LOG_DIRS = NM_PREFIX + "log-dirs"; + public static final String DEFAULT_NM_LOG_DIRS = "/tmp/logs"; + + /** Interval at which the delayed token removal thread runs */ + public static final String RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = + RM_PREFIX + "delayed.delegation-token.removal-interval-ms"; + public static final long DEFAULT_RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = + 30000l; + + /** 
Whether to enable log aggregation */ + public static final String LOG_AGGREGATION_ENABLED = YARN_PREFIX + + "log-aggregation-enable"; + public static final boolean DEFAULT_LOG_AGGREGATION_ENABLED = false; + + /** + * How long to wait before deleting aggregated logs, -1 disables. + * Be careful set this too small and you will spam the name node. + */ + public static final String LOG_AGGREGATION_RETAIN_SECONDS = YARN_PREFIX + + "log-aggregation.retain-seconds"; + public static final long DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS = -1; + + /** + * How long to wait between aggregated log retention checks. If set to + * a value <= 0 then the value is computed as one-tenth of the log retention + * setting. Be careful set this too small and you will spam the name node. + */ + public static final String LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = + YARN_PREFIX + "log-aggregation.retain-check-interval-seconds"; + public static final long DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = -1; + + /** + * Number of seconds to retain logs on the NodeManager. Only applicable if Log + * aggregation is disabled + */ + public static final String NM_LOG_RETAIN_SECONDS = NM_PREFIX + + "log.retain-seconds"; + + /** + * Number of threads used in log cleanup. 
Only applicable if Log aggregation + * is disabled + */ + public static final String NM_LOG_DELETION_THREADS_COUNT = + NM_PREFIX + "log.deletion-threads-count"; + public static final int DEFAULT_NM_LOG_DELETE_THREAD_COUNT = 4; + + /** Where to aggregate logs to.*/ + public static final String NM_REMOTE_APP_LOG_DIR = + NM_PREFIX + "remote-app-log-dir"; + public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR = "/tmp/logs"; + + /** + * The remote log dir will be created at + * NM_REMOTE_APP_LOG_DIR/${user}/NM_REMOTE_APP_LOG_DIR_SUFFIX/${appId} + */ + public static final String NM_REMOTE_APP_LOG_DIR_SUFFIX = + NM_PREFIX + "remote-app-log-dir-suffix"; + public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX="logs"; + + public static final String YARN_LOG_SERVER_URL = + YARN_PREFIX + "log.server.url"; + + public static final String YARN_TRACKING_URL_GENERATOR = + YARN_PREFIX + "tracking.url.generator"; + + /** Amount of memory in GB that can be allocated for containers.*/ + public static final String NM_PMEM_MB = NM_PREFIX + "resource.memory-mb"; + public static final int DEFAULT_NM_PMEM_MB = 8 * 1024; + + /** Specifies whether physical memory check is enabled. */ + public static final String NM_PMEM_CHECK_ENABLED = NM_PREFIX + + "pmem-check-enabled"; + public static final boolean DEFAULT_NM_PMEM_CHECK_ENABLED = true; + + /** Specifies whether physical memory check is enabled. */ + public static final String NM_VMEM_CHECK_ENABLED = NM_PREFIX + + "vmem-check-enabled"; + public static final boolean DEFAULT_NM_VMEM_CHECK_ENABLED = true; + + /** Conversion ratio for physical memory to virtual memory. 
*/ + public static final String NM_VMEM_PMEM_RATIO = + NM_PREFIX + "vmem-pmem-ratio"; + public static final float DEFAULT_NM_VMEM_PMEM_RATIO = 2.1f; + + /** Number of Physical CPU Cores which can be allocated for containers.*/ + public static final String NM_VCORES = NM_PREFIX + "resource.cpu-cores"; + public static final int DEFAULT_NM_VCORES = 8; + + /** Conversion ratio for physical cores to virtual cores. */ + public static final String NM_VCORES_PCORES_RATIO = + NM_PREFIX + "vcores-pcores-ratio"; + public static final float DEFAULT_NM_VCORES_PCORES_RATIO = 2.0f; + + /** NM Webapp address.**/ + public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address"; + public static final int DEFAULT_NM_WEBAPP_PORT = 8042; + public static final String DEFAULT_NM_WEBAPP_ADDRESS = "0.0.0.0:" + + DEFAULT_NM_WEBAPP_PORT; + + /** How often to monitor containers.*/ + public final static String NM_CONTAINER_MON_INTERVAL_MS = + NM_PREFIX + "container-monitor.interval-ms"; + public final static int DEFAULT_NM_CONTAINER_MON_INTERVAL_MS = 3000; + + /** Class that calculates containers current resource utilization.*/ + public static final String NM_CONTAINER_MON_RESOURCE_CALCULATOR = + NM_PREFIX + "container-monitor.resource-calculator.class"; + /** Class that calculates process tree resource utilization.*/ + public static final String NM_CONTAINER_MON_PROCESS_TREE = + NM_PREFIX + "container-monitor.process-tree.class"; + + /** + * Enable/Disable disks' health checker. Default is true. + * An expert level configuration property. + */ + public static final String NM_DISK_HEALTH_CHECK_ENABLE = + NM_PREFIX + "disk-health-checker.enable"; + /** Frequency of running disks' health checker.*/ + public static final String NM_DISK_HEALTH_CHECK_INTERVAL_MS = + NM_PREFIX + "disk-health-checker.interval-ms"; + /** By default, disks' health is checked every 2 minutes. 
*/ + public static final long DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS = + 2 * 60 * 1000; + + /** + * The minimum fraction of number of disks to be healthy for the nodemanager + * to launch new containers. This applies to nm-local-dirs and nm-log-dirs. + */ + public static final String NM_MIN_HEALTHY_DISKS_FRACTION = + NM_PREFIX + "disk-health-checker.min-healthy-disks"; + /** + * By default, at least 5% of disks are to be healthy to say that the node + * is healthy in terms of disks. + */ + public static final float DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION + = 0.25F; + + /** Frequency of running node health script.*/ + public static final String NM_HEALTH_CHECK_INTERVAL_MS = + NM_PREFIX + "health-checker.interval-ms"; + public static final long DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS = 10 * 60 * 1000; + + /** Health check script time out period.*/ + public static final String NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS = + NM_PREFIX + "health-checker.script.timeout-ms"; + public static final long DEFAULT_NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS = + 2 * DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS; + + /** The health check script to run.*/ + public static final String NM_HEALTH_CHECK_SCRIPT_PATH = + NM_PREFIX + "health-checker.script.path"; + + /** The arguments to pass to the health check script.*/ + public static final String NM_HEALTH_CHECK_SCRIPT_OPTS = + NM_PREFIX + "health-checker.script.opts"; + + /** The path to the Linux container executor.*/ + public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH = + NM_PREFIX + "linux-container-executor.path"; + + /** + * The UNIX group that the linux-container-executor should run as. + * This is intended to be set as part of container-executor.cfg. + */ + public static final String NM_LINUX_CONTAINER_GROUP = + NM_PREFIX + "linux-container-executor.group"; + + /** The type of resource enforcement to use with the + * linux container executor. 
+ */ + public static final String NM_LINUX_CONTAINER_RESOURCES_HANDLER = + NM_PREFIX + "linux-container-executor.resources-handler.class"; + + /** The path the linux container executor should use for cgroups */ + public static final String NM_LINUX_CONTAINER_CGROUPS_HIERARCHY = + NM_PREFIX + "linux-container-executor.cgroups.hierarchy"; + + /** Whether the linux container executor should mount cgroups if not found */ + public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT = + NM_PREFIX + "linux-container-executor.cgroups.mount"; + + /** Where the linux container executor should mount cgroups if not found */ + public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH = + NM_PREFIX + "linux-container-executor.cgroups.mount-path"; + + /** T-file compression types used to compress aggregated logs.*/ + public static final String NM_LOG_AGG_COMPRESSION_TYPE = + NM_PREFIX + "log-aggregation.compression-type"; + public static final String DEFAULT_NM_LOG_AGG_COMPRESSION_TYPE = "none"; + + /** The kerberos principal for the node manager.*/ + public static final String NM_PRINCIPAL = + NM_PREFIX + "principal"; + + public static final String NM_AUX_SERVICES = + NM_PREFIX + "aux-services"; + + public static final String NM_AUX_SERVICE_FMT = + NM_PREFIX + "aux-services.%s.class"; + + public static final String NM_USER_HOME_DIR = + NM_PREFIX + "user-home-dir"; + + public static final String DEFAULT_NM_USER_HOME_DIR= "/home/"; + + //////////////////////////////// + // Web Proxy Configs + //////////////////////////////// + public static final String PROXY_PREFIX = "yarn.web-proxy."; + + /** The kerberos principal for the proxy.*/ + public static final String PROXY_PRINCIPAL = + PROXY_PREFIX + "principal"; + + /** Keytab for Proxy.*/ + public static final String PROXY_KEYTAB = PROXY_PREFIX + "keytab"; + + /** The address for the web proxy.*/ + public static final String PROXY_ADDRESS = + PROXY_PREFIX + "address"; + + /** + * YARN Service Level Authorization + */ + 
public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER = + "security.resourcetracker.protocol.acl"; + public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_CLIENT_RESOURCEMANAGER = + "security.client.resourcemanager.protocol.acl"; + public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_ADMIN = + "security.admin.protocol.acl"; + public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_RESOURCEMANAGER = + "security.applicationmaster.resourcemanager.protocol.acl"; + + public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGER = + "security.containermanager.protocol.acl"; + public static final String + YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER = + "security.resourcelocalizer.protocol.acl"; + + /** No. of milliseconds to wait between sending a SIGTERM and SIGKILL + * to a running container */ + public static final String NM_SLEEP_DELAY_BEFORE_SIGKILL_MS = + NM_PREFIX + "sleep-delay-before-sigkill.ms"; + public static final long DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS = + 250; + + /** Max time to wait for a process to come up when trying to cleanup + * container resources */ + public static final String NM_PROCESS_KILL_WAIT_MS = + NM_PREFIX + "process-kill-wait.ms"; + public static final long DEFAULT_NM_PROCESS_KILL_WAIT_MS = + 2000; + + /** Max time to wait to establish a connection to RM when NM starts + */ + public static final String RESOURCEMANAGER_CONNECT_WAIT_SECS = + NM_PREFIX + "resourcemanager.connect.wait.secs"; + public static final int DEFAULT_RESOURCEMANAGER_CONNECT_WAIT_SECS = + 15*60; + + /** Time interval between each NM attempt to connect to RM + */ + public static final String RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS = + NM_PREFIX + "resourcemanager.connect.retry_interval.secs"; + public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS + = 30; + + /** + * CLASSPATH for YARN applications. 
A comma-separated list of CLASSPATH + * entries + */ + public static final String YARN_APPLICATION_CLASSPATH = YARN_PREFIX + + "application.classpath"; + + /** + * Default CLASSPATH for YARN applications. A comma-separated list of + * CLASSPATH entries + */ + public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = { + ApplicationConstants.Environment.HADOOP_CONF_DIR.$(), + ApplicationConstants.Environment.HADOOP_COMMON_HOME.$() + + "/share/hadoop/common/*", + ApplicationConstants.Environment.HADOOP_COMMON_HOME.$() + + "/share/hadoop/common/lib/*", + ApplicationConstants.Environment.HADOOP_HDFS_HOME.$() + + "/share/hadoop/hdfs/*", + ApplicationConstants.Environment.HADOOP_HDFS_HOME.$() + + "/share/hadoop/hdfs/lib/*", + ApplicationConstants.Environment.HADOOP_YARN_HOME.$() + + "/share/hadoop/yarn/*", + ApplicationConstants.Environment.HADOOP_YARN_HOME.$() + + "/share/hadoop/yarn/lib/*" }; + + /** Container temp directory */ + public static final String DEFAULT_CONTAINER_TEMP_DIR = "./tmp"; + + public static final String IS_MINI_YARN_CLUSTER = YARN_PREFIX + + "is.minicluster"; + + /** Whether to use fixed ports with the minicluster. */ + public static final String YARN_MINICLUSTER_FIXED_PORTS = YARN_PREFIX + + "minicluster.fixed.ports"; + + /** + * Default is false to be able to run tests concurrently without port + * conflicts. + */ + public static boolean DEFAULT_YARN_MINICLUSTER_FIXED_PORTS = false; + + //////////////////////////////// + // Other Configs + //////////////////////////////// + + /** + * The interval of the yarn client's querying application state after + * application submission. The unit is millisecond. 
+ */ + public static final String YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS = + YARN_PREFIX + "client.app-submission.poll-interval"; + public static final long DEFAULT_YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS = + 1000; + + public YarnConfiguration() { + super(); + } + + public YarnConfiguration(Configuration conf) { + super(conf); + if (! (conf instanceof YarnConfiguration)) { + this.reloadConfiguration(); + } + } + + public static String getProxyHostAndPort(Configuration conf) { + String addr = conf.get(PROXY_ADDRESS); + if(addr == null || addr.isEmpty()) { + addr = getRMWebAppHostAndPort(conf); + } + return addr; + } + + public static String getRMWebAppHostAndPort(Configuration conf) { + InetSocketAddress address = conf.getSocketAddr( + YarnConfiguration.RM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_RM_WEBAPP_PORT); + address = NetUtils.getConnectAddress(address); + StringBuffer sb = new StringBuffer(); + InetAddress resolved = address.getAddress(); + if (resolved == null || resolved.isAnyLocalAddress() || + resolved.isLoopbackAddress()) { + String lh = address.getHostName(); + try { + lh = InetAddress.getLocalHost().getCanonicalHostName(); + } catch (UnknownHostException e) { + //Ignore and fallback. 
+ } + sb.append(lh); + } else { + sb.append(address.getHostName()); + } + sb.append(":").append(address.getPort()); + return sb.toString(); + } + + public static String getRMWebAppURL(Configuration conf) { + return JOINER.join("http://", getRMWebAppHostAndPort(conf)); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java new file mode 100644 index 0000000..e62d5a6 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.factories; + +import org.apache.hadoop.yarn.YarnException; + + +public interface RecordFactory { + public <T> T newRecordInstance(Class<T> clazz) throws YarnException; +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java new file mode 100644 index 0000000..1866b05 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.factory.providers; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.YarnException; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.factories.RecordFactory; + +public class RecordFactoryProvider { + private static Configuration defaultConf; + + static { + defaultConf = new Configuration(); + } + + private RecordFactoryProvider() { + } + + public static RecordFactory getRecordFactory(Configuration conf) { + if (conf == null) { + //Assuming the default configuration has the correct factories set. + //Users can specify a particular factory by providing a configuration. + conf = defaultConf; + } + String recordFactoryClassName = conf.get(YarnConfiguration.IPC_RECORD_FACTORY_CLASS, YarnConfiguration.DEFAULT_IPC_RECORD_FACTORY_CLASS); + return (RecordFactory) getFactoryClassInstance(recordFactoryClassName); +// if (recordFactoryClassName == null) { +// String serializer = conf.get(YarnConfiguration.IPC_SERIALIZER_TYPE, YarnConfiguration.DEFAULT_IPC_SERIALIZER_TYPE); +// if (serializer.equals(YarnConfiguration.DEFAULT_IPC_SERIALIZER_TYPE)) { +// return RecordFactoryPBImpl.get(); +// } else { +// throw new YarnException("Unknown serializer: [" + conf.get(YarnConfiguration.IPC_SERIALIZER_TYPE) + "]. 
Use keys: [" + YarnConfiguration.IPC_RECORD_FACTORY_CLASS + "] to specify Record factory"); +// } +// } else { +// +// } + } + + private static Object getFactoryClassInstance(String factoryClassName) { + try { + Class clazz = Class.forName(factoryClassName); + Method method = clazz.getMethod("get", null); + method.setAccessible(true); + return method.invoke(null, null); + } catch (ClassNotFoundException e) { + throw new YarnException(e); + } catch (NoSuchMethodException e) { + throw new YarnException(e); + } catch (InvocationTargetException e) { + throw new YarnException(e); + } catch (IllegalAccessException e) { + throw new YarnException(e); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/Records.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/Records.java new file mode 100644 index 0000000..60711fd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/Records.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.util; + +import org.apache.hadoop.yarn.factories.RecordFactory; +import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; + +/** + * Convenient API record utils + */ +public class Records { + // The default record factory + private static final RecordFactory factory = + RecordFactoryProvider.getRecordFactory(null); + + public static <T> T newRecord(Class<T> cls) { + return factory.newRecordInstance(cls); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java index ccfc8d9..c45f78f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java @@ -36,7 +36,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; -import org.apache.hadoop.yarn.util.Records; import org.junit.Test; public class TestYarnClient { @@ -80,9 +79,8 @@ public void testSubmitApplication() { for (int i = 0; i < exitStates.length; ++i) { ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class); - ApplicationId applicationId = Records.newRecord(ApplicationId.class); - applicationId.setClusterTimestamp(System.currentTimeMillis()); - applicationId.setId(i); + ApplicationId applicationId = ApplicationId.createApplicationId( + System.currentTimeMillis(), i); when(context.getApplicationId()).thenReturn(applicationId); ((MockYarnClient) client).setYarnApplicationState(exitStates[i]); try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnException.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnException.java deleted file mode 100644 index b12fc81..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/YarnException.java +++ /dev/null @@ -1,29 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn; - -/** Base Yarn Exception. - */ -public class YarnException extends RuntimeException { - public YarnException(Throwable cause) { super(cause); } - public YarnException(String message) { super(message); } - public YarnException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java deleted file mode 100644 index f9b017d..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ /dev/null @@ -1,755 +0,0 @@ -/** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. 
See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package org.apache.hadoop.yarn.conf; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Arrays; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.api.ApplicationConstants; - -import com.google.common.base.Joiner; - -public class YarnConfiguration extends Configuration { - - private static final Joiner JOINER = Joiner.on(""); - - private static final String YARN_DEFAULT_XML_FILE = "yarn-default.xml"; - private static final String YARN_SITE_XML_FILE = "yarn-site.xml"; - - static { - Configuration.addDefaultResource(YARN_DEFAULT_XML_FILE); - Configuration.addDefaultResource(YARN_SITE_XML_FILE); - } - - //Configurations - - public static final String YARN_PREFIX = "yarn."; - - /** Delay before deleting resource to ease debugging of NM issues */ - public static final String DEBUG_NM_DELETE_DELAY_SEC = - YarnConfiguration.NM_PREFIX + "delete.debug-delay-sec"; - - //////////////////////////////// - // IPC Configs - //////////////////////////////// - public static final String IPC_PREFIX = YARN_PREFIX + "ipc."; - - /** Factory to create client IPC classes.*/ - public static final String IPC_CLIENT_FACTORY = - 
IPC_PREFIX + "client.factory.class"; - - /** Type of serialization to use.*/ - public static final String IPC_SERIALIZER_TYPE = - IPC_PREFIX + "serializer.type"; - public static final String DEFAULT_IPC_SERIALIZER_TYPE = "protocolbuffers"; - - /** Factory to create server IPC classes.*/ - public static final String IPC_SERVER_FACTORY = - IPC_PREFIX + "server.factory.class"; - - /** Factory to create IPC exceptions.*/ - public static final String IPC_EXCEPTION_FACTORY = - IPC_PREFIX + "exception.factory.class"; - - /** Factory to create serializeable records.*/ - public static final String IPC_RECORD_FACTORY = - IPC_PREFIX + "record.factory.class"; - - /** RPC class implementation*/ - public static final String IPC_RPC_IMPL = - IPC_PREFIX + "rpc.class"; - public static final String DEFAULT_IPC_RPC_IMPL = - "org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC"; - - //////////////////////////////// - // Resource Manager Configs - //////////////////////////////// - public static final String RM_PREFIX = "yarn.resourcemanager."; - - /** The address of the applications manager interface in the RM.*/ - public static final String RM_ADDRESS = - RM_PREFIX + "address"; - public static final int DEFAULT_RM_PORT = 8032; - public static final String DEFAULT_RM_ADDRESS = - "0.0.0.0:" + DEFAULT_RM_PORT; - - /** The number of threads used to handle applications manager requests.*/ - public static final String RM_CLIENT_THREAD_COUNT = - RM_PREFIX + "client.thread-count"; - public static final int DEFAULT_RM_CLIENT_THREAD_COUNT = 50; - - /** The Kerberos principal for the resource manager.*/ - public static final String RM_PRINCIPAL = - RM_PREFIX + "principal"; - - /** The address of the scheduler interface.*/ - public static final String RM_SCHEDULER_ADDRESS = - RM_PREFIX + "scheduler.address"; - public static final int DEFAULT_RM_SCHEDULER_PORT = 8030; - public static final String DEFAULT_RM_SCHEDULER_ADDRESS = "0.0.0.0:" + - DEFAULT_RM_SCHEDULER_PORT; - - /** Miniumum request 
grant-able by the RM scheduler. */ - public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB = - YARN_PREFIX + "scheduler.minimum-allocation-mb"; - public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024; - public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = - YARN_PREFIX + "scheduler.minimum-allocation-vcores"; - public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = 1; - - /** Maximum request grant-able by the RM scheduler. */ - public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = - YARN_PREFIX + "scheduler.maximum-allocation-mb"; - public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = 8192; - public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES = - YARN_PREFIX + "scheduler.maximum-allocation-vcores"; - public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES = 4; - - /** Number of threads to handle scheduler interface.*/ - public static final String RM_SCHEDULER_CLIENT_THREAD_COUNT = - RM_PREFIX + "scheduler.client.thread-count"; - public static final int DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT = 50; - - /** The address of the RM web application.*/ - public static final String RM_WEBAPP_ADDRESS = - RM_PREFIX + "webapp.address"; - - public static final int DEFAULT_RM_WEBAPP_PORT = 8088; - public static final String DEFAULT_RM_WEBAPP_ADDRESS = "0.0.0.0:" + - DEFAULT_RM_WEBAPP_PORT; - - public static final String RM_RESOURCE_TRACKER_ADDRESS = - RM_PREFIX + "resource-tracker.address"; - public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031; - public static final String DEFAULT_RM_RESOURCE_TRACKER_ADDRESS = - "0.0.0.0:" + DEFAULT_RM_RESOURCE_TRACKER_PORT; - - /** The expiry interval for application master reporting.*/ - public static final String RM_AM_EXPIRY_INTERVAL_MS = - YARN_PREFIX + "am.liveness-monitor.expiry-interval-ms"; - public static final int DEFAULT_RM_AM_EXPIRY_INTERVAL_MS = 600000; - - /** How long to wait until a node manager is 
considered dead.*/ - public static final String RM_NM_EXPIRY_INTERVAL_MS = - YARN_PREFIX + "nm.liveness-monitor.expiry-interval-ms"; - public static final int DEFAULT_RM_NM_EXPIRY_INTERVAL_MS = 600000; - - /** Are acls enabled.*/ - public static final String YARN_ACL_ENABLE = - YARN_PREFIX + "acl.enable"; - public static final boolean DEFAULT_YARN_ACL_ENABLE = false; - - /** ACL of who can be admin of YARN cluster.*/ - public static final String YARN_ADMIN_ACL = - YARN_PREFIX + "admin.acl"; - public static final String DEFAULT_YARN_ADMIN_ACL = "*"; - - /** ACL used in case none is found. Allows nothing. */ - public static final String DEFAULT_YARN_APP_ACL = " "; - - /** The address of the RM admin interface.*/ - public static final String RM_ADMIN_ADDRESS = - RM_PREFIX + "admin.address"; - public static final int DEFAULT_RM_ADMIN_PORT = 8033; - public static final String DEFAULT_RM_ADMIN_ADDRESS = "0.0.0.0:" + - DEFAULT_RM_ADMIN_PORT; - - /**Number of threads used to handle RM admin interface.*/ - public static final String RM_ADMIN_CLIENT_THREAD_COUNT = - RM_PREFIX + "admin.client.thread-count"; - public static final int DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT = 1; - - /** - * The maximum number of application attempts. - * It's a global setting for all application masters. 
- */ - public static final String RM_AM_MAX_ATTEMPTS = - RM_PREFIX + "am.max-attempts"; - public static final int DEFAULT_RM_AM_MAX_ATTEMPTS = 2; - - /** The keytab for the resource manager.*/ - public static final String RM_KEYTAB = - RM_PREFIX + "keytab"; - - /** How long to wait until a container is considered dead.*/ - public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = - RM_PREFIX + "rm.container-allocation.expiry-interval-ms"; - public static final int DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = 600000; - - /** Path to file with nodes to include.*/ - public static final String RM_NODES_INCLUDE_FILE_PATH = - RM_PREFIX + "nodes.include-path"; - public static final String DEFAULT_RM_NODES_INCLUDE_FILE_PATH = ""; - - /** Path to file with nodes to exclude.*/ - public static final String RM_NODES_EXCLUDE_FILE_PATH = - RM_PREFIX + "nodes.exclude-path"; - public static final String DEFAULT_RM_NODES_EXCLUDE_FILE_PATH = ""; - - /** Number of threads to handle resource tracker calls.*/ - public static final String RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT = - RM_PREFIX + "resource-tracker.client.thread-count"; - public static final int DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT = 50; - - /** The class to use as the resource scheduler.*/ - public static final String RM_SCHEDULER = - RM_PREFIX + "scheduler.class"; - - public static final String DEFAULT_RM_SCHEDULER = - "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler"; - - /** RM set next Heartbeat interval for NM */ - public static final String RM_NM_HEARTBEAT_INTERVAL_MS = - RM_PREFIX + "nodemanagers.heartbeat-interval-ms"; - public static final long DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS = 1000; - - //Delegation token related keys - public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY = - RM_PREFIX + "delegation.key.update-interval"; - public static final long DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT = - 24*60*60*1000; // 1 day - public static final String 
DELEGATION_TOKEN_RENEW_INTERVAL_KEY = - RM_PREFIX + "delegation.token.renew-interval"; - public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT = - 24*60*60*1000; // 1 day - public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY = - RM_PREFIX + "delegation.token.max-lifetime"; - public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT = - 7*24*60*60*1000; // 7 days - - public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled"; - public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false; - - /** The class to use as the persistent store.*/ - public static final String RM_STORE = RM_PREFIX + "store.class"; - - /** URI for FileSystemRMStateStore */ - public static final String FS_RM_STATE_STORE_URI = - RM_PREFIX + "fs.rm-state-store.uri"; - - /** The maximum number of completed applications RM keeps. */ - public static final String RM_MAX_COMPLETED_APPLICATIONS = - RM_PREFIX + "max-completed-applications"; - public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 10000; - - /** Default application name */ - public static final String DEFAULT_APPLICATION_NAME = "N/A"; - - /** Default queue name */ - public static final String DEFAULT_QUEUE_NAME = "default"; - - /** - * Buckets (in minutes) for the number of apps running in each queue. - */ - public static final String RM_METRICS_RUNTIME_BUCKETS = - RM_PREFIX + "metrics.runtime.buckets"; - - /** - * Default sizes of the runtime metric buckets in minutes. 
- */ - public static final String DEFAULT_RM_METRICS_RUNTIME_BUCKETS = - "60,300,1440"; - - public static final String RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = RM_PREFIX - + "application-tokens.master-key-rolling-interval-secs"; - - public static final long DEFAULT_RM_APP_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = - 24 * 60 * 60; - - public static final String RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = - RM_PREFIX + "container-tokens.master-key-rolling-interval-secs"; - - public static final long DEFAULT_RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = - 24 * 60 * 60; - - //////////////////////////////// - // Node Manager Configs - //////////////////////////////// - - /** Prefix for all node manager configs.*/ - public static final String NM_PREFIX = "yarn.nodemanager."; - - /** Environment variables that will be sent to containers.*/ - public static final String NM_ADMIN_USER_ENV = NM_PREFIX + "admin-env"; - public static final String DEFAULT_NM_ADMIN_USER_ENV = "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX"; - - /** Environment variables that containers may override rather than use NodeManager's default.*/ - public static final String NM_ENV_WHITELIST = NM_PREFIX + "env-whitelist"; - public static final String DEFAULT_NM_ENV_WHITELIST = StringUtils.join(",", - Arrays.asList(ApplicationConstants.Environment.JAVA_HOME.key(), - ApplicationConstants.Environment.HADOOP_COMMON_HOME.key(), - ApplicationConstants.Environment.HADOOP_HDFS_HOME.key(), - ApplicationConstants.Environment.HADOOP_CONF_DIR.key(), - ApplicationConstants.Environment.HADOOP_YARN_HOME.key())); - - /** address of node manager IPC.*/ - public static final String NM_ADDRESS = NM_PREFIX + "address"; - public static final int DEFAULT_NM_PORT = 0; - public static final String DEFAULT_NM_ADDRESS = "0.0.0.0:" - + DEFAULT_NM_PORT; - - /** who will execute(launch) the containers.*/ - public static final String NM_CONTAINER_EXECUTOR = - NM_PREFIX + "container-executor.class"; - - /** - * Adjustment 
to make to the container os scheduling priority. - * The valid values for this could vary depending on the platform. - * On Linux, higher values mean run the containers at a less - * favorable priority than the NM. - * The value specified is an int. - */ - public static final String NM_CONTAINER_EXECUTOR_SCHED_PRIORITY = - NM_PREFIX + "container-executor.os.sched.priority.adjustment"; - public static final int DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY = 0; - - /** Number of threads container manager uses.*/ - public static final String NM_CONTAINER_MGR_THREAD_COUNT = - NM_PREFIX + "container-manager.thread-count"; - public static final int DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT = 20; - - /** Number of threads used in cleanup.*/ - public static final String NM_DELETE_THREAD_COUNT = - NM_PREFIX + "delete.thread-count"; - public static final int DEFAULT_NM_DELETE_THREAD_COUNT = 4; - - /** Keytab for NM.*/ - public static final String NM_KEYTAB = NM_PREFIX + "keytab"; - - /**List of directories to store localized files in.*/ - public static final String NM_LOCAL_DIRS = NM_PREFIX + "local-dirs"; - public static final String DEFAULT_NM_LOCAL_DIRS = "/tmp/nm-local-dir"; - - /** - * Number of files in each localized directories - * Avoid tuning this too low. 
- */ - public static final String NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY = - NM_PREFIX + "local-cache.max-files-per-directory"; - public static final int DEFAULT_NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY = 8192; - - /** Address where the localizer IPC is.*/ - public static final String NM_LOCALIZER_ADDRESS = - NM_PREFIX + "localizer.address"; - public static final int DEFAULT_NM_LOCALIZER_PORT = 8040; - public static final String DEFAULT_NM_LOCALIZER_ADDRESS = "0.0.0.0:" + - DEFAULT_NM_LOCALIZER_PORT; - - /** Interval in between cache cleanups.*/ - public static final String NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS = - NM_PREFIX + "localizer.cache.cleanup.interval-ms"; - public static final long DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS = - 10 * 60 * 1000; - - /** Target size of localizer cache in MB, per local directory.*/ - public static final String NM_LOCALIZER_CACHE_TARGET_SIZE_MB = - NM_PREFIX + "localizer.cache.target-size-mb"; - public static final long DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB = 10 * 1024; - - /** Number of threads to handle localization requests.*/ - public static final String NM_LOCALIZER_CLIENT_THREAD_COUNT = - NM_PREFIX + "localizer.client.thread-count"; - public static final int DEFAULT_NM_LOCALIZER_CLIENT_THREAD_COUNT = 5; - - /** Number of threads to use for localization fetching.*/ - public static final String NM_LOCALIZER_FETCH_THREAD_COUNT = - NM_PREFIX + "localizer.fetch.thread-count"; - public static final int DEFAULT_NM_LOCALIZER_FETCH_THREAD_COUNT = 4; - - /** Where to store container logs.*/ - public static final String NM_LOG_DIRS = NM_PREFIX + "log-dirs"; - public static final String DEFAULT_NM_LOG_DIRS = "/tmp/logs"; - - /** Interval at which the delayed token removal thread runs */ - public static final String RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = - RM_PREFIX + "delayed.delegation-token.removal-interval-ms"; - public static final long DEFAULT_RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = - 30000l; - - /** 
Whether to enable log aggregation */ - public static final String LOG_AGGREGATION_ENABLED = YARN_PREFIX - + "log-aggregation-enable"; - public static final boolean DEFAULT_LOG_AGGREGATION_ENABLED = false; - - /** - * How long to wait before deleting aggregated logs, -1 disables. - * Be careful set this too small and you will spam the name node. - */ - public static final String LOG_AGGREGATION_RETAIN_SECONDS = YARN_PREFIX - + "log-aggregation.retain-seconds"; - public static final long DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS = -1; - - /** - * How long to wait between aggregated log retention checks. If set to - * a value <= 0 then the value is computed as one-tenth of the log retention - * setting. Be careful set this too small and you will spam the name node. - */ - public static final String LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = - YARN_PREFIX + "log-aggregation.retain-check-interval-seconds"; - public static final long DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = -1; - - /** - * Number of seconds to retain logs on the NodeManager. Only applicable if Log - * aggregation is disabled - */ - public static final String NM_LOG_RETAIN_SECONDS = NM_PREFIX - + "log.retain-seconds"; - - /** - * Number of threads used in log cleanup. 
Only applicable if Log aggregation - * is disabled - */ - public static final String NM_LOG_DELETION_THREADS_COUNT = - NM_PREFIX + "log.deletion-threads-count"; - public static final int DEFAULT_NM_LOG_DELETE_THREAD_COUNT = 4; - - /** Where to aggregate logs to.*/ - public static final String NM_REMOTE_APP_LOG_DIR = - NM_PREFIX + "remote-app-log-dir"; - public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR = "/tmp/logs"; - - /** - * The remote log dir will be created at - * NM_REMOTE_APP_LOG_DIR/${user}/NM_REMOTE_APP_LOG_DIR_SUFFIX/${appId} - */ - public static final String NM_REMOTE_APP_LOG_DIR_SUFFIX = - NM_PREFIX + "remote-app-log-dir-suffix"; - public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX="logs"; - - public static final String YARN_LOG_SERVER_URL = - YARN_PREFIX + "log.server.url"; - - public static final String YARN_TRACKING_URL_GENERATOR = - YARN_PREFIX + "tracking.url.generator"; - - /** Amount of memory in GB that can be allocated for containers.*/ - public static final String NM_PMEM_MB = NM_PREFIX + "resource.memory-mb"; - public static final int DEFAULT_NM_PMEM_MB = 8 * 1024; - - /** Specifies whether physical memory check is enabled. */ - public static final String NM_PMEM_CHECK_ENABLED = NM_PREFIX - + "pmem-check-enabled"; - public static final boolean DEFAULT_NM_PMEM_CHECK_ENABLED = true; - - /** Specifies whether physical memory check is enabled. */ - public static final String NM_VMEM_CHECK_ENABLED = NM_PREFIX - + "vmem-check-enabled"; - public static final boolean DEFAULT_NM_VMEM_CHECK_ENABLED = true; - - /** Conversion ratio for physical memory to virtual memory. 
*/ - public static final String NM_VMEM_PMEM_RATIO = - NM_PREFIX + "vmem-pmem-ratio"; - public static final float DEFAULT_NM_VMEM_PMEM_RATIO = 2.1f; - - /** Number of Physical CPU Cores which can be allocated for containers.*/ - public static final String NM_VCORES = NM_PREFIX + "resource.cpu-cores"; - public static final int DEFAULT_NM_VCORES = 8; - - /** Conversion ratio for physical cores to virtual cores. */ - public static final String NM_VCORES_PCORES_RATIO = - NM_PREFIX + "vcores-pcores-ratio"; - public static final float DEFAULT_NM_VCORES_PCORES_RATIO = 2.0f; - - /** NM Webapp address.**/ - public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address"; - public static final int DEFAULT_NM_WEBAPP_PORT = 8042; - public static final String DEFAULT_NM_WEBAPP_ADDRESS = "0.0.0.0:" + - DEFAULT_NM_WEBAPP_PORT; - - /** How often to monitor containers.*/ - public final static String NM_CONTAINER_MON_INTERVAL_MS = - NM_PREFIX + "container-monitor.interval-ms"; - public final static int DEFAULT_NM_CONTAINER_MON_INTERVAL_MS = 3000; - - /** Class that calculates containers current resource utilization.*/ - public static final String NM_CONTAINER_MON_RESOURCE_CALCULATOR = - NM_PREFIX + "container-monitor.resource-calculator.class"; - /** Class that calculates process tree resource utilization.*/ - public static final String NM_CONTAINER_MON_PROCESS_TREE = - NM_PREFIX + "container-monitor.process-tree.class"; - - /** - * Enable/Disable disks' health checker. Default is true. - * An expert level configuration property. - */ - public static final String NM_DISK_HEALTH_CHECK_ENABLE = - NM_PREFIX + "disk-health-checker.enable"; - /** Frequency of running disks' health checker.*/ - public static final String NM_DISK_HEALTH_CHECK_INTERVAL_MS = - NM_PREFIX + "disk-health-checker.interval-ms"; - /** By default, disks' health is checked every 2 minutes. 
*/ - public static final long DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS = - 2 * 60 * 1000; - - /** - * The minimum fraction of number of disks to be healthy for the nodemanager - * to launch new containers. This applies to nm-local-dirs and nm-log-dirs. - */ - public static final String NM_MIN_HEALTHY_DISKS_FRACTION = - NM_PREFIX + "disk-health-checker.min-healthy-disks"; - /** - * By default, at least 5% of disks are to be healthy to say that the node - * is healthy in terms of disks. - */ - public static final float DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION - = 0.25F; - - /** Frequency of running node health script.*/ - public static final String NM_HEALTH_CHECK_INTERVAL_MS = - NM_PREFIX + "health-checker.interval-ms"; - public static final long DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS = 10 * 60 * 1000; - - /** Health check script time out period.*/ - public static final String NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS = - NM_PREFIX + "health-checker.script.timeout-ms"; - public static final long DEFAULT_NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS = - 2 * DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS; - - /** The health check script to run.*/ - public static final String NM_HEALTH_CHECK_SCRIPT_PATH = - NM_PREFIX + "health-checker.script.path"; - - /** The arguments to pass to the health check script.*/ - public static final String NM_HEALTH_CHECK_SCRIPT_OPTS = - NM_PREFIX + "health-checker.script.opts"; - - /** The path to the Linux container executor.*/ - public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH = - NM_PREFIX + "linux-container-executor.path"; - - /** - * The UNIX group that the linux-container-executor should run as. - * This is intended to be set as part of container-executor.cfg. - */ - public static final String NM_LINUX_CONTAINER_GROUP = - NM_PREFIX + "linux-container-executor.group"; - - /** The type of resource enforcement to use with the - * linux container executor. 
- */ - public static final String NM_LINUX_CONTAINER_RESOURCES_HANDLER = - NM_PREFIX + "linux-container-executor.resources-handler.class"; - - /** The path the linux container executor should use for cgroups */ - public static final String NM_LINUX_CONTAINER_CGROUPS_HIERARCHY = - NM_PREFIX + "linux-container-executor.cgroups.hierarchy"; - - /** Whether the linux container executor should mount cgroups if not found */ - public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT = - NM_PREFIX + "linux-container-executor.cgroups.mount"; - - /** Where the linux container executor should mount cgroups if not found */ - public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH = - NM_PREFIX + "linux-container-executor.cgroups.mount-path"; - - /** T-file compression types used to compress aggregated logs.*/ - public static final String NM_LOG_AGG_COMPRESSION_TYPE = - NM_PREFIX + "log-aggregation.compression-type"; - public static final String DEFAULT_NM_LOG_AGG_COMPRESSION_TYPE = "none"; - - /** The kerberos principal for the node manager.*/ - public static final String NM_PRINCIPAL = - NM_PREFIX + "principal"; - - public static final String NM_AUX_SERVICES = - NM_PREFIX + "aux-services"; - - public static final String NM_AUX_SERVICE_FMT = - NM_PREFIX + "aux-services.%s.class"; - - public static final String NM_USER_HOME_DIR = - NM_PREFIX + "user-home-dir"; - - public static final String DEFAULT_NM_USER_HOME_DIR= "/home/"; - - //////////////////////////////// - // Web Proxy Configs - //////////////////////////////// - public static final String PROXY_PREFIX = "yarn.web-proxy."; - - /** The kerberos principal for the proxy.*/ - public static final String PROXY_PRINCIPAL = - PROXY_PREFIX + "principal"; - - /** Keytab for Proxy.*/ - public static final String PROXY_KEYTAB = PROXY_PREFIX + "keytab"; - - /** The address for the web proxy.*/ - public static final String PROXY_ADDRESS = - PROXY_PREFIX + "address"; - - /** - * YARN Service Level Authorization - */ - 
public static final String - YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER = - "security.resourcetracker.protocol.acl"; - public static final String - YARN_SECURITY_SERVICE_AUTHORIZATION_CLIENT_RESOURCEMANAGER = - "security.client.resourcemanager.protocol.acl"; - public static final String - YARN_SECURITY_SERVICE_AUTHORIZATION_ADMIN = - "security.admin.protocol.acl"; - public static final String - YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_RESOURCEMANAGER = - "security.applicationmaster.resourcemanager.protocol.acl"; - - public static final String - YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGER = - "security.containermanager.protocol.acl"; - public static final String - YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER = - "security.resourcelocalizer.protocol.acl"; - - /** No. of milliseconds to wait between sending a SIGTERM and SIGKILL - * to a running container */ - public static final String NM_SLEEP_DELAY_BEFORE_SIGKILL_MS = - NM_PREFIX + "sleep-delay-before-sigkill.ms"; - public static final long DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS = - 250; - - /** Max time to wait for a process to come up when trying to cleanup - * container resources */ - public static final String NM_PROCESS_KILL_WAIT_MS = - NM_PREFIX + "process-kill-wait.ms"; - public static final long DEFAULT_NM_PROCESS_KILL_WAIT_MS = - 2000; - - /** Max time to wait to establish a connection to RM when NM starts - */ - public static final String RESOURCEMANAGER_CONNECT_WAIT_SECS = - NM_PREFIX + "resourcemanager.connect.wait.secs"; - public static final int DEFAULT_RESOURCEMANAGER_CONNECT_WAIT_SECS = - 15*60; - - /** Time interval between each NM attempt to connect to RM - */ - public static final String RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS = - NM_PREFIX + "resourcemanager.connect.retry_interval.secs"; - public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS - = 30; - - /** - * CLASSPATH for YARN applications. 
A comma-separated list of CLASSPATH - * entries - */ - public static final String YARN_APPLICATION_CLASSPATH = YARN_PREFIX - + "application.classpath"; - - /** - * Default CLASSPATH for YARN applications. A comma-separated list of - * CLASSPATH entries - */ - public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = { - ApplicationConstants.Environment.HADOOP_CONF_DIR.$(), - ApplicationConstants.Environment.HADOOP_COMMON_HOME.$() - + "/share/hadoop/common/*", - ApplicationConstants.Environment.HADOOP_COMMON_HOME.$() - + "/share/hadoop/common/lib/*", - ApplicationConstants.Environment.HADOOP_HDFS_HOME.$() - + "/share/hadoop/hdfs/*", - ApplicationConstants.Environment.HADOOP_HDFS_HOME.$() - + "/share/hadoop/hdfs/lib/*", - ApplicationConstants.Environment.HADOOP_YARN_HOME.$() - + "/share/hadoop/yarn/*", - ApplicationConstants.Environment.HADOOP_YARN_HOME.$() - + "/share/hadoop/yarn/lib/*" }; - - /** Container temp directory */ - public static final String DEFAULT_CONTAINER_TEMP_DIR = "./tmp"; - - public static final String IS_MINI_YARN_CLUSTER = YARN_PREFIX - + "is.minicluster"; - - /** Whether to use fixed ports with the minicluster. */ - public static final String YARN_MINICLUSTER_FIXED_PORTS = YARN_PREFIX - + "minicluster.fixed.ports"; - - /** - * Default is false to be able to run tests concurrently without port - * conflicts. - */ - public static boolean DEFAULT_YARN_MINICLUSTER_FIXED_PORTS = false; - - //////////////////////////////// - // Other Configs - //////////////////////////////// - - /** - * The interval of the yarn client's querying application state after - * application submission. The unit is millisecond. 
- */ - public static final String YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS = - YARN_PREFIX + "client.app-submission.poll-interval"; - public static final long DEFAULT_YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS = - 1000; - - public YarnConfiguration() { - super(); - } - - public YarnConfiguration(Configuration conf) { - super(conf); - if (! (conf instanceof YarnConfiguration)) { - this.reloadConfiguration(); - } - } - - public static String getProxyHostAndPort(Configuration conf) { - String addr = conf.get(PROXY_ADDRESS); - if(addr == null || addr.isEmpty()) { - addr = getRMWebAppHostAndPort(conf); - } - return addr; - } - - public static String getRMWebAppHostAndPort(Configuration conf) { - InetSocketAddress address = conf.getSocketAddr( - YarnConfiguration.RM_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_RM_WEBAPP_PORT); - address = NetUtils.getConnectAddress(address); - StringBuffer sb = new StringBuffer(); - InetAddress resolved = address.getAddress(); - if (resolved == null || resolved.isAnyLocalAddress() || - resolved.isLoopbackAddress()) { - String lh = address.getHostName(); - try { - lh = InetAddress.getLocalHost().getCanonicalHostName(); - } catch (UnknownHostException e) { - //Ignore and fallback. 
- } - sb.append(lh); - } else { - sb.append(address.getHostName()); - } - sb.append(":").append(address.getPort()); - return sb.toString(); - } - - public static String getRMWebAppURL(Configuration conf) { - return JOINER.join("http://", getRMWebAppHostAndPort(conf)); - } - -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java deleted file mode 100644 index e62d5a6..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.yarn.factories; - -import org.apache.hadoop.yarn.YarnException; - - -public interface RecordFactory { - public T newRecordInstance(Class clazz) throws YarnException; -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java deleted file mode 100644 index 66ed159..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factory/providers/RecordFactoryProvider.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.yarn.factory.providers; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.yarn.YarnException; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.factories.RecordFactory; -import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl; - -public class RecordFactoryProvider { - private static Configuration defaultConf; - - static { - defaultConf = new Configuration(); - } - - private RecordFactoryProvider() { - } - - public static RecordFactory getRecordFactory(Configuration conf) { - if (conf == null) { - //Assuming the default configuration has the correct factories set. - //Users can specify a particular factory by providing a configuration. - conf = defaultConf; - } - String recordFactoryClassName = conf.get(YarnConfiguration.IPC_RECORD_FACTORY); - if (recordFactoryClassName == null) { - String serializer = conf.get(YarnConfiguration.IPC_SERIALIZER_TYPE, YarnConfiguration.DEFAULT_IPC_SERIALIZER_TYPE); - if (serializer.equals(YarnConfiguration.DEFAULT_IPC_SERIALIZER_TYPE)) { - return RecordFactoryPBImpl.get(); - } else { - throw new YarnException("Unknown serializer: [" + conf.get(YarnConfiguration.IPC_SERIALIZER_TYPE) + "]. 
Use keys: [" + YarnConfiguration.IPC_RECORD_FACTORY + "] to specify Record factory"); - } - } else { - return (RecordFactory) getFactoryClassInstance(recordFactoryClassName); - } - } - - private static Object getFactoryClassInstance(String factoryClassName) { - try { - Class clazz = Class.forName(factoryClassName); - Method method = clazz.getMethod("get", null); - method.setAccessible(true); - return method.invoke(null, null); - } catch (ClassNotFoundException e) { - throw new YarnException(e); - } catch (NoSuchMethodException e) { - throw new YarnException(e); - } catch (InvocationTargetException e) { - throw new YarnException(e); - } catch (IllegalAccessException e) { - throw new YarnException(e); - } - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java index b3baff7..f5c9fff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java @@ -18,15 +18,16 @@ package org.apache.hadoop.yarn.util; +import static org.apache.hadoop.yarn.util.StringHelper._split; +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.util.StringHelper.sjoin; + import java.util.Iterator; import java.util.Map; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; - -import static org.apache.hadoop.yarn.util.StringHelper.*; /** * Yarn application related utilities @@ -45,10 +46,9 @@ public static ApplicationId toAppID(String prefix, String s, Iterator it throwParseException(sjoin(prefix, ID), s); } shouldHaveNext(prefix, s, it); - ApplicationId appId = 
RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(Long.parseLong(it.next())); - shouldHaveNext(prefix, s, it); - appId.setId(Integer.parseInt(it.next())); + long timestamp = Long.parseLong(it.next()); + shouldHaveNext(prefix, s, it); + ApplicationId appId = ApplicationId.createApplicationId(timestamp, Integer.parseInt(it.next())); return appId; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java index 7dc25de..b990cb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java @@ -126,30 +126,20 @@ public static LocalResource newLocalResource(URI uri, visibility, size, timestamp); } + // TODO Consider getting rid of these in favour of using ApplicationId.createApplicationId everywhere.
public static ApplicationId newApplicationId(RecordFactory recordFactory, long clustertimestamp, CharSequence id) { - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setId(Integer.valueOf(id.toString())); - applicationId.setClusterTimestamp(clustertimestamp); - return applicationId; + return ApplicationId.createApplicationId(clustertimestamp, + Integer.valueOf(id.toString())); } public static ApplicationId newApplicationId(RecordFactory recordFactory, long clusterTimeStamp, int id) { - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setId(id); - applicationId.setClusterTimestamp(clusterTimeStamp); - return applicationId; + return ApplicationId.createApplicationId(clusterTimeStamp, id); } public static ApplicationId newApplicationId(long clusterTimeStamp, int id) { - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setId(id); - applicationId.setClusterTimestamp(clusterTimeStamp); - return applicationId; + return ApplicationId.createApplicationId(clusterTimeStamp, id); } public static ApplicationAttemptId newApplicationAttemptId( @@ -162,11 +152,8 @@ public static ApplicationAttemptId newApplicationAttemptId( } public static ApplicationId convert(long clustertimestamp, CharSequence id) { - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setId(Integer.valueOf(id.toString())); - applicationId.setClusterTimestamp(clustertimestamp); - return applicationId; + return ApplicationId.createApplicationId(clustertimestamp, + Integer.valueOf(id.toString())); } public static ContainerId newContainerId(ApplicationAttemptId appAttemptId, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java index 21fe2d9..3f9dded 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java @@ -114,18 +114,14 @@ public static ApplicationId toApplicationId(RecordFactory recordFactory, private static ApplicationId toApplicationId(RecordFactory recordFactory, Iterator it) { - ApplicationId appId = - recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(Long.parseLong(it.next())); - appId.setId(Integer.parseInt(it.next())); - return appId; + return ApplicationId.createApplicationId(Long.parseLong(it.next()), + Integer.parseInt(it.next())); } private static ApplicationAttemptId toApplicationAttemptId( Iterator it) throws NumberFormatException { - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(Long.parseLong(it.next())); - appId.setId(Integer.parseInt(it.next())); + ApplicationId appId = ApplicationId.createApplicationId(Long.parseLong(it.next()), + Integer.parseInt(it.next())); ApplicationAttemptId appAttemptId = Records .newRecord(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); @@ -135,10 +131,8 @@ private static ApplicationAttemptId toApplicationAttemptId( private static ApplicationId toApplicationId( Iterator it) throws NumberFormatException { - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(Long.parseLong(it.next())); - appId.setId(Integer.parseInt(it.next())); - return appId; + return ApplicationId.createApplicationId(Long.parseLong(it.next()), + Integer.parseInt(it.next())); } public static String toString(ContainerId cId) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Records.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Records.java deleted file mode 100644 index 60711fd..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Records.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.util; - -import org.apache.hadoop.yarn.factories.RecordFactory; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; - -/** - * Convenient API record utils - */ -public class Records { - // The default record factory - private static final RecordFactory factory = - RecordFactoryProvider.getRecordFactory(null); - - public static T newRecord(Class cls) { - return factory.newRecordInstance(cls); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java index cc67ff7..f07ac67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java @@ -62,10 +62,7 @@ public static String newQueue() { } public static ApplicationId newAppID(int i) { - ApplicationId id = Records.newRecord(ApplicationId.class); - id.setClusterTimestamp(TS); - id.setId(i); - return id; + return ApplicationId.createApplicationId(TS, i); } public static ApplicationAttemptId newAppAttemptID(ApplicationId appId, int i) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java index 295a38c..225760b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java @@ -93,12 +93,9 @@ private void testRPCTimeout(String rpcClass) throws Exception { containerLaunchContext.setUser("dummy-user"); ContainerId containerId = recordFactory 
.newRecordInstance(ContainerId.class); - ApplicationId applicationId = recordFactory - .newRecordInstance(ApplicationId.class); + ApplicationId applicationId = ApplicationId.createApplicationId(0, 0); ApplicationAttemptId applicationAttemptId = recordFactory .newRecordInstance(ApplicationAttemptId.class); - applicationId.setClusterTimestamp(0); - applicationId.setId(0); applicationAttemptId.setApplicationId(applicationId); applicationAttemptId.setAttemptId(0); containerId.setApplicationAttemptId(applicationAttemptId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index 7d941e9..6a0ec81 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -116,12 +116,10 @@ private void test(String rpcClass) throws Exception { containerLaunchContext.setUser("dummy-user"); ContainerId containerId = recordFactory.newRecordInstance(ContainerId.class); - ApplicationId applicationId = + ApplicationId applicationId = ApplicationId.createApplicationId(0, 0); recordFactory.newRecordInstance(ApplicationId.class); ApplicationAttemptId applicationAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - applicationId.setClusterTimestamp(0); - applicationId.setId(0); applicationAttemptId.setApplicationId(applicationId); applicationAttemptId.setAttemptId(0); containerId.setApplicationAttemptId(applicationAttemptId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationAttemptId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationAttemptId.java index 764ab42..32fd974 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationAttemptId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationAttemptId.java @@ -61,9 +61,7 @@ private ApplicationAttemptId createAppAttemptId(long clusterTimeStamp, int id, int attemptId) { ApplicationAttemptId appAttemptId = Records.newRecord(ApplicationAttemptId.class); - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(clusterTimeStamp); - appId.setId(id); + ApplicationId appId = ApplicationId.createApplicationId(clusterTimeStamp, id); appAttemptId.setApplicationId(appId); appAttemptId.setAttemptId(attemptId); return appAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationId.java index a7d701a..d5863b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicationId.java @@ -21,7 +21,6 @@ import junit.framework.Assert; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.util.Records; import org.junit.Test; public class TestApplicationId { @@ -52,9 +51,6 @@ public void testApplicationId() { } private ApplicationId createAppId(long clusterTimeStamp, int id) { - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(clusterTimeStamp); - appId.setId(id); - return appId; + return ApplicationId.createApplicationId(clusterTimeStamp, id); } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java index ac3a2a0..efd9b62 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java @@ -71,10 +71,7 @@ private ContainerId createContainerId(long clusterTimestamp, int appIdInt, } private ApplicationId createAppId(long clusterTimeStamp, int id) { - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(clusterTimeStamp); - appId.setId(id); - return appId; + return ApplicationId.createApplicationId(clusterTimeStamp, id); } private ApplicationAttemptId createAppAttemptId(ApplicationId appId, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index 292d00f..4b56e73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -111,10 +111,7 @@ protected void startStatusUpdater() { ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); ContainerId cID = recordFactory.newRecordInstance(ContainerId.class); - ApplicationId applicationId = - recordFactory.newRecordInstance(ApplicationId.class); - applicationId.setClusterTimestamp(0); - applicationId.setId(0); + ApplicationId applicationId = ApplicationId.createApplicationId(0, 0); ApplicationAttemptId 
applicationAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); applicationAttemptId.setApplicationId(applicationId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java index 1436193..8db1d28 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java @@ -222,9 +222,7 @@ private void createFiles(String dir, String subDir, int numOfFiles) { } private ContainerId createContainerId() { - ApplicationId appId = Records.newRecord(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(0, 0); ApplicationAttemptId appAttemptId = Records.newRecord(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java index 1792988..0a3d155 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java @@ -24,10 +24,8 @@ import java.io.BufferedReader; import java.io.File; import java.io.FileReader; -import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; -import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -62,7 +60,6 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.util.BuilderUtils; @@ -219,9 +216,7 @@ private void startContainers(NodeManager nm) throws IOException { } private ContainerId createContainerId() { - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(0, 0); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 29d6a4c..6291d68 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -138,8 +138,8 @@ public RegisterNodeManagerResponse registerNodeManager( return response; } - ApplicationId applicationID = recordFactory - .newRecordInstance(ApplicationId.class); +// ApplicationId applicationID = recordFactory +// .newRecordInstance(ApplicationId.class); ApplicationAttemptId appAttemptID = recordFactory .newRecordInstance(ApplicationAttemptId.class); ContainerId firstContainerID = recordFactory @@ -178,12 +178,15 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) getAppToContainerStatusMap(nodeStatus.getContainersStatuses()); org.apache.hadoop.yarn.api.records.Container mockContainer = mock(org.apache.hadoop.yarn.api.records.Container.class); + + ApplicationId appId1 = ApplicationId.createApplicationId(0, 1); + ApplicationId appId2 = ApplicationId.createApplicationId(0, 2); + if (heartBeatID == 1) { Assert.assertEquals(0, nodeStatus.getContainersStatuses().size()); // Give a container to the NM. - applicationID.setId(heartBeatID); - appAttemptID.setApplicationId(applicationID); + appAttemptID.setApplicationId(appId1); firstContainerID.setApplicationAttemptId(appAttemptID); firstContainerID.setId(heartBeatID); ContainerLaunchContext launchContext = recordFactory @@ -200,7 +203,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) Assert.assertEquals("Number of applications should only be one!", 1, nodeStatus.getContainersStatuses().size()); Assert.assertEquals("Number of container for the app should be one!", - 1, appToContainers.get(applicationID).size()); + 1, appToContainers.get(appId1).size()); // Checks on the NM end ConcurrentMap activeContainers = @@ -208,8 +211,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) Assert.assertEquals(1, activeContainers.size()); // Give another container to the NM. 
- applicationID.setId(heartBeatID); - appAttemptID.setApplicationId(applicationID); + appAttemptID.setApplicationId(appId2); secondContainerID.setApplicationAttemptId(appAttemptID); secondContainerID.setId(heartBeatID); ContainerLaunchContext launchContext = recordFactory @@ -226,7 +228,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) Assert.assertEquals("Number of applications should only be one!", 1, appToContainers.size()); Assert.assertEquals("Number of container for the app should be two!", - 2, appToContainers.get(applicationID).size()); + 2, appToContainers.get(appId2).size()); // Checks on the NM end ConcurrentMap activeContainers = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index 46c9faa..a1600a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -18,8 +18,12 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; -import org.junit.Test; -import static org.junit.Assert.*; +import static org.apache.hadoop.yarn.service.Service.STATE.INITED; +import static org.apache.hadoop.yarn.service.Service.STATE.STARTED; +import static org.apache.hadoop.yarn.service.Service.STATE.STOPPED; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -30,17 +34,10 @@ 
import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.service.Service; - - -import static org.apache.hadoop.yarn.service.Service.STATE.*; +import org.junit.Test; public class TestAuxServices { private static final Log LOG = LogFactory.getLog(TestAuxServices.class); @@ -123,18 +120,18 @@ public void testAuxEventDispatch() { aux.init(conf); aux.start(); - ApplicationId appId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - appId.setId(65); + ApplicationId appId1 = ApplicationId.createApplicationId(0, 65); ByteBuffer buf = ByteBuffer.allocate(6); buf.putChar('A'); buf.putInt(65); buf.flip(); AuxServicesEvent event = new AuxServicesEvent( - AuxServicesEventType.APPLICATION_INIT, "user0", appId, "Asrv", buf); + AuxServicesEventType.APPLICATION_INIT, "user0", appId1, "Asrv", buf); aux.handle(event); - appId.setId(66); + + ApplicationId appId2 = ApplicationId.createApplicationId(0, 66); event = new AuxServicesEvent( - AuxServicesEventType.APPLICATION_STOP, "user0", appId, "Bsrv", null); + AuxServicesEventType.APPLICATION_STOP, "user0", appId2, "Bsrv", null); // verify all services got the stop event aux.handle(event); Collection servs = aux.getServices(); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index 981ab39..e8c00d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -18,12 +18,14 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; -import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -53,7 +55,6 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; -import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal; import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState; @@ -63,7 +64,6 @@ import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.Test; -import static org.mockito.Mockito.*; public class TestContainerManager extends BaseContainerManagerTest { @@ -76,9 +76,7 @@ public 
TestContainerManager() throws UnsupportedFileSystemException { } private ContainerId createContainerId() { - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(0, 0); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index 0e07efb..f938710 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -18,7 +18,9 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.BufferedReader; import java.io.File; @@ -26,14 +28,14 @@ import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; -import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import junit.framework.Assert; + import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import 
org.apache.hadoop.util.Shell; @@ -57,7 +59,6 @@ import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -65,9 +66,6 @@ import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.junit.Before; import org.junit.Test; -import static org.mockito.Mockito.*; - -import junit.framework.Assert; public class TestContainerLaunch extends BaseContainerManagerTest { @@ -164,9 +162,7 @@ public void testContainerEnvVariables() throws Exception { Container mockContainer = mock(Container.class); // ////// Construct the Container-id - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(0, 0); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); @@ -334,9 +330,7 @@ public void testDelayedKill() throws Exception { Container mockContainer = mock(Container.class); // ////// Construct the Container-id - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(1); - appId.setId(1); + ApplicationId appId = ApplicationId.createApplicationId(1, 1); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java index ccbf9f7..f58dc6c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java @@ -18,9 +18,19 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation; -import static org.mockito.Mockito.*; import static junit.framework.Assert.assertEquals; import static junit.framework.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyMap; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -47,15 +57,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; -import org.apache.hadoop.io.DataInputBuffer; -import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import 
org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; +import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -71,9 +79,9 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat; -import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy; import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader; +import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; @@ -682,10 +690,7 @@ public void testLogAggregationForRealContainerLaunch() throws IOException, recordFactory.newRecordInstance(ContainerLaunchContext.class); Container mockContainer = mock(Container.class); // ////// Construct the Container-id - ApplicationId appId = - recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(0, 0); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1); ContainerId cId = BuilderUtils.newContainerId(appAttemptId, 0); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java index ad4a818..5a196e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java @@ -200,10 +200,7 @@ public void testContainerKillOnMemoryOverflow() throws IOException, recordFactory.newRecordInstance(ContainerLaunchContext.class); Container mockContainer = mock(Container.class); // ////// Construct the Container-id - ApplicationId appId = - recordFactory.newRecordInstance(ApplicationId.class); - appId.setClusterTimestamp(0); - appId.setId(0); + ApplicationId appId = ApplicationId.createApplicationId(0, 0); ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index aa7af9c..3868614 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -414,11 +414,7 @@ private void mockRMContext(YarnScheduler yarnScheduler, RMContext rmContext) } private ApplicationId getApplicationId(int id) { - ApplicationId applicationId = recordFactory - .newRecordInstance(ApplicationId.class); - applicationId.setClusterTimestamp(123456); - applicationId.setId(id); - return applicationId; + return ApplicationId.createApplicationId(123456, id); } private RMAppImpl getRMApp(RMContext rmContext, YarnScheduler yarnScheduler, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 66f9059..3e5e498 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.when; import java.io.IOException; import java.util.Comparator; @@ -30,6 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueInfo; import 
org.apache.hadoop.yarn.api.records.QueueUserACLInfo; @@ -50,13 +52,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import static org.mockito.Mockito.*; public class TestCapacityScheduler { @@ -467,15 +466,9 @@ public void testApplicationComparator() { CapacityScheduler cs = new CapacityScheduler(); Comparator appComparator= cs.getApplicationComparator(); - ApplicationId id1 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - id1.setClusterTimestamp(1); - id1.setId(1); - ApplicationId id2 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - id2.setClusterTimestamp(1); - id2.setId(2); - ApplicationId id3 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class); - id3.setClusterTimestamp(2); - id3.setId(1); + ApplicationId id1 = ApplicationId.createApplicationId(1, 1); + ApplicationId id2 = ApplicationId.createApplicationId(1, 2); + ApplicationId id3 = ApplicationId.createApplicationId(2, 1); //same clusterId FiCaSchedulerApp app1 = Mockito.mock(FiCaSchedulerApp.class); when(app1.getApplicationId()).thenReturn(id1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java index 
62a1b9b..0babf6b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java @@ -36,8 +36,7 @@ private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationAttemptId attId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - ApplicationId appIdImpl = recordFactory.newRecordInstance(ApplicationId.class); - appIdImpl.setId(appId); + ApplicationId appIdImpl = ApplicationId.createApplicationId(0, appId); attId.setAttemptId(attemptId); attId.setApplicationId(appIdImpl); return attId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index e6761b9..dd026fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -138,8 +138,7 @@ private Configuration createConfiguration() { private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationAttemptId attId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - ApplicationId appIdImpl = recordFactory.newRecordInstance(ApplicationId.class); - appIdImpl.setId(appId); + ApplicationId 
appIdImpl = ApplicationId.createApplicationId(0, appId); attId.setAttemptId(attemptId); attId.setApplicationId(appIdImpl); return attId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 85076ba..6813cf7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -94,9 +94,7 @@ public void tearDown() throws Exception { private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationAttemptId attId = recordFactory .newRecordInstance(ApplicationAttemptId.class); - ApplicationId appIdImpl = recordFactory - .newRecordInstance(ApplicationId.class); - appIdImpl.setId(appId); + ApplicationId appIdImpl = ApplicationId.createApplicationId(0, appId); attId.setAttemptId(attemptId); attId.setApplicationId(appIdImpl); return attId;