diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
index c187bc8..4c8afae 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
@@ -128,9 +128,7 @@
   static ApplicationId applicationId;
 
   static {
-    applicationId = recordFactory.newRecordInstance(ApplicationId.class);
-    applicationId.setClusterTimestamp(0);
-    applicationId.setId(0);
+    applicationId = ApplicationId.newInstance(0, 0);
   }
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
index 0e20d6f..3539728 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -789,9 +789,7 @@ public MyAppMaster(Clock clock) {
     private final Map allJobs;
 
     MyAppContext(int numberMaps, int numberReduces) {
-      myApplicationID = recordFactory.newRecordInstance(ApplicationId.class);
-      myApplicationID.setClusterTimestamp(clock.getTime());
-      myApplicationID.setId(1);
+      myApplicationID = ApplicationId.newInstance(clock.getTime(), 1);
 
       myAppAttemptID = recordFactory
           .newRecordInstance(ApplicationAttemptId.class);
diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
index 0bd2215..9cc1192 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
@@ -83,9 +83,8 @@ public void testDeletionofStaging() throws IOException {
     ApplicationAttemptId attemptId = recordFactory.newRecordInstance(
         ApplicationAttemptId.class);
     attemptId.setAttemptId(0);
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(System.currentTimeMillis());
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
+        0);
     attemptId.setApplicationId(appId);
     JobId jobid = recordFactory.newRecordInstance(JobId.class);
     jobid.setAppId(appId);
@@ -112,9 +111,8 @@ public void testNoDeletionofStagingOnReboot() throws IOException {
     ApplicationAttemptId attemptId = recordFactory.newRecordInstance(
         ApplicationAttemptId.class);
     attemptId.setAttemptId(0);
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(System.currentTimeMillis());
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
+        0);
     attemptId.setApplicationId(appId);
     ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
     Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
@@ -140,9 +138,8 @@ public void testDeletionofStagingOnReboot() throws IOException {
     ApplicationAttemptId attemptId = recordFactory.newRecordInstance(
         ApplicationAttemptId.class);
     attemptId.setAttemptId(1);
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(System.currentTimeMillis());
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
+        0);
     attemptId.setApplicationId(appId);
     ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
     MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
@@ -168,9 +165,8 @@ public void testDeletionofStagingOnKill() throws IOException {
     ApplicationAttemptId attemptId = recordFactory.newRecordInstance(
         ApplicationAttemptId.class);
     attemptId.setAttemptId(0);
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(System.currentTimeMillis());
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
+        0);
     attemptId.setApplicationId(appId);
     JobId jobid = recordFactory.newRecordInstance(JobId.class);
     jobid.setAppId(appId);
@@ -196,9 +192,8 @@ public void testDeletionofStagingOnKillLastTry() throws IOException {
     ApplicationAttemptId attemptId = recordFactory.newRecordInstance(
         ApplicationAttemptId.class);
     attemptId.setAttemptId(1);
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(System.currentTimeMillis());
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(),
+        0);
     attemptId.setApplicationId(appId);
     JobId jobid = recordFactory.newRecordInstance(JobId.class);
     jobid.setAppId(appId);
diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
index 9fd0fb8..4f701b0 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
@@ -224,9 +224,7 @@ public void setup() {
     metrics = mock(MRAppMetrics.class);
     dataLocations = new String[1];
 
-    appId = Records.newRecord(ApplicationId.class);
-    appId.setClusterTimestamp(System.currentTimeMillis());
-    appId.setId(1);
+    appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
 
     jobId = Records.newRecord(JobId.class);
     jobId.setId(1);
diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
index ff38ff3..892eb87 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -76,9 +76,8 @@ public static JobId toYarn(org.apache.hadoop.mapreduce.JobID id) {
     JobId jobId = recordFactory.newRecordInstance(JobId.class);
     jobId.setId(id.getId()); //currently there is 1-1 mapping between appid and jobid
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setId(id.getId());
-    appId.setClusterTimestamp(toClusterTimeStamp(id.getJtIdentifier()));
+    ApplicationId appId = ApplicationId.newInstance(
+        toClusterTimeStamp(id.getJtIdentifier()), id.getId());
     jobId.setAppId(appId);
     return jobId;
   }
diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java
index e172be5..ecf5b8f 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestShufflePlugin.java
@@ -21,7 +21,6 @@
 import org.junit.Test;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.*;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.mapred.Task.CombineOutputCollector;
@@ -30,7 +29,6 @@
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.mapreduce.task.reduce.Shuffle;
-import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.Counters.Counter;
 import org.apache.hadoop.mapred.MapOutputFile;
 import org.apache.hadoop.mapred.JobConf;
@@ -40,7 +38,6 @@
 import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
 import org.apache.hadoop.mapred.ShuffleConsumerPlugin;
 import org.apache.hadoop.mapred.RawKeyValueIterator;
-import org.apache.hadoop.mapred.Reducer;
 
 /**
  * A JUnit for testing availability and accessibility of shuffle related API.
@@ -181,10 +178,6 @@ public void testConsumerApi() {
    * AuxiliaryService(s) which are "Shuffle-Providers" (ShuffleHandler and 3rd party plugins)
    */
   public void testProviderApi() {
-
-    ApplicationId mockApplicationId = mock(ApplicationId.class);
-    mockApplicationId.setClusterTimestamp(new Long(10));
-    mockApplicationId.setId(mock(JobID.class).getId());
     LocalDirAllocator mockLocalDirAllocator = mock(LocalDirAllocator.class);
     JobConf mockJobConf = mock(JobConf.class);
     try {
diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
index 601268a..4063f38 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
@@ -140,9 +140,7 @@ public ApplicationSubmissionContext answer(InvocationOnMock invocation)
         ).when(yarnRunner).createApplicationSubmissionContext(any(Configuration.class),
             any(String.class), any(Credentials.class));
 
-    appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(System.currentTimeMillis());
-    appId.setId(1);
+    appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
     jobId = TypeConverter.fromYarn(appId);
     if (testWorkDir.exists()) {
       FileContext.getLocalFSFileContext().delete(new Path(testWorkDir.toString()), true);
diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 5715bd1..af358e4 100644
--- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -79,7 +79,6 @@
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.hadoop.yarn.util.Records;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.Channel;
@@ -549,9 +548,8 @@ protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch,
       // $x/$user/appcache/$appId/output/$mapId
       // TODO: Once Shuffle is out of NM, this can use MR APIs to convert between App and Job
       JobID jobID = JobID.forName(jobId);
-      ApplicationId appID = Records.newRecord(ApplicationId.class);
-      appID.setClusterTimestamp(Long.parseLong(jobID.getJtIdentifier()));
-      appID.setId(jobID.getId());
+      ApplicationId appID = ApplicationId.newInstance(
+          Long.parseLong(jobID.getJtIdentifier()), jobID.getId());
       final String base =
           ContainerLocalizer.USERCACHE + "/" + user + "/"
           + ContainerLocalizer.APPCACHE + "/"
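Both the TypeConverter and ShuffleHandler hunks above perform the same JobID-to-ApplicationId mapping, now funneled through the new static factory. The following is a minimal illustrative sketch of that conversion, not code from the patch; it assumes (as the ShuffleHandler hunk does) that the JobID's jtIdentifier string carries the ResourceManager cluster timestamp, and the class name is hypothetical.

```java
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class JobIdToAppIdSketch {
  // Mirrors the pattern in the TypeConverter/ShuffleHandler hunks above:
  // the jtIdentifier is assumed to be the cluster timestamp in string form.
  public static ApplicationId toApplicationId(JobID jobID) {
    long clusterTimestamp = Long.parseLong(jobID.getJtIdentifier());
    return ApplicationId.newInstance(clusterTimestamp, jobID.getId());
  }

  public static void main(String[] args) {
    // "job_<clusterTimestamp>_<id>" is the usual string form of a JobID.
    JobID jobID = JobID.forName("job_1361234567890_0004");
    // Prints the application_<clusterTimestamp>_<id> form,
    // e.g. application_1361234567890_0004.
    System.out.println(toApplicationId(jobID));
  }
}
```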
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java
index 097a533..243d060 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
 
 /**
  * ApplicationId represents the globally unique
@@ -40,6 +40,14 @@
   public static final String appIdStrPrefix = "application_";
 
+  public static ApplicationId newInstance(long clusterTimestamp, int id) {
+    ApplicationId appId = Records.newRecord(ApplicationId.class);
+    appId.setClusterTimestamp(clusterTimestamp);
+    appId.setId(id);
+    appId.build();
+    return appId;
+  }
+
   /**
    * Get the short integer identifier of the ApplicationId
    * which is unique for all applications started by a particular instance
@@ -51,8 +59,7 @@
   public abstract int getId();
 
   @Private
-  @Unstable
-  public abstract void setId(int id);
+  protected abstract void setId(int id);
 
   /**
    * Get the start time of the ResourceManager which is
@@ -62,10 +69,9 @@
   public abstract long getClusterTimestamp();
 
   @Private
-  @Unstable
-  public abstract void setClusterTimestamp(long clusterTimestamp);
+  protected abstract void setClusterTimestamp(long clusterTimestamp);
 
-  
+  protected abstract void build();
 
   static final ThreadLocal appIdFormat = new ThreadLocal() {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java
index ad5c778..031c194 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java
@@ -21,58 +21,49 @@
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
-import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProtoOrBuilder;
+
+import com.google.common.base.Preconditions;
 
-
 public class ApplicationIdPBImpl extends ApplicationId {
-  ApplicationIdProto proto = ApplicationIdProto.getDefaultInstance();
+  ApplicationIdProto proto = null;
   ApplicationIdProto.Builder builder = null;
-  boolean viaProto = false;
-  
+
   public ApplicationIdPBImpl() {
     builder = ApplicationIdProto.newBuilder();
   }
 
   public ApplicationIdPBImpl(ApplicationIdProto proto) {
     this.proto = proto;
-    viaProto = true;
   }
-  public synchronized ApplicationIdProto getProto() {
-    proto = viaProto ? proto : builder.build();
-    viaProto = true;
+
+  public ApplicationIdProto getProto() {
     return proto;
   }
 
-  private synchronized void maybeInitBuilder() {
-    if (viaProto || builder == null) {
-      builder = ApplicationIdProto.newBuilder(proto);
-    }
-    viaProto = false;
-  }
-  
-  
   @Override
-  public synchronized int getId() {
-    ApplicationIdProtoOrBuilder p = viaProto ? proto : builder;
-    return (p.getId());
+  public int getId() {
+    Preconditions.checkNotNull(proto);
+    return proto.getId();
   }
 
   @Override
-  public synchronized void setId(int id) {
-    maybeInitBuilder();
-    builder.setId((id));
+  protected void setId(int id) {
+    builder.setId(id);
   }
 
   @Override
-  public synchronized long getClusterTimestamp() {
-    ApplicationIdProtoOrBuilder p = viaProto ? proto : builder;
-    return (p.getClusterTimestamp());
+  public long getClusterTimestamp() {
+    Preconditions.checkNotNull(proto);
+    return proto.getClusterTimestamp();
  }
 
   @Override
-  public synchronized void setClusterTimestamp(long clusterTimestamp) {
-    maybeInitBuilder();
+  protected void setClusterTimestamp(long clusterTimestamp) {
     builder.setClusterTimestamp((clusterTimestamp));
   }
+
+  @Override
+  protected void build() {
+    proto = builder.build();
+  }
 }
\ No newline at end of file
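The two hunks above are the heart of the patch: ApplicationId construction is funneled through a static newInstance(clusterTimestamp, id) factory, the setters become protected, and the PB implementation builds its proto exactly once via build(), so instances are effectively immutable after creation. A minimal usage sketch, assuming this patch is applied (the class name below is hypothetical):

```java
import org.apache.hadoop.yarn.api.records.ApplicationId;

public class ApplicationIdFactorySketch {
  public static void main(String[] args) {
    // Old pattern, no longer possible once the setters are protected:
    //   ApplicationId appId = Records.newRecord(ApplicationId.class);
    //   appId.setClusterTimestamp(System.currentTimeMillis());
    //   appId.setId(1);

    // New pattern: the record is fully initialized in one call, and the
    // PB implementation freezes the underlying proto via build().
    ApplicationId appId =
        ApplicationId.newInstance(System.currentTimeMillis(), 1);

    System.out.println(appId.getClusterTimestamp());
    System.out.println(appId.getId());
  }
}
```

Note that the new getters read the built proto directly, with Preconditions.checkNotNull guarding against an instance whose proto was never built, so mutation after construction is no longer part of the public API.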
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java
index 098c7d4..22c0e14 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnClient.java
@@ -39,7 +39,6 @@
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.util.Records;
 import org.junit.Test;
 
 public class TestYarnClient {
@@ -83,9 +82,8 @@ public void testSubmitApplication() {
     for (int i = 0; i < exitStates.length; ++i) {
       ApplicationSubmissionContext context =
           mock(ApplicationSubmissionContext.class);
-      ApplicationId applicationId = Records.newRecord(ApplicationId.class);
-      applicationId.setClusterTimestamp(System.currentTimeMillis());
-      applicationId.setId(i);
+      ApplicationId applicationId = ApplicationId.newInstance(
+          System.currentTimeMillis(), i);
       when(context.getApplicationId()).thenReturn(applicationId);
       ((MockYarnClient) client).setYarnApplicationState(exitStates[i]);
       try {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
index b3baff7..01fc38c 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
@@ -24,7 +24,6 @@
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
 import static org.apache.hadoop.yarn.util.StringHelper.*;
 
@@ -45,10 +44,8 @@ public static ApplicationId toAppID(String prefix, String s, Iterator it
       throwParseException(sjoin(prefix, ID), s);
     }
     shouldHaveNext(prefix, s, it);
-    ApplicationId appId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(Long.parseLong(it.next()));
-    shouldHaveNext(prefix, s, it);
-    appId.setId(Integer.parseInt(it.next()));
+    ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
+        Integer.parseInt(it.next()));
     return appId;
   }
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
index 3e19069..6921bf2 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java
@@ -129,28 +129,17 @@ public static LocalResource newLocalResource(URI uri,
   public static ApplicationId newApplicationId(RecordFactory recordFactory,
       long clustertimestamp, CharSequence id) {
-    ApplicationId applicationId =
-        recordFactory.newRecordInstance(ApplicationId.class);
-    applicationId.setId(Integer.valueOf(id.toString()));
-    applicationId.setClusterTimestamp(clustertimestamp);
-    return applicationId;
+    return ApplicationId.newInstance(clustertimestamp,
+        Integer.valueOf(id.toString()));
   }
 
   public static ApplicationId newApplicationId(RecordFactory recordFactory,
       long clusterTimeStamp, int id) {
-    ApplicationId applicationId =
-        recordFactory.newRecordInstance(ApplicationId.class);
-    applicationId.setId(id);
-    applicationId.setClusterTimestamp(clusterTimeStamp);
-    return applicationId;
+    return ApplicationId.newInstance(clusterTimeStamp, id);
   }
 
   public static ApplicationId newApplicationId(long clusterTimeStamp, int id) {
-    ApplicationId applicationId =
-        recordFactory.newRecordInstance(ApplicationId.class);
-    applicationId.setId(id);
-    applicationId.setClusterTimestamp(clusterTimeStamp);
-    return applicationId;
+    return ApplicationId.newInstance(clusterTimeStamp, id);
   }
 
   public static ApplicationAttemptId newApplicationAttemptId(
@@ -163,11 +152,8 @@ public static ApplicationAttemptId newApplicationAttemptId(
   }
 
   public static ApplicationId convert(long clustertimestamp, CharSequence id) {
-    ApplicationId applicationId =
-        recordFactory.newRecordInstance(ApplicationId.class);
-    applicationId.setId(Integer.valueOf(id.toString()));
-    applicationId.setClusterTimestamp(clustertimestamp);
-    return applicationId;
+    return ApplicationId.newInstance(clustertimestamp,
+        Integer.valueOf(id.toString()));
   }
 
   public static ContainerId newContainerId(ApplicationAttemptId appAttemptId,
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
index 21fe2d9..9a36708 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -114,18 +114,15 @@ public static ApplicationId toApplicationId(RecordFactory recordFactory,
 
   private static ApplicationId toApplicationId(RecordFactory recordFactory,
       Iterator it) {
-    ApplicationId appId =
-        recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(Long.parseLong(it.next()));
-    appId.setId(Integer.parseInt(it.next()));
+    ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
+        Integer.parseInt(it.next()));
     return appId;
   }
 
   private static ApplicationAttemptId toApplicationAttemptId(
       Iterator it) throws NumberFormatException {
-    ApplicationId appId = Records.newRecord(ApplicationId.class);
-    appId.setClusterTimestamp(Long.parseLong(it.next()));
-    appId.setId(Integer.parseInt(it.next()));
+    ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
+        Integer.parseInt(it.next()));
     ApplicationAttemptId appAttemptId = Records
         .newRecord(ApplicationAttemptId.class);
     appAttemptId.setApplicationId(appId);
@@ -135,9 +132,8 @@ private static ApplicationAttemptId toApplicationAttemptId(
 
   private static ApplicationId toApplicationId(
       Iterator it) throws NumberFormatException {
-    ApplicationId appId = Records.newRecord(ApplicationId.class);
-    appId.setClusterTimestamp(Long.parseLong(it.next()));
-    appId.setId(Integer.parseInt(it.next()));
+    ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
+        Integer.parseInt(it.next()));
     return appId;
   }
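The Apps.toAppID and ConverterUtils hunks above parse the string form "application_<clusterTimestamp>_<id>" by walking an iterator over its parts and handing the two numeric fields to the factory. The sketch below is a simplified, self-contained stand-in for that parsing, not the library code itself; the real methods use their own splitting helpers and add validation and error reporting, and the class name here is hypothetical.

```java
import java.util.Arrays;
import java.util.Iterator;

import org.apache.hadoop.yarn.api.records.ApplicationId;

public class AppIdParsingSketch {
  // Simplified stand-in for the iterator-based parsing in Apps.toAppID and
  // ConverterUtils.toApplicationId above.
  public static ApplicationId parse(String appIdStr) {
    // Expected form: application_<clusterTimestamp>_<id>
    Iterator<String> it = Arrays.asList(appIdStr.split("_")).iterator();
    it.next(); // skip the "application" prefix
    return ApplicationId.newInstance(Long.parseLong(it.next()),
        Integer.parseInt(it.next()));
  }

  public static void main(String[] args) {
    ApplicationId appId = parse("application_1361234567890_0002");
    System.out.println(appId.getClusterTimestamp()); // 1361234567890
    System.out.println(appId.getId());               // 2
  }
}
```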
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
index cc67ff7..ecfac4d 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/MockApps.java
@@ -62,10 +62,7 @@ public static String newQueue() {
   }
 
   public static ApplicationId newAppID(int i) {
-    ApplicationId id = Records.newRecord(ApplicationId.class);
-    id.setClusterTimestamp(TS);
-    id.setId(i);
-    return id;
+    return ApplicationId.newInstance(TS, i);
   }
 
   public static ApplicationAttemptId newAppAttemptID(ApplicationId appId, int i) {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
index cf0a441..f898729 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java
@@ -92,12 +92,9 @@ private void testRPCTimeout(String rpcClass) throws Exception {
       containerLaunchContext.setUser("dummy-user");
       ContainerId containerId = recordFactory
           .newRecordInstance(ContainerId.class);
-      ApplicationId applicationId = recordFactory
-          .newRecordInstance(ApplicationId.class);
+      ApplicationId applicationId = ApplicationId.newInstance(0, 0);
       ApplicationAttemptId applicationAttemptId = recordFactory
           .newRecordInstance(ApplicationAttemptId.class);
-      applicationId.setClusterTimestamp(0);
-      applicationId.setId(0);
       applicationAttemptId.setApplicationId(applicationId);
       applicationAttemptId.setAttemptId(0);
       containerId.setApplicationAttemptId(applicationAttemptId);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
index 5485927..fc42f17 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -114,12 +114,9 @@ private void test(String rpcClass) throws Exception {
     containerLaunchContext.setUser("dummy-user");
     ContainerId containerId =
         recordFactory.newRecordInstance(ContainerId.class);
-    ApplicationId applicationId =
-        recordFactory.newRecordInstance(ApplicationId.class);
+    ApplicationId applicationId = ApplicationId.newInstance(0, 0);
     ApplicationAttemptId applicationAttemptId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
-    applicationId.setClusterTimestamp(0);
-    applicationId.setId(0);
     applicationAttemptId.setApplicationId(applicationId);
     applicationAttemptId.setAttemptId(0);
     containerId.setApplicationAttemptId(applicationAttemptId);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index 5064f54..7f4dc87 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -124,10 +124,7 @@ public long getRMIdentifier() {
     ContainerLaunchContext launchContext =
         recordFactory.newRecordInstance(ContainerLaunchContext.class);
     ContainerId cID = recordFactory.newRecordInstance(ContainerId.class);
-    ApplicationId applicationId =
-        recordFactory.newRecordInstance(ApplicationId.class);
-    applicationId.setClusterTimestamp(0);
-    applicationId.setId(0);
+    ApplicationId applicationId = ApplicationId.newInstance(0, 0);
     ApplicationAttemptId applicationAttemptId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
     applicationAttemptId.setApplicationId(applicationId);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
index 896132a..0b8c3f1 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
@@ -248,9 +248,7 @@ private void createFiles(String dir, String subDir, int numOfFiles) {
   }
 
   private ContainerId createContainerId() {
-    ApplicationId appId = Records.newRecord(ApplicationId.class);
-    appId.setClusterTimestamp(0);
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(0, 0);
     ApplicationAttemptId appAttemptId =
         Records.newRecord(ApplicationAttemptId.class);
     appAttemptId.setApplicationId(appId);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
index 9ff4fd6..031336c 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
@@ -220,9 +220,8 @@ public ContainerManager run() {
   }
 
   public static ContainerId createContainerId() {
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(0);
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(0, 0);
+
     ApplicationAttemptId appAttemptId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
     appAttemptId.setApplicationId(appId);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index 74c938c..0ba67a0 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -151,8 +151,6 @@ public RegisterNodeManagerResponse registerNodeManager(
       return response;
     }
 
-    ApplicationId applicationID = recordFactory
-        .newRecordInstance(ApplicationId.class);
     ApplicationAttemptId appAttemptID = recordFactory
         .newRecordInstance(ApplicationAttemptId.class);
     ContainerId firstContainerID = recordFactory
@@ -191,12 +189,15 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
           getAppToContainerStatusMap(nodeStatus.getContainersStatuses());
       org.apache.hadoop.yarn.api.records.Container mockContainer =
           mock(org.apache.hadoop.yarn.api.records.Container.class);
+
+      ApplicationId appId1 = ApplicationId.newInstance(0, 1);
+      ApplicationId appId2 = ApplicationId.newInstance(0, 2);
+
       if (heartBeatID == 1) {
         Assert.assertEquals(0, nodeStatus.getContainersStatuses().size());
 
         // Give a container to the NM.
-        applicationID.setId(heartBeatID);
-        appAttemptID.setApplicationId(applicationID);
+        appAttemptID.setApplicationId(appId1);
         firstContainerID.setApplicationAttemptId(appAttemptID);
         firstContainerID.setId(heartBeatID);
         ContainerLaunchContext launchContext = recordFactory
@@ -213,7 +214,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
         Assert.assertEquals("Number of applications should only be one!", 1,
             nodeStatus.getContainersStatuses().size());
         Assert.assertEquals("Number of container for the app should be one!",
-            1, appToContainers.get(applicationID).size());
+            1, appToContainers.get(appId1).size());
 
         // Checks on the NM end
         ConcurrentMap activeContainers =
@@ -221,8 +222,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
         Assert.assertEquals(1, activeContainers.size());
 
         // Give another container to the NM.
-        applicationID.setId(heartBeatID);
-        appAttemptID.setApplicationId(applicationID);
+        appAttemptID.setApplicationId(appId2);
         secondContainerID.setApplicationAttemptId(appAttemptID);
         secondContainerID.setId(heartBeatID);
         ContainerLaunchContext launchContext = recordFactory
@@ -239,7 +239,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
         Assert.assertEquals("Number of applications should only be one!", 1,
             appToContainers.size());
         Assert.assertEquals("Number of container for the app should be two!",
-            2, appToContainers.get(applicationID).size());
+            2, appToContainers.get(appId2).size());
 
         // Checks on the NM end
         ConcurrentMap activeContainers =
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
index 46c9faa..38ced35 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java
@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager;
 
-import org.junit.Test;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.yarn.service.Service.STATE.INITED;
+import static org.apache.hadoop.yarn.service.Service.STATE.STARTED;
+import static org.apache.hadoop.yarn.service.Service.STATE.STOPPED;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -30,17 +34,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;
 import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.service.Service;
-
-
-import static org.apache.hadoop.yarn.service.Service.STATE.*;
+import org.junit.Test;
 
 public class TestAuxServices {
   private static final Log LOG = LogFactory.getLog(TestAuxServices.class);
@@ -123,18 +120,17 @@ public void testAuxEventDispatch() {
     aux.init(conf);
     aux.start();
 
-    ApplicationId appId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
-    appId.setId(65);
+    ApplicationId appId1 = ApplicationId.newInstance(0, 65);
     ByteBuffer buf = ByteBuffer.allocate(6);
     buf.putChar('A');
     buf.putInt(65);
     buf.flip();
     AuxServicesEvent event = new AuxServicesEvent(
-        AuxServicesEventType.APPLICATION_INIT, "user0", appId, "Asrv", buf);
+        AuxServicesEventType.APPLICATION_INIT, "user0", appId1, "Asrv", buf);
     aux.handle(event);
-    appId.setId(66);
+    ApplicationId appId2 = ApplicationId.newInstance(0, 66);
     event = new AuxServicesEvent(
-        AuxServicesEventType.APPLICATION_STOP, "user0", appId, "Bsrv", null);
+        AuxServicesEventType.APPLICATION_STOP, "user0", appId2, "Bsrv", null);
     // verify all services got the stop event
     aux.handle(event);
     Collection servs = aux.getServices();
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index da5c2bc..d586877 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -77,9 +77,7 @@ public TestContainerManager() throws UnsupportedFileSystemException {
   }
 
   private ContainerId createContainerId() {
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(0);
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(0, 0);
     ApplicationAttemptId appAttemptId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
     appAttemptId.setApplicationId(appId);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 9e9d656..31c0240 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -26,14 +28,14 @@
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import junit.framework.Assert;
+
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -58,7 +60,6 @@
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
 import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
@@ -66,9 +67,6 @@
 import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
 import org.junit.Before;
 import org.junit.Test;
 
-import static org.mockito.Mockito.*;
-
-import junit.framework.Assert;
 
 public class TestContainerLaunch extends BaseContainerManagerTest {
@@ -165,9 +163,7 @@ public void testContainerEnvVariables() throws Exception {
     Container mockContainer = mock(Container.class);
 
     // ////// Construct the Container-id
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(0);
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(0, 0);
     ApplicationAttemptId appAttemptId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
     appAttemptId.setApplicationId(appId);
@@ -336,9 +332,7 @@ public void testDelayedKill() throws Exception {
     Container mockContainer = mock(Container.class);
 
     // ////// Construct the Container-id
-    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(1);
-    appId.setId(1);
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
     ApplicationAttemptId appAttemptId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
     appAttemptId.setApplicationId(appId);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 3c97f43..0257588 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -18,9 +18,19 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation;
 
-import static org.mockito.Mockito.*;
 import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyMap;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -47,15 +57,13 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
@@ -72,9 +80,9 @@
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
-import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
+import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy;
 import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
@@ -683,10 +691,7 @@ public void testLogAggregationForRealContainerLaunch() throws IOException,
         recordFactory.newRecordInstance(ContainerLaunchContext.class);
     Container mockContainer = mock(Container.class);
     // ////// Construct the Container-id
-    ApplicationId appId =
-        recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(0);
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(0, 0);
     ApplicationAttemptId appAttemptId =
         BuilderUtils.newApplicationAttemptId(appId, 1);
     ContainerId cId = BuilderUtils.newContainerId(appAttemptId, 0);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
index 75f7272..507c8ed 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
@@ -201,10 +201,7 @@ public void testContainerKillOnMemoryOverflow() throws IOException,
         recordFactory.newRecordInstance(ContainerLaunchContext.class);
     Container mockContainer = mock(Container.class);
     // ////// Construct the Container-id
-    ApplicationId appId =
-        recordFactory.newRecordInstance(ApplicationId.class);
-    appId.setClusterTimestamp(0);
-    appId.setId(0);
+    ApplicationId appId = ApplicationId.newInstance(0, 0);
     ApplicationAttemptId appAttemptId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
     appAttemptId.setApplicationId(appId);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 577ea0a..01de344 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -154,8 +154,7 @@ public void testGetApplicationReport() throws YarnRemoteException {
     RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
     GetApplicationReportRequest request = recordFactory
         .newRecordInstance(GetApplicationReportRequest.class);
-    request.setApplicationId(recordFactory
-        .newRecordInstance(ApplicationId.class));
+    request.setApplicationId(ApplicationId.newInstance(0, 0));
     GetApplicationReportResponse applicationReport = rmService
         .getApplicationReport(request);
     Assert.assertNull("It should return null as application report for absent application.",
@@ -436,11 +435,7 @@ private void mockRMContext(YarnScheduler yarnScheduler, RMContext rmContext)
   }
 
   private ApplicationId getApplicationId(int id) {
-    ApplicationId applicationId = recordFactory
-        .newRecordInstance(ApplicationId.class);
-    applicationId.setClusterTimestamp(123456);
-    applicationId.setId(id);
-    return applicationId;
+    return ApplicationId.newInstance(123456, id);
   }
 
   private RMAppImpl getRMApp(RMContext rmContext, YarnScheduler yarnScheduler,
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 1bb6a0e..6e37df4 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -20,6 +20,7 @@
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.Comparator;
@@ -30,6 +31,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
@@ -51,13 +53,10 @@
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
-import static org.mockito.Mockito.*;
 
 public class TestCapacityScheduler {
@@ -468,15 +467,9 @@ public void testApplicationComparator() {
     CapacityScheduler cs = new CapacityScheduler();
     Comparator appComparator= cs.getApplicationComparator();
 
-    ApplicationId id1 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
-    id1.setClusterTimestamp(1);
-    id1.setId(1);
-    ApplicationId id2 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
-    id2.setClusterTimestamp(1);
-    id2.setId(2);
-    ApplicationId id3 = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
-    id3.setClusterTimestamp(2);
-    id3.setId(1);
+    ApplicationId id1 = ApplicationId.newInstance(1, 1);
+    ApplicationId id2 = ApplicationId.newInstance(1, 2);
+    ApplicationId id3 = ApplicationId.newInstance(2, 1);
     //same clusterId
     FiCaSchedulerApp app1 = Mockito.mock(FiCaSchedulerApp.class);
     when(app1.getApplicationId()).thenReturn(id1);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java
index 62a1b9b..3b54519 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSSchedulerApp.java
@@ -36,8 +36,7 @@
   private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) {
     ApplicationAttemptId attId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
-    ApplicationId appIdImpl = recordFactory.newRecordInstance(ApplicationId.class);
-    appIdImpl.setId(appId);
+    ApplicationId appIdImpl = ApplicationId.newInstance(0, appId);
     attId.setAttemptId(attemptId);
     attId.setApplicationId(appIdImpl);
     return attId;
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 4da11f1..65c93be 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -143,8 +143,7 @@ private Configuration createConfiguration() {
   private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) {
     ApplicationAttemptId attId =
         recordFactory.newRecordInstance(ApplicationAttemptId.class);
-    ApplicationId appIdImpl = recordFactory.newRecordInstance(ApplicationId.class);
-    appIdImpl.setId(appId);
+    ApplicationId appIdImpl = ApplicationId.newInstance(0, appId);
     attId.setAttemptId(attemptId);
     attId.setApplicationId(appIdImpl);
     return attId;
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 4ae3858..0420780 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -97,9 +97,7 @@ public void tearDown() throws Exception {
   private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) {
     ApplicationAttemptId attId = recordFactory
         .newRecordInstance(ApplicationAttemptId.class);
-    ApplicationId appIdImpl = recordFactory
-        .newRecordInstance(ApplicationId.class);
-    appIdImpl.setId(appId);
+    ApplicationId appIdImpl = ApplicationId.newInstance(0, appId);
     attId.setAttemptId(attemptId);
     attId.setApplicationId(appIdImpl);
     return attId;
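The scheduler tests above all follow the same helper pattern once the patch is applied: the ApplicationId comes from the static factory, while the ApplicationAttemptId is still created through the record factory and then wired up with setters. A self-contained sketch of that recurring helper, for illustration only (the class name is hypothetical, and the zero cluster timestamp mirrors what the tests use):

```java
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class AppAttemptIdHelperSketch {
  private static final RecordFactory recordFactory =
      RecordFactoryProvider.getRecordFactory(null);

  // Mirrors the createAppAttemptId helpers in the scheduler tests above.
  static ApplicationAttemptId createAppAttemptId(int appId, int attemptId) {
    ApplicationAttemptId attId =
        recordFactory.newRecordInstance(ApplicationAttemptId.class);
    ApplicationId appIdImpl = ApplicationId.newInstance(0, appId);
    attId.setAttemptId(attemptId);
    attId.setApplicationId(appIdImpl);
    return attId;
  }
}
```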