diff --git hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java index e679c9d..9ed904c 100644 --- hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java +++ hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java @@ -389,18 +389,28 @@ private void createAMForJob(Map jsonJob) throws YarnException { String queue = jsonJob.get("job.queue.name").toString(); increaseQueueAppNum(queue); - String oldAppId = (String)jsonJob.get("job.id"); - if (oldAppId == null) { - oldAppId = Integer.toString(AM_ID); - } - String amType = (String)jsonJob.get("am.type"); if (amType == null) { amType = SLSUtils.DEFAULT_JOB_TYPE; } - runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime, - getTaskContainers(jsonJob), null); + int jobCount = 1; + if (jsonJob.containsKey("job.count")) { + jobCount = Integer.parseInt(jsonJob.get("job.count").toString()); + } + jobCount = Math.max(jobCount, 1); + + String oldAppId = (String)jsonJob.get("job.id"); + // Job id is generated automatically if this job configuration allows + // multiple job instances + if(jobCount > 1) { + oldAppId = null; + } + + for (int i = 0; i < jobCount; i++) { + runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime, + getTaskContainers(jsonJob), null); + } } private List getTaskContainers(Map jsonJob) @@ -704,9 +714,14 @@ private void runNewAM(String jobType, String user, SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS, SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS_DEFAULT); boolean isTracked = trackedApps.contains(oldJobId); - amSim.init(AM_ID++, heartbeatInterval, containerList, + if (oldJobId == null) { + oldJobId = Integer.toString(AM_ID); + } + amSim.init(AM_ID, heartbeatInterval, containerList, rm, this, jobStartTimeMS, jobFinishTimeMS, user, jobQueue, isTracked, oldJobId, rr, runner.getStartTimeMS()); + + AM_ID++; runner.schedule(amSim); maxRuntime = 
Math.max(maxRuntime, jobFinishTimeMS); numTasks += containerList.size(); diff --git hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md index 6e00e9a..2b614eb 100644 --- hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md +++ hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md @@ -338,6 +338,7 @@ Here we provide an example format of the sls json file, which contains 2 jobs. T "job.queue.name" : "sls_queue_1", // the queue job will be submitted to "job.id" : "job_1", // the job id used to track the job, optional, the default value is a zero-based integer increasing with number of jobs "job.user" : "default", // user, optional, the default value is "default" + "job.count" : 1, // number of jobs, optional, the default value is 1 "job.tasks" : [ { "count": 1, // number of tasks, optional, the default value is 1 "container.host" : "/default-rack/node1", // host the container asks for