processors = getProcessorList(conf);
if (processors != null) {
Collections.reverse(processors);
for (ApplicationMasterServiceProcessor p : processors) {
// Ensure only single instance of PlacementProcessor is included
- if (enablePlacementConstraints && p instanceof PlacementProcessor) {
+ if (p instanceof AbstractPlacementProcessor) {
+ LOG.warn("Found PlacementProcessor=" + p.getClass().getCanonicalName()
+ + " defined in "
+ + YarnConfiguration.RM_APPLICATION_MASTER_SERVICE_PROCESSORS
+ + ", however PlacementProcessor handler should be configured "
+ + "by using " + YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER
+ + ", this processor will be ignored.");
continue;
}
this.amsProcessingChain.addProcessor(p);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index cd9d1373c7d..0f9304f8927 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1098,18 +1098,6 @@ public Allocation allocate(ApplicationAttemptId applicationAttemptId,
return EMPTY_ALLOCATION;
}
- if ((!getConfiguration().getBoolean(
- CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
- CapacitySchedulerConfiguration.DEFAULT_SCHEDULING_REQUEST_ALLOWED))
- && schedulingRequests != null && (!schedulingRequests.isEmpty())) {
- throw new SchedulerInvalidResoureRequestException(
- "Application attempt:" + applicationAttemptId
- + " is using SchedulingRequest, which is disabled. Please update "
- + CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED
- + " to true in capacity-scheduler.xml in order to use this "
- + "feature.");
- }
-
// The allocate may be the leftover from previous attempt, and it will
// impact current attempt, such as confuse the request and allocation for
// current attempt's AM container.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index a9cf7146ec4..bdd30b915c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -77,11 +77,6 @@
@Private
public static final String PREFIX = "yarn.scheduler.capacity.";
-
- @Private
- public static final String SCHEDULING_REQUEST_ALLOWED =
- PREFIX + "scheduling-request.allowed";
- public static final boolean DEFAULT_SCHEDULING_REQUEST_ALLOWED = false;
@Private
public static final String DOT = ".";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/AbstractPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/AbstractPlacementProcessor.java
new file mode 100644
index 00000000000..279aabfec28
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/AbstractPlacementProcessor.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
+import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Base class of all PlacementProcessors.
+ */
+public abstract class AbstractPlacementProcessor implements
+ ApplicationMasterServiceProcessor {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(AbstractPlacementProcessor.class);
+
+ protected ApplicationMasterServiceProcessor nextAMSProcessor;
+ protected AbstractYarnScheduler scheduler;
+ private PlacementConstraintManager constraintManager;
+
+ @Override
+ public void init(ApplicationMasterServiceContext amsContext,
+ ApplicationMasterServiceProcessor nextProcessor) {
+ this.nextAMSProcessor = nextProcessor;
+ this.scheduler =
+ (AbstractYarnScheduler) ((RMContextImpl) amsContext).getScheduler();
+ this.constraintManager =
+ ((RMContextImpl)amsContext).getPlacementConstraintManager();
+ }
+
+ @Override
+ public void registerApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse response)
+ throws IOException, YarnException {
+ Map<Set<String>, PlacementConstraint> appPlacementConstraints =
+ request.getPlacementConstraints();
+ processPlacementConstraints(applicationAttemptId.getApplicationId(),
+ appPlacementConstraints);
+ nextAMSProcessor.registerApplicationMaster(applicationAttemptId, request,
+ response);
+ }
+
+ private void processPlacementConstraints(ApplicationId applicationId,
+ Map<Set<String>, PlacementConstraint> appPlacementConstraints) {
+ if (appPlacementConstraints != null && !appPlacementConstraints.isEmpty()) {
+ LOG.info("Constraints added for application [{}] against tags [{}]",
+ applicationId, appPlacementConstraints);
+ constraintManager.registerApplication(
+ applicationId, appPlacementConstraints);
+ }
+ }
+
+ @Override
+ public void finishApplicationMaster(ApplicationAttemptId applicationAttemptId,
+ FinishApplicationMasterRequest request,
+ FinishApplicationMasterResponse response) {
+ constraintManager.unregisterApplication(
+ applicationAttemptId.getApplicationId());
+ this.nextAMSProcessor.finishApplicationMaster(applicationAttemptId, request,
+ response);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/DisabledPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/DisabledPlacementProcessor.java
new file mode 100644
index 00000000000..51bb512aa69
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/DisabledPlacementProcessor.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * Processor which rejects all SchedulingRequests.
+ */
+public class DisabledPlacementProcessor extends AbstractPlacementProcessor {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(DisabledPlacementProcessor.class);
+
+ @Override
+ public void registerApplicationMaster(
+ ApplicationAttemptId applicationAttemptId,
+ RegisterApplicationMasterRequest request,
+ RegisterApplicationMasterResponse response)
+ throws IOException, YarnException {
+ if (request.getPlacementConstraints() != null && !request
+ .getPlacementConstraints().isEmpty()) {
+ String message = "Found non empty placement constraints map of "
+ + "RegisterApplicationMasterRequest for application="
+ + applicationAttemptId.toString() + ", however the configured "
+ + YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER
+ + " cannot handle placement constraints, rejecting this "
+ + "registerApplicationMaster operation";
+ LOG.warn(message);
+ throw new YarnException(message);
+ }
+ nextAMSProcessor.registerApplicationMaster(applicationAttemptId, request,
+ response);
+ }
+
+ @Override
+ public void allocate(ApplicationAttemptId appAttemptId,
+ AllocateRequest request, AllocateResponse response) throws YarnException {
+ if (request.getSchedulingRequests() != null && !request
+ .getSchedulingRequests().isEmpty()) {
+ String message = "Found non empty SchedulingRequest of "
+ + "AllocateRequest for application="
+ + appAttemptId.toString() + ", however the configured "
+ + YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER
+ + " cannot handle placement constraints, rejecting this "
+ + "allocate operation";
+ LOG.warn(message);
+ throw new YarnException(message);
+ }
+ nextAMSProcessor.allocate(appAttemptId, request, response);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java
similarity index 90%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java
index 9ce38f4fdda..69c4d2963ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java
@@ -44,7 +44,6 @@
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.PlacedSchedulingRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.SchedulingRequestWithPlacementAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api.SchedulingResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
import org.slf4j.Logger;
@@ -69,7 +68,7 @@
* the placement constraint manager.
* 3. Dispatches Scheduling Requests to the Planner.
*/
-public class PlacementProcessor implements ApplicationMasterServiceProcessor {
+public class PlacementConstraintProcessor extends AbstractPlacementProcessor {
/**
* Wrapper over the SchedulingResponse that wires in the placement attempt
@@ -90,11 +89,8 @@ private Response(boolean isSuccess, ApplicationId applicationId,
}
private static final Logger LOG =
- LoggerFactory.getLogger(PlacementProcessor.class);
- private PlacementConstraintManager constraintManager;
- private ApplicationMasterServiceProcessor nextAMSProcessor;
+ LoggerFactory.getLogger(PlacementConstraintProcessor.class);
- private AbstractYarnScheduler scheduler;
private ExecutorService schedulingThreadPool;
private int retryAttempts;
private Map<ApplicationId, List<BatchedRequests>> requestsToRetry =
@@ -110,12 +106,8 @@ private Response(boolean isSuccess, ApplicationId applicationId,
public void init(ApplicationMasterServiceContext amsContext,
ApplicationMasterServiceProcessor nextProcessor) {
LOG.info("Initializing Constraint Placement Processor:");
- this.nextAMSProcessor = nextProcessor;
- this.constraintManager =
- ((RMContextImpl)amsContext).getPlacementConstraintManager();
+ super.init(amsContext, nextProcessor);
- this.scheduler =
- (AbstractYarnScheduler)((RMContextImpl)amsContext).getScheduler();
// Only the first class is considered - even if a comma separated
// list is provided. (This is for simplicity, since getInstances does a
// lot of good things by handling things correctly)
@@ -166,28 +158,6 @@ public void init(ApplicationMasterServiceContext amsContext,
}
@Override
- public void registerApplicationMaster(ApplicationAttemptId appAttemptId,
- RegisterApplicationMasterRequest request,
- RegisterApplicationMasterResponse response)
- throws IOException, YarnException {
- Map<Set<String>, PlacementConstraint> appPlacementConstraints =
- request.getPlacementConstraints();
- processPlacementConstraints(
- appAttemptId.getApplicationId(), appPlacementConstraints);
- nextAMSProcessor.registerApplicationMaster(appAttemptId, request, response);
- }
-
- private void processPlacementConstraints(ApplicationId applicationId,
- Map<Set<String>, PlacementConstraint> appPlacementConstraints) {
- if (appPlacementConstraints != null && !appPlacementConstraints.isEmpty()) {
- LOG.info("Constraints added for application [{}] against tags [{}]",
- applicationId, appPlacementConstraints);
- constraintManager.registerApplication(
- applicationId, appPlacementConstraints);
- }
- }
-
- @Override
public void allocate(ApplicationAttemptId appAttemptId,
AllocateRequest request, AllocateResponse response) throws YarnException {
// Copy the scheduling request since we will clear it later after sending
@@ -311,11 +281,10 @@ private void handleRejectedRequests(ApplicationAttemptId appAttemptId,
public void finishApplicationMaster(ApplicationAttemptId appAttemptId,
FinishApplicationMasterRequest request,
FinishApplicationMasterResponse response) {
- constraintManager.unregisterApplication(appAttemptId.getApplicationId());
placementDispatcher.clearApplicationState(appAttemptId.getApplicationId());
requestsToReject.remove(appAttemptId.getApplicationId());
requestsToRetry.remove(appAttemptId.getApplicationId());
- nextAMSProcessor.finishApplicationMaster(appAttemptId, request, response);
+ super.finishApplicationMaster(appAttemptId, request, response);
}
private void handleSchedulingResponse(SchedulingResponse schedulerResponse) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SchedulerPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SchedulerPlacementProcessor.java
new file mode 100644
index 00000000000..9a6ca0f30c5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/SchedulerPlacementProcessor.java
@@ -0,0 +1,40 @@
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * Uses the scheduler to handle placement constraint requests.
+ */
+public class SchedulerPlacementProcessor extends AbstractPlacementProcessor {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SchedulerPlacementProcessor.class);
+
+ @Override
+ public void allocate(ApplicationAttemptId appAttemptId,
+ AllocateRequest request, AllocateResponse response) throws YarnException {
+ if (request.getSchedulingRequests() != null && !request
+ .getSchedulingRequests().isEmpty()) {
+ if (!(scheduler instanceof CapacityScheduler)) {
+ String message = "Found non empty SchedulingRequest of "
+ + "AllocateRequest for application=" + appAttemptId.toString()
+ + ", however the configured scheduler=" + scheduler.getClass()
+ .getCanonicalName()
+ + " cannot handle placement constraints, rejecting this "
+ + "allocate operation";
+ LOG.warn(message);
+ throw new YarnException(message);
+ }
+ }
+ nextAMSProcessor.allocate(appAttemptId, request, response);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
index 484d7803410..ee7e013ccbd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSchedulingRequestUpdate.java
@@ -50,6 +50,8 @@ public void testBasicPendingResourceUpdate() throws Exception {
Configuration conf = TestUtils.getConfigurationWithQueueLabels(
new Configuration(false));
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+ conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
mgr.init(conf);
@@ -166,6 +168,8 @@ public void testNodePartitionPendingResourceUpdate() throws Exception {
Configuration conf = TestUtils.getConfigurationWithQueueLabels(
new Configuration(false));
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
+ conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
mgr.init(conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
index b297f79d114..27d86611e31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocation.java
@@ -58,8 +58,8 @@ public void setUp() throws Exception {
public void testIntraAppAntiAffinity() throws Exception {
Configuration csConf = TestUtils.getConfigurationWithMultipleQueues(
new Configuration());
- csConf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
- true);
+ csConf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
// inject node label manager
MockRM rm1 = new MockRM(csConf) {
@@ -141,8 +141,8 @@ public RMNodeLabelsManager createNodeLabelManager() {
public void testIntraAppAntiAffinityWithMultipleTags() throws Exception {
Configuration csConf = TestUtils.getConfigurationWithMultipleQueues(
new Configuration());
- csConf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
- true);
+ csConf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
// inject node label manager
MockRM rm1 = new MockRM(csConf) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
index fc1cb0d37b6..d1d05dc407f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestSchedulingRequestContainerAllocationAsync.java
@@ -57,13 +57,13 @@ public void setUp() throws Exception {
private void testIntraAppAntiAffinityAsync(int numThreads) throws Exception {
Configuration csConf = TestUtils.getConfigurationWithMultipleQueues(
new Configuration());
- csConf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
- true);
csConf.setInt(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD,
numThreads);
csConf.setInt(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
+ ".scheduling-interval-ms", 0);
+ csConf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER);
// inject node label manager
MockRM rm1 = new MockRM(csConf) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
index 7180e24c0c2..fae63be5051 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java
@@ -275,9 +275,7 @@ public static Container getMockContainer(
public static Configuration getConfigurationWithQueueLabels(Configuration config) {
CapacitySchedulerConfiguration conf =
new CapacitySchedulerConfiguration(config);
- conf.setBoolean(CapacitySchedulerConfiguration.SCHEDULING_REQUEST_ALLOWED,
- true);
-
+
// Define top-level queues
conf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[] {"a", "b", "c"});
conf.setCapacityByLabel(CapacitySchedulerConfiguration.ROOT, "x", 100);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
index c4c0b5df475..e129a750c17 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/TestPlacementProcessor.java
@@ -86,8 +86,8 @@ public void createAndStartRM() {
YarnConfiguration conf = new YarnConfiguration(csConf);
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
- conf.setBoolean(
- YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+ conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER);
conf.setInt(
YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 1);
startRM(conf);
@@ -381,8 +381,8 @@ public void testSchedulerRejection() throws Exception {
YarnConfiguration conf = new YarnConfiguration(csConf);
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
- conf.setBoolean(
- YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+ conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER);
startRM(conf);
HashMap<NodeId, MockNM> nodes = new HashMap<>();
@@ -533,8 +533,8 @@ public void testRePlacementAfterSchedulerRejection() throws Exception {
YarnConfiguration conf = new YarnConfiguration(csConf);
conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
ResourceScheduler.class);
- conf.setBoolean(
- YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
+ conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_HANDLER,
+ YarnConfiguration.PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER);
conf.setInt(
YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 2);
startRM(conf);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
similarity index 78%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
index 7926eaba43d..f2a35b480b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/PlacementConstraints.md
@@ -12,10 +12,6 @@
limitations under the License. See accompanying LICENSE file.
-->
-#set ( $H3 = '###' )
-#set ( $H4 = '####' )
-#set ( $H5 = '#####' )
-
Placement Constraints
=====================
@@ -29,7 +25,7 @@ YARN allows applications to specify placement constraints in the form of data lo
For example, it may be beneficial to co-locate the allocations of a job on the same rack (*affinity* constraints) to reduce network costs, spread allocations across machines (*anti-affinity* constraints) to minimize resource interference, or allow up to a specific number of allocations in a node group (*cardinality* constraints) to strike a balance between the two. Placement decisions also affect resilience. For example, allocations placed within the same cluster upgrade domain would go offline simultaneously.
-The applications can specify constraints without requiring knowledge of the underlying topology of the cluster (e.g., one does not need to specify the specific node or rack where their containers should be placed with constraints) or the other applications deployed. Currently **intra-application** constraints are supported, but the design that is followed is generic and support for constraints across applications will soon be added. Moreover, all constraints at the moment are **hard**, that is, if the constraints for a container cannot be satisfied due to the current cluster condition or conflicting constraints, the container request gets rejected.
+The applications can specify constraints without requiring knowledge of the underlying topology of the cluster (e.g., one does not need to specify the specific node or rack where their containers should be placed with constraints) or the other applications deployed. Currently **intra-application** constraints are supported, but the design that is followed is generic and support for constraints across applications will soon be added. Moreover, all constraints at the moment are **hard**, that is, if the constraints for a container cannot be satisfied due to the current cluster condition or conflicting constraints, the container request will remain pending or be rejected.
Note that in this document we use the notion of “allocation” to refer to a unit of resources (e.g., CPU and memory) that gets allocated in a node. In the current implementation of YARN, an allocation corresponds to a single container. However, in case an application uses an allocation to spawn more than one containers, an allocation could correspond to multiple containers.
@@ -39,31 +35,24 @@ Quick Guide
We first describe how to enable scheduling with placement constraints and then provide examples of how to experiment with this feature using the distributed shell, an application that allows to run a given shell command on a set of containers.
-$H3 Enabling placement constraints
+### Enabling placement constraints
-To enable placement constraints, the following property has to be set to **true** in **conf/yarn-site.xml**:
+To enable placement constraints, the following property has to be set to `placement-processor` or `scheduler` in **conf/yarn-site.xml**:
| Property | Description | Default value |
|:-------- |:----------- |:------------- |
-| `yarn.resourcemanager.placement-constraints.enabled` | Enables rich placement constraints. | `false` |
+| `yarn.resourcemanager.placement-constraints.handler` | Specify which handler will be used to process PlacementConstraints. Acceptable values are `placement-processor`, `scheduler` and `disabled`. For a detailed explanation of these values, please refer to the section below. | `disabled` |
Further, the user can choose between the following two alternatives for placing containers with constraints:
-* **Placement processor:** Following this approach, the placement of containers with constraints is determined as a pre-processing step before the capacity or the fair scheduler is called. Once the placement is decided, the capacity/fair scheduler is invoked to perform the actual allocation. The advantage of this approach is that it supports all constraint types (affinity, anti-affinity, cardinality). Moreover, it considers multiple containers at a time, which allows to satisfy more constraints than a container-at-a-time approach can achieve. As it sits outside the main scheduler, it can be used by both the capacity and fair schedulers. Note that at the moment it does not account for task priorities within an application, given that such priorities might be conflicting with the placement constraints.
-* **Placement allocator in capacity scheduler:** This approach places containers with constraints within the capacity scheduler. It currently supports anti-affinity constraints (no affinity or cardinality) and places one container at a time. However, it supports traditional task priorities within an application.
-
-The placement processor approach supports a wider range of constraints and can allow more containers to be placed especially when applications have demanding constraints or the cluster is highly-utilized (due to considering multiple containers at a time). However, if respecting task priority within an application is important for the user and the capacity scheduler is used, then the placement allocator in the capacity scheduler should be used instead.
-
-By default, the placement processor approach is enabled. To use the placement allocator in the capacity scheduler instead, the following parameter has to be set to **true** in the **conf/capacity-scheduler.xml**:
-
-| Property | Description | Default value |
-|:-------- |:----------- |:------------- |
-| `yarn.scheduler.capacity.scheduling-request.allowed` | When set to false, the placement processor is used; when set to true, the allocator inside the capacity scheduler is used. | `false` |
+* `placement-processor`: For this handler, the placement of containers with constraints is determined as a pre-processing step before the capacity or the fair scheduler is called. Once the placement is decided, the capacity/fair scheduler is invoked to perform the actual allocation. The advantage of this handler is that it is targeted to support all constraint types (affinity, anti-affinity, cardinality); currently **intra-application** constraints are supported. Moreover, it considers multiple containers at a time, which allows it to satisfy more constraints than a container-at-a-time approach can achieve. As it sits outside the main scheduler, it can be used by both the capacity and fair schedulers. Note that at the moment it does not account for task priorities within an application, given that such priorities might conflict with the placement constraints.
+* `scheduler`: For this handler, containers with constraints are placed by the scheduler itself (as of now, only `CapacityScheduler` supports SchedulingRequest). It currently supports intra-application anti-affinity constraints (no affinity or cardinality). The advantage of this handler compared to `placement-processor` is that it follows exactly the same ordering rules for queues (sorted by utilization, priority), applications (sorted by FIFO/fairness/priority) and tasks within the same application (priority) that are enforced by the existing scheduler.
+* `disabled`: Rejects all placement requests: if an application submits any SchedulingRequest, its allocate calls will be rejected.
+The `placement-processor` handler supports a wider range of constraints and can allow more containers to be placed, especially when applications have demanding constraints or the cluster is highly utilized (due to considering multiple containers at a time). However, if respecting task priority within an application is important for the user and the capacity scheduler is used, then the `scheduler` handler should be used instead.
-
-$H3 Experimenting with placement constraints using distributed shell
+### Experimenting with placement constraints using distributed shell
Users can experiment with placement constraints by using the distributed shell application through the following command:
@@ -101,18 +90,18 @@ The above encodes two constraints:
Defining Placement Constraints
------------------------------
-$H3 Allocation tags
+### Allocation tags
Allocation tags are string tags that an application can associate with (groups of) its containers. Tags are used to identify components of applications. For example, an HBase Master allocation can be tagged with "hbase-m", and Region Servers with "hbase-rs". Other examples are "latency-critical" to refer to the more general demands of the allocation, or "app_0041" to denote the job ID. Allocation tags play a key role in constraints, as they allow to refer to multiple allocations that share a common tag.
Note that instead of using the `ResourceRequest` object to define allocation tags, we use the new `SchedulingRequest` object. This has many similarities with the `ResourceRequest`, but better separates the sizing of the requested allocations (number and size of allocations, priority, execution type, etc.), and the constraints dictating how these allocations should be placed (resource name, relaxed locality). Applications can still use `ResourceRequest` objects, but in order to define allocation tags and constraints, they need to use the `SchedulingRequest` object. Within a single `AllocateRequest`, an application should use either the `ResourceRequest` or the `SchedulingRequest` objects, but not both of them.
-$H4 Differences between node labels, node attributes and allocation tags
+#### Differences between node labels, node attributes and allocation tags
The difference between allocation tags and node labels or node attributes (YARN-3409), is that allocation tags are attached to allocations and not to nodes. When an allocation gets allocated to a node by the scheduler, the set of tags of that allocation are automatically added to the node for the duration of the allocation. Hence, a node inherits the tags of the allocations that are currently allocated to the node. Likewise, a rack inherits the tags of its nodes. Moreover, similar to node labels and unlike node attributes, allocation tags have no value attached to them. As we show below, our constraints can refer to allocation tags, as well as node labels and node attributes.
-$H3 Placement constraints API
+### Placement constraints API
Applications can use the public API in the `PlacementConstraints` to construct placement constraint. Before describing the methods for building constraints, we describe the methods of the `PlacementTargets` class that are used to construct the target expressions that will then be used in constraints:
@@ -139,11 +128,10 @@ The methods of the `PlacementConstraints` class for building constraints are the
The `PlacementConstraints` class also includes method for building compound constraints (AND/OR expressions with multiple constraints). Adding support for compound constraints is work in progress.
-$H3 Specifying constraints in applications
+### Specifying constraints in applications
Applications have to specify the containers for which each constraint will be enabled. To this end, applications can provide a mapping from a set of allocation tags (source tags) to a placement constraint. For example, an entry of this mapping could be "hbase"->constraint1, which means that constraint1 will be applied when scheduling each allocation with tag "hbase".
-When using the placement processor approach (see [Enabling placement constraints](#Enabling_placement_constraints)), this constraint mapping is specified within the `RegisterApplicationMasterRequest`.
-
-When using the placement allocator in the capacity scheduler, the constraints can also be added at each `SchedulingRequest` object. Each such constraint is valid for the tag of that scheduling request. In case constraints are specified both at the `ReisterApplicationMasterRequest` and the scheduling requests, the latter override the former.
+When using the `placement-processor` handler (see [Enabling placement constraints](#Enabling_placement_constraints)), this constraint mapping is specified within the `RegisterApplicationMasterRequest`.
+When using the `scheduler` handler, the constraints can also be added at each `SchedulingRequest` object. Each such constraint is valid for the tag of that scheduling request. In case constraints are specified both at the `RegisterApplicationMasterRequest` and the scheduling requests, the latter override the former.