diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueConfig.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueConfig.java
new file mode 100644
index 00000000000..4750640063a
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueConfig.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.util.Map;
+
+/**
+ *
+ * QueueConfig is the config update info for an
+ * added/updated queue.
+ */
+@Public
+@Unstable
+public abstract class QueueConfig {
+
+ @Public
+ @Unstable
+ public static QueueConfig newInstance(String queuePath,
+ Map<String, String> queueConfigs) {
+ QueueConfig queueConfig = Records.newRecord(QueueConfig.class);
+ queueConfig.setQueuePath(queuePath);
+ queueConfig.setQueueConfigs(queueConfigs);
+ return queueConfig;
+ }
+
+ /**
+ * Get the queuePath.
+ * @return queuePath
+ */
+ @Public
+ @Unstable
+ public abstract String getQueuePath();
+
+ @Private
+ @Unstable
+ protected abstract void setQueuePath(String queuePath);
+
+ /**
+ * Get the queueConfigs for this queue.
+ * @return queueConfigs
+ */
+ @Public
+ @Unstable
+ public abstract Map<String, String> getQueueConfigs();
+
+ @Private
+ @Unstable
+ protected abstract void setQueueConfigs(Map<String, String> queueConfigs);
+}
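
For reference, a minimal illustrative sketch of how a caller could build the new record; the queue path and property keys below are placeholder values and assume the usual java.util imports:

    Map<String, String> confs = new HashMap<>();
    confs.put("capacity", "25");              // placeholder capacity-scheduler key
    confs.put("maximum-capacity", "50");      // placeholder capacity-scheduler key
    QueueConfig queueConfig = QueueConfig.newInstance("root.a.a1", confs);
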
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
index 852334245ce..c40bc3a60ae 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
@@ -54,6 +54,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationResponse;
@Private
public interface ResourceManagerAdministrationProtocol extends GetUserMappingsProtocol {
@@ -144,4 +146,10 @@ public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(
public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
RefreshClusterMaxPriorityRequest request) throws YarnException,
IOException;
+
+ @Private
+ @Idempotent
+ public UpdateSchedulerConfigurationResponse updateSchedulerConfiguration(
+ UpdateSchedulerConfigurationRequest request) throws YarnException,
+ IOException;
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateSchedulerConfigurationRequest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateSchedulerConfigurationRequest.java
new file mode 100644
index 00000000000..21f75d4e5e0
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateSchedulerConfigurationRequest.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.api.records.QueueConfig;
+import org.apache.hadoop.yarn.util.Records;
+
+import java.util.Map;
+import java.util.Set;
+
+@Public
+@Evolving
+public abstract class UpdateSchedulerConfigurationRequest {
+ public static UpdateSchedulerConfigurationRequest newInstance(
+ Set<QueueConfig> addQueuesInfo, Set<String> removeQueuesInfo,
+ Set<QueueConfig> updateQueuesInfo, Map<String, String> globalUpdateInfo) {
+ UpdateSchedulerConfigurationRequest request =
+ Records.newRecord(UpdateSchedulerConfigurationRequest.class);
+ request.setAddQueuesInfo(addQueuesInfo);
+ request.setRemoveQueuesInfo(removeQueuesInfo);
+ request.setUpdateQueuesInfo(updateQueuesInfo);
+ request.setGlobalUpdatesInfo(globalUpdateInfo);
+ return request;
+ }
+
+ @Public
+ @Evolving
+ public abstract Set<QueueConfig> getAddQueuesInfo();
+
+ @Public
+ @Evolving
+ public abstract void setAddQueuesInfo(Set<QueueConfig> addQueuesInfo);
+
+ @Public
+ @Evolving
+ public abstract Set<String> getRemoveQueuesInfo();
+
+ @Public
+ @Evolving
+ public abstract void setRemoveQueuesInfo(Set<String> removeQueuesInfo);
+
+ @Public
+ @Evolving
+ public abstract Set<QueueConfig> getUpdateQueuesInfo();
+
+ @Public
+ @Evolving
+ public abstract void setUpdateQueuesInfo(Set<QueueConfig> updateQueuesInfo);
+
+ @Public
+ @Evolving
+ public abstract Map<String, String> getGlobalUpdatesInfo();
+
+ @Public
+ @Evolving
+ public abstract void setGlobalUpdatesInfo(
+ Map<String, String> globalUpdateInfo);
+}
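
An illustrative sketch of assembling and sending a request, assuming adminProtocol is an already-created ResourceManagerAdministrationProtocol proxy, the surrounding method declares YarnException/IOException, and the queue paths and keys are placeholders:

    Set<QueueConfig> addQueues = Collections.singleton(
        QueueConfig.newInstance("root.b", Collections.singletonMap("capacity", "10")));
    Set<String> removeQueues = Collections.emptySet();
    Set<QueueConfig> updateQueues = Collections.emptySet();
    Map<String, String> globalUpdates =
        Collections.singletonMap("yarn.scheduler.capacity.maximum-applications", "5000");
    UpdateSchedulerConfigurationRequest request =
        UpdateSchedulerConfigurationRequest.newInstance(
            addQueues, removeQueues, updateQueues, globalUpdates);
    adminProtocol.updateSchedulerConfiguration(request);   // throws YarnException, IOException
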
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateSchedulerConfigurationResponse.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateSchedulerConfigurationResponse.java
new file mode 100644
index 00000000000..ee6b5916894
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateSchedulerConfigurationResponse.java
@@ -0,0 +1,36 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+@Private
+@Stable
+public abstract class UpdateSchedulerConfigurationResponse {
+ @Private
+ @Unstable
+ public static UpdateSchedulerConfigurationResponse newInstance() {
+ UpdateSchedulerConfigurationResponse response =
+ Records.newRecord(UpdateSchedulerConfigurationResponse.class);
+ return response;
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
index 113462305cd..65666313655 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
@@ -45,4 +45,5 @@ service ResourceManagerAdministrationProtocolService {
rpc replaceLabelsOnNodes(ReplaceLabelsOnNodeRequestProto) returns (ReplaceLabelsOnNodeResponseProto);
rpc checkForDecommissioningNodes(CheckForDecommissioningNodesRequestProto) returns (CheckForDecommissioningNodesResponseProto);
rpc refreshClusterMaxPriority(RefreshClusterMaxPriorityRequestProto) returns (RefreshClusterMaxPriorityResponseProto);
+ rpc updateSchedulerConfiguration(UpdateSchedulerConfigurationRequestProto) returns (UpdateSchedulerConfigurationResponseProto);
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
index e8c92d962f3..ccb93c333c9 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
@@ -130,6 +130,16 @@ enum DecommissionTypeProto {
GRACEFUL = 2;
FORCEFUL = 3;
}
+
+message UpdateSchedulerConfigurationRequestProto {
+ repeated QueueConfigProto addQueueConfig = 1;
+ repeated string removeQueueConfig = 2;
+ repeated QueueConfigProto updateQueueConfig = 3;
+ repeated ConfigProto globalUpdateConfig = 4;
+}
+
+message UpdateSchedulerConfigurationResponseProto {
+}
//////////////////////////////////////////////////////////////////
///////////// RM Failover related records ////////////////////////
//////////////////////////////////////////////////////////////////
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 3b26a5cb3d6..1a8bbde5022 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -307,6 +307,16 @@ enum ExecutionTypeProto {
OPPORTUNISTIC = 2;
}
+message QueueConfigProto {
+ required string queuePath = 1;
+ repeated ConfigProto queueConfigs = 2;
+}
+
+message ConfigProto {
+ required string confKey = 1;
+ optional string confVal = 2;
+}
+
////////////////////////////////////////////////////////////////////////
////// From AM_RM_Protocol /////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index ac5f559d4dc..f199ada5bb3 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -23,6 +23,7 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -47,6 +48,7 @@
import org.apache.hadoop.yarn.api.records.DecommissionType;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.api.records.QueueConfig;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.client.ClientRMProxy;
@@ -73,6 +75,7 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationRequest;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
@@ -96,6 +99,10 @@
private static final String ADD_LABEL_FORMAT_ERR_MSG =
"Input format for adding node-labels is not correct, it should be "
+ "labelName1[(exclusive=true/false)],LabelName2[] ..";
+ private static final String QUEUE_CONF_FORMAT_ERR_MSG =
+ "Input format for queue configs is not correct, it should be "
+ + "queueName=[queuePath1],confKey1=confVal1,confKey2=confVal2;"
+ + "queueName=[queuePath2],confKey3=confVal3";
protected final static Map<String, UsageInfo> ADMIN_USAGE =
ImmutableMap.<String, UsageInfo>builder()
@@ -160,6 +167,14 @@
.put("-updateNodeResource",
new UsageInfo("[NodeID] [MemSize] [vCores] ([OvercommitTimeout])",
"Update resource on specific node."))
+ .put("-updateSchedulerConfiguration",
+ new UsageInfo("[-add queueName=queuePathToAdd1," +
+ "addConfKey1=addConfVal1,addConfKey2=addConfVal2;" +
+ "queueName=queuePathToAdd2,addConfKey3=addConfVal3]" +
+ " [-remove queuePathToRemove1,queuePathToRemove2]" +
+ " [-update queueName=queuePathToUpdate1,updateConfKey=updateConfVal]" +
+ " [-global globalSchedKey=globalSchedVal]",
+ "Update scheduler configuration."))
.build();
public RMAdminCLI() {
@@ -280,7 +295,13 @@ private static void printHelp(String cmd, boolean isHAEnabled) {
" [-directlyAccessNodeLabelStore]" +
" [-refreshClusterMaxPriority]" +
" [-updateNodeResource [NodeID] [MemSize] [vCores]" +
- " ([OvercommitTimeout])");
+ " ([OvercommitTimeout])" +
+ " [-updateSchedulerConfiguration [-add queueName=queuePathToAdd1," +
+ "addConfKey1=addConfVal1,addConfKey2=addConfVal2;" +
+ "queueName=queuePathToAdd2,addConfKey3=addConfVal3]" +
+ " [-remove queuePathToRemove1,queuePathToRemove2]" +
+ " [-update queueName=queuePathToUpdate1,updateConfKey=updateConfVal]" +
+ " [-global globalSchedKey=globalSchedVal]");
if (isHAEnabled) {
appendHAUsage(summary);
}
@@ -758,6 +779,80 @@ private int replaceLabelsOnNodes(Map<NodeId, Set<String>> map,
return 0;
}
+ private QueueConfig getQueueConfig(String arg) {
+ if (arg == null) {
+ return null;
+ }
+ String[] queueArgs = arg.split(",");
+ String queueArg = queueArgs[0];
+ String[] queueNameArg = queueArg.split("=");
+ if (queueNameArg.length <= 1 || !"queueName".equals(queueNameArg[0])) {
+ throw new IllegalArgumentException(QUEUE_CONF_FORMAT_ERR_MSG);
+ }
+ String queuePath = queueNameArg[1];
+ Map<String, String> queueConfigs = new HashMap<>();
+ for (int i = 1; i < queueArgs.length; ++i) {
+ String[] queueConf = queueArgs[i].split("=");
+ if (queueConf.length == 1) {
+ if (queueConf[0].isEmpty()) {
+ throw new IllegalArgumentException(QUEUE_CONF_FORMAT_ERR_MSG);
+ } else {
+ queueConfigs.put(queueConf[0], null);
+ }
+ } else if (queueConf.length > 2) {
+ throw new IllegalArgumentException(QUEUE_CONF_FORMAT_ERR_MSG);
+ } else {
+ queueConfigs.put(queueConf[0], queueConf[1]);
+ }
+ }
+ return QueueConfig.newInstance(queuePath, queueConfigs);
+ }
+
+ private int updateSchedulerConfiguration(String[] args) throws IOException,
+ ParseException, YarnException {
+ Options opts = new Options();
+ opts.addOption("updateSchedulerConfiguration", false, "Update scheduler configuration.");
+ opts.addOption("add", "add-queue", true, "Add queue and its configs.");
+ opts.addOption("remove", "remove-queue", true, "Remove queue.");
+ opts.addOption("update", "update-queue", true, "Update queue's configs.");
+ opts.addOption("global", "global-update", true, "Update global scheduler configs.");
+ CommandLine cliParser = new GnuParser().parse(opts, args);
+ String addQueueList = cliParser.getOptionValue("add-queue");
+ Set<QueueConfig> addQueueVal = new HashSet<>();
+ if (addQueueList != null) {
+ for (String addQueue : addQueueList.split(";")) {
+ addQueueVal.add(getQueueConfig(addQueue));
+ }
+ }
+ String removeQueueList = cliParser.getOptionValue("remove-queue");
+ Set<String> removeQueueVal = Collections.emptySet();
+ if (removeQueueList != null) {
+ removeQueueVal = new HashSet<>(Arrays.asList(removeQueueList.split(",")));
+ }
+ String updateQueueList = cliParser.getOptionValue("update-queue");
+ Set<QueueConfig> updateQueueVal = new HashSet<>();
+ if (updateQueueList != null) {
+ for (String updateQueue : updateQueueList.split(";")) {
+ updateQueueVal.add(getQueueConfig(updateQueue));
+ }
+ }
+ String globalUpdateList = cliParser.getOptionValue("global-update");
+ Map<String, String> globalUpdateVal = new HashMap<>();
+ if (globalUpdateList != null) {
+ for (String globalUpdate : globalUpdateList.split(",")) {
+ String[] globalUpdatePair = globalUpdate.split("=");
+ globalUpdateVal.put(globalUpdatePair[0], globalUpdatePair[1]);
+ }
+ }
+ ResourceManagerAdministrationProtocol adminProtocol =
+ createAdminProtocol();
+ UpdateSchedulerConfigurationRequest request =
+ UpdateSchedulerConfigurationRequest.newInstance(addQueueVal,
+ removeQueueVal, updateQueueVal, globalUpdateVal);
+ adminProtocol.updateSchedulerConfiguration(request);
+ return 0;
+ }
+
@Override
public int run(String[] args) throws Exception {
YarnConfiguration yarnConf =
@@ -837,6 +932,8 @@ public int run(String[] args) throws Exception {
exitCode = handleRemoveFromClusterNodeLabels(args, cmd, isHAEnabled);
} else if ("-replaceLabelsOnNode".equals(cmd)) {
exitCode = handleReplaceLabelsOnNodes(args, cmd, isHAEnabled);
+ } else if ("-updateSchedulerConfiguration".equals(cmd)) {
+ exitCode = updateSchedulerConfiguration(args);
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
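
An illustrative invocation once this is wired up (queue names and keys are placeholder values): yarn rmadmin -updateSchedulerConfiguration -add "queueName=root.a.a2,capacity=20,maximum-capacity=40" -remove root.a.a3 -global yarn.scheduler.capacity.maximum-applications=5000. Each -add/-update value is parsed by getQueueConfig into a QueueConfig record, and all options are combined into a single UpdateSchedulerConfigurationRequest.
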
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueConfigPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueConfigPBImpl.java
new file mode 100644
index 00000000000..90d4382686b
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueConfigPBImpl.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.QueueConfig;
+import org.apache.hadoop.yarn.proto.YarnProtos.ConfigProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueConfigProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueConfigProtoOrBuilder;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+public class QueueConfigPBImpl extends QueueConfig {
+ QueueConfigProto proto = QueueConfigProto.getDefaultInstance();
+ QueueConfigProto.Builder builder = null;
+ boolean viaProto = false;
+ private Map<String, String> queueConfigs = null;
+
+ public QueueConfigPBImpl() {
+ builder = QueueConfigProto.newBuilder();
+ }
+
+ public QueueConfigPBImpl(QueueConfigProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public QueueConfigProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.queueConfigs != null) {
+ addQueueConfigsToProto();
+ }
+ }
+
+ private void addQueueConfigsToProto() {
+ maybeInitBuilder();
+ builder.clearQueueConfigs();
+ if (queueConfigs == null) {
+ return;
+ }
+ Iterable<ConfigProto> iterable =
+ new Iterable<ConfigProto>() {
+ @Override
+ public Iterator<ConfigProto> iterator() {
+ return new Iterator<ConfigProto>() {
+
+ Iterator<Map.Entry<String, String>> iter = queueConfigs
+ .entrySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ConfigProto next() {
+ Map.Entry<String, String> now = iter.next();
+ return ConfigProto.newBuilder()
+ .setConfKey(now.getKey())
+ .setConfVal(now.getValue()).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllQueueConfigs(iterable);
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = QueueConfigProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null)
+ return false;
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public String getQueuePath() {
+ QueueConfigProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasQueuePath()) {
+ return null;
+ }
+ return (p.getQueuePath());
+ }
+
+ @Override
+ public void setQueuePath(String queuePath) {
+ maybeInitBuilder();
+ if (queuePath == null) {
+ builder.clearQueuePath();
+ return;
+ }
+ builder.setQueuePath(queuePath);
+ }
+
+ @Override
+ public Map<String, String> getQueueConfigs() {
+ initQueueConfig();
+ return this.queueConfigs;
+ }
+
+ @Override
+ public void setQueueConfigs(Map<String, String> queueConfigs) {
+ if (queueConfigs == null) {
+ return;
+ }
+ initQueueConfig();
+ this.queueConfigs.clear();
+ this.queueConfigs.putAll(queueConfigs);
+ }
+
+ private void initQueueConfig() {
+ if (this.queueConfigs != null) {
+ return;
+ }
+ QueueConfigProtoOrBuilder p = viaProto ? proto : builder;
+ List<ConfigProto> lists = p.getQueueConfigsList();
+ this.queueConfigs = new HashMap<>(lists.size());
+ for (ConfigProto configProto : lists) {
+ this.queueConfigs.put(configProto.getConfKey(),
+ configProto.getConfVal());
+ }
+ }
+}
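
An illustrative round trip for the new PB record (placeholder values, assuming java.util.Collections is imported), showing that a locally-built record converts to QueueConfigProto and back:

    QueueConfigPBImpl original = new QueueConfigPBImpl();
    original.setQueuePath("root.a.a1");                                    // placeholder path
    original.setQueueConfigs(Collections.singletonMap("capacity", "25"));  // placeholder config
    QueueConfigProto proto = original.getProto();
    QueueConfig restored = new QueueConfigPBImpl(proto);
    // restored.getQueuePath() -> "root.a.a1"
    // restored.getQueueConfigs().get("capacity") -> "25"
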
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
index 077edf34900..7daeced51c3 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
@@ -43,6 +43,7 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateSchedulerConfigurationRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
@@ -72,6 +73,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
@@ -96,6 +99,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateSchedulerConfigurationRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateSchedulerConfigurationResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
@@ -323,4 +328,19 @@ public RefreshClusterMaxPriorityResponse refreshClusterMaxPriority(
return null;
}
}
+
+ @Override
+ public UpdateSchedulerConfigurationResponse updateSchedulerConfiguration(
+ UpdateSchedulerConfigurationRequest request) throws YarnException,
+ IOException {
+ UpdateSchedulerConfigurationRequestProto requestProto =
+ ((UpdateSchedulerConfigurationRequestPBImpl) request).getProto();
+ try {
+ return new UpdateSchedulerConfigurationResponsePBImpl(
+ proxy.updateSchedulerConfiguration(null, requestProto));
+ } catch (ServiceException e) {
+ RPCUtil.unwrapAndThrowException(e);
+ return null;
+ }
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
index aafce0875e7..98b5d049740 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
@@ -48,6 +48,8 @@
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateSchedulerConfigurationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateSchedulerConfigurationResponseProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
@@ -67,6 +69,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl;
@@ -91,6 +95,8 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateSchedulerConfigurationRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateSchedulerConfigurationResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
@@ -336,4 +342,21 @@ public RefreshClusterMaxPriorityResponseProto refreshClusterMaxPriority(
throw new ServiceException(e);
}
}
+
+ @Override
+ public UpdateSchedulerConfigurationResponseProto updateSchedulerConfiguration(
+ RpcController controller, UpdateSchedulerConfigurationRequestProto proto)
+ throws ServiceException {
+ UpdateSchedulerConfigurationRequest request =
+ new UpdateSchedulerConfigurationRequestPBImpl(proto);
+ try {
+ UpdateSchedulerConfigurationResponse response =
+ real.updateSchedulerConfiguration(request);
+ return ((UpdateSchedulerConfigurationResponsePBImpl) response).getProto();
+ } catch (YarnException e) {
+ throw new ServiceException(e);
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateSchedulerConfigurationRequestPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateSchedulerConfigurationRequestPBImpl.java
new file mode 100644
index 00000000000..27cc9d01ffb
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateSchedulerConfigurationRequestPBImpl.java
@@ -0,0 +1,293 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.records.QueueConfig;
+import org.apache.hadoop.yarn.api.records.impl.pb.QueueConfigPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ConfigProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.QueueConfigProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateSchedulerConfigurationRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateSchedulerConfigurationRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationRequest;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class UpdateSchedulerConfigurationRequestPBImpl extends UpdateSchedulerConfigurationRequest {
+
+ UpdateSchedulerConfigurationRequestProto proto = UpdateSchedulerConfigurationRequestProto.getDefaultInstance();
+ UpdateSchedulerConfigurationRequestProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private Set<QueueConfig> addQueueConfigs;
+ private Set<String> removeQueueConfigs;
+ private Set<QueueConfig> updateQueueConfigs;
+ private Map<String, String> globalConfigs;
+
+ public UpdateSchedulerConfigurationRequestPBImpl() {
+ builder = UpdateSchedulerConfigurationRequestProto.newBuilder();
+ }
+
+ public UpdateSchedulerConfigurationRequestPBImpl(UpdateSchedulerConfigurationRequestProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ @Override
+ public Set<QueueConfig> getAddQueuesInfo() {
+ initAddQueuesInfo();
+ return this.addQueueConfigs;
+ }
+
+ @Override
+ public void setAddQueuesInfo(Set<QueueConfig> addQueuesInfo) {
+ if (addQueuesInfo == null) {
+ return;
+ }
+ initAddQueuesInfo();
+ this.addQueueConfigs.clear();
+ this.addQueueConfigs.addAll(addQueuesInfo);
+ }
+
+ @Override
+ public Set<String> getRemoveQueuesInfo() {
+ initRemoveQueuesInfo();
+ return this.removeQueueConfigs;
+ }
+
+ @Override
+ public void setRemoveQueuesInfo(Set<String> removeQueuesInfo) {
+ if (removeQueuesInfo == null) {
+ return;
+ }
+ initRemoveQueuesInfo();
+ this.removeQueueConfigs.clear();
+ this.removeQueueConfigs.addAll(removeQueuesInfo);
+ }
+
+ @Override
+ public Set<QueueConfig> getUpdateQueuesInfo() {
+ initUpdateQueuesInfo();
+ return this.updateQueueConfigs;
+ }
+
+ @Override
+ public void setUpdateQueuesInfo(Set<QueueConfig> updateQueuesInfo) {
+ if (updateQueuesInfo == null) {
+ return;
+ }
+ initUpdateQueuesInfo();
+ this.updateQueueConfigs.clear();
+ this.updateQueueConfigs.addAll(updateQueuesInfo);
+ }
+
+ @Override
+ public Map<String, String> getGlobalUpdatesInfo() {
+ initGlobalUpdates();
+ return this.globalConfigs;
+ }
+
+ @Override
+ public void setGlobalUpdatesInfo(Map<String, String> globalInfo) {
+ if (globalInfo == null) {
+ return;
+ }
+ initGlobalUpdates();
+ this.globalConfigs.clear();
+ this.globalConfigs.putAll(globalInfo);
+ }
+
+ public UpdateSchedulerConfigurationRequestProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private void mergeLocalToBuilder() {
+ if (this.addQueueConfigs != null) {
+ addAddQueueConfigsToProto();
+ }
+ if (this.removeQueueConfigs != null) {
+ addRemoveQueueConfigsToProto();
+ }
+ if (this.updateQueueConfigs != null) {
+ addUpdateQueueConfigsToProto();
+ }
+ if (this.globalConfigs != null) {
+ addGlobalConfigsToProto();
+ }
+ }
+
+ private void addAddQueueConfigsToProto() {
+ maybeInitBuilder();
+ builder.clearAddQueueConfig();
+ if (this.addQueueConfigs == null)
+ return;
+ Set<QueueConfigProto> addQueueConfigsProto = new HashSet<>();
+ for (QueueConfig queueConfig : addQueueConfigs) {
+ addQueueConfigsProto.add(convertToProtoFormat(queueConfig));
+ }
+ builder.addAllAddQueueConfig(addQueueConfigsProto);
+ }
+
+ private void addRemoveQueueConfigsToProto() {
+ maybeInitBuilder();
+ builder.clearRemoveQueueConfig();
+ if (this.removeQueueConfigs == null)
+ return;
+ builder.addAllRemoveQueueConfig(removeQueueConfigs);
+ }
+
+ private void addUpdateQueueConfigsToProto() {
+ maybeInitBuilder();
+ builder.clearUpdateQueueConfig();
+ if (this.updateQueueConfigs == null)
+ return;
+ Set<QueueConfigProto> updateQueueConfigsProto = new HashSet<>();
+ for (QueueConfig queueConfig : updateQueueConfigs) {
+ updateQueueConfigsProto.add(convertToProtoFormat(queueConfig));
+ }
+ builder.addAllUpdateQueueConfig(updateQueueConfigsProto);
+ }
+
+ private void addGlobalConfigsToProto() {
+ maybeInitBuilder();
+ builder.clearGlobalUpdateConfig();
+ if (globalConfigs == null) {
+ return;
+ }
+ Iterable<ConfigProto> iterable =
+ new Iterable<ConfigProto>() {
+ @Override
+ public Iterator<ConfigProto> iterator() {
+ return new Iterator<ConfigProto>() {
+
+ Iterator<Map.Entry<String, String>> iter = globalConfigs
+ .entrySet().iterator();
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ConfigProto next() {
+ Map.Entry<String, String> now = iter.next();
+ return ConfigProto.newBuilder()
+ .setConfKey(now.getKey())
+ .setConfVal(now.getValue()).build();
+ }
+
+ @Override
+ public boolean hasNext() {
+ return iter.hasNext();
+ }
+ };
+ }
+ };
+ builder.addAllGlobalUpdateConfig(iterable);
+ }
+
+ private void initAddQueuesInfo() {
+ if (this.addQueueConfigs != null) {
+ return;
+ }
+ UpdateSchedulerConfigurationRequestProtoOrBuilder p = viaProto ? proto : builder;
+ List<QueueConfigProto> list = p.getAddQueueConfigList();
+ this.addQueueConfigs = new HashSet<>(list.size());
+ for (QueueConfigProto addQueueProto : list) {
+ this.addQueueConfigs.add(convertFromProtoFormat(addQueueProto));
+ }
+ }
+
+ private void initRemoveQueuesInfo() {
+ if (this.removeQueueConfigs != null) {
+ return;
+ }
+ UpdateSchedulerConfigurationRequestProtoOrBuilder p = viaProto ? proto : builder;
+ List<String> list = p.getRemoveQueueConfigList();
+ this.removeQueueConfigs = new HashSet<>(list);
+ }
+
+ private void initUpdateQueuesInfo() {
+ if (this.updateQueueConfigs != null) {
+ return;
+ }
+ UpdateSchedulerConfigurationRequestProtoOrBuilder p = viaProto ? proto : builder;
+ List<QueueConfigProto> list = p.getUpdateQueueConfigList();
+ this.updateQueueConfigs = new HashSet<>(list.size());
+ for (QueueConfigProto updateQueueProto : list) {
+ this.updateQueueConfigs.add(convertFromProtoFormat(updateQueueProto));
+ }
+ }
+
+ private void initGlobalUpdates() {
+ if (this.globalConfigs != null) {
+ return;
+ }
+ UpdateSchedulerConfigurationRequestProtoOrBuilder p = viaProto ? proto : builder;
+ List<ConfigProto> list = p.getGlobalUpdateConfigList();
+ this.globalConfigs = new HashMap<>(list.size());
+ for (ConfigProto configProto : list) {
+ this.globalConfigs.put(configProto.getConfKey(), configProto.getConfVal());
+ }
+ }
+
+ private void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = UpdateSchedulerConfigurationRequestProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ private QueueConfigProto convertToProtoFormat(QueueConfig queueConfig) {
+ return ((QueueConfigPBImpl)queueConfig).getProto();
+ }
+
+ private QueueConfig convertFromProtoFormat(QueueConfigProto proto) {
+ return new QueueConfigPBImpl(proto);
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null)
+ return false;
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateSchedulerConfigurationResponsePBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateSchedulerConfigurationResponsePBImpl.java
new file mode 100644
index 00000000000..69c62259265
--- /dev/null
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/UpdateSchedulerConfigurationResponsePBImpl.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import com.google.protobuf.TextFormat;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateSchedulerConfigurationResponseProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationResponse;
+
+public class UpdateSchedulerConfigurationResponsePBImpl extends UpdateSchedulerConfigurationResponse {
+
+ UpdateSchedulerConfigurationResponseProto proto = UpdateSchedulerConfigurationResponseProto.getDefaultInstance();
+ UpdateSchedulerConfigurationResponseProto.Builder builder = null;
+ boolean viaProto = false;
+
+ public UpdateSchedulerConfigurationResponsePBImpl() {
+ builder = UpdateSchedulerConfigurationResponseProto.newBuilder();
+ }
+
+ public UpdateSchedulerConfigurationResponsePBImpl(UpdateSchedulerConfigurationResponseProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public UpdateSchedulerConfigurationResponseProto getProto() {
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ @Override
+ public int hashCode() {
+ return getProto().hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == null)
+ return false;
+ if (other.getClass().isAssignableFrom(this.getClass())) {
+ return this.getProto().equals(this.getClass().cast(other).getProto());
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return TextFormat.shortDebugString(getProto());
+ }
+}
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index bcf7309f883..1998fa22b04 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -23,6 +23,7 @@
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -50,6 +51,7 @@
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.service.CompositeService;
import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.QueueConfig;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -87,12 +89,17 @@
import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateSchedulerConfigurationResponse;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NodeLabelsUtils;
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
import org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.QueueConfigInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedConfUpdateInfo;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
@@ -914,4 +921,51 @@ private void refreshClusterMaxPriority() throws IOException, YarnException {
rmContext.getScheduler().setClusterMaxPriority(conf);
}
+
+ @Override
+ public UpdateSchedulerConfigurationResponse updateSchedulerConfiguration(
+ UpdateSchedulerConfigurationRequest request) throws YarnException, IOException {
+ final String operation = "updateSchedulerConfiguration";
+ final String msg = "update scheduler configuration";
+ UserGroupInformation user = UserGroupInformation.getCurrentUser();
+ try {
+ // Don't check ACLs here. Leave it to the scheduler's configuration provider.
+ updateSchedulerConfiguration(request.getAddQueuesInfo(),
+ request.getRemoveQueuesInfo(), request.getUpdateQueuesInfo(),
+ request.getGlobalUpdatesInfo());
+ RMAuditLogger.logSuccess(user.getShortUserName(), operation, "AdminService");
+ return recordFactory.newRecordInstance(UpdateSchedulerConfigurationResponse.class);
+ } catch (IOException e) {
+ throw logAndWrapException(e, user.getShortUserName(), operation, msg);
+ }
+ }
+
+ private void updateSchedulerConfiguration(
+ Set<QueueConfig> addQueuesInfo, Set<String> removeQueuesInfo,
+ Set<QueueConfig> updateQueuesInfo, Map<String, String> globalUpdatesInfo)
+ throws YarnException, IOException {
+ if (!(rmContext.getScheduler() instanceof MutableConfScheduler)) {
+ throw new UnsupportedOperationException("Configuration change only " +
+ "supported by MutableConfScheduler.");
+ }
+ ArrayList<QueueConfigInfo> addInfo = new ArrayList<>();
+ for (QueueConfig addConf : addQueuesInfo) {
+ addInfo.add(new QueueConfigInfo(addConf.getQueuePath(),
+ addConf.getQueueConfigs()));
+ }
+ ArrayList<String> removeInfo = new ArrayList<>(removeQueuesInfo);
+ ArrayList<QueueConfigInfo> updateInfo = new ArrayList<>();
+ for (QueueConfig updateConf : updateQueuesInfo) {
+ updateInfo.add(new QueueConfigInfo(updateConf.getQueuePath(),
+ updateConf.getQueueConfigs()));
+ }
+ HashMap<String, String> globalInfo = new HashMap<>(globalUpdatesInfo);
+ SchedConfUpdateInfo schedConfInfo = new SchedConfUpdateInfo();
+ schedConfInfo.setAddQueueInfo(addInfo);
+ schedConfInfo.setRemoveQueueInfo(removeInfo);
+ schedConfInfo.setUpdateQueueInfo(updateInfo);
+ schedConfInfo.setGlobalParams(globalInfo);
+ ((MutableConfScheduler) rmContext.getScheduler()).updateConfiguration(
+ UserGroupInformation.getCurrentUser(), schedConfInfo);
+ }
}
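
For orientation, an illustrative equivalent of the translation above for a single added queue (placeholder values): the QueueConfig carried over the RPC becomes a webapp-layer QueueConfigInfo inside a SchedConfUpdateInfo, which MutableConfScheduler#updateConfiguration then applies on behalf of the calling admin user.

    SchedConfUpdateInfo schedConfInfo = new SchedConfUpdateInfo();
    ArrayList<QueueConfigInfo> addInfo = new ArrayList<>();
    addInfo.add(new QueueConfigInfo("root.a.a2",
        Collections.singletonMap("capacity", "20")));   // placeholder queue and config
    schedConfInfo.setAddQueueInfo(addInfo);
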
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
index b7c585e733c..9237578f731 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedConfUpdateInfo.java
@@ -54,16 +54,32 @@ public SchedConfUpdateInfo() {
return addQueueInfo;
}
+ public void setAddQueueInfo(ArrayList<QueueConfigInfo> addQueueInfo) {
+ this.addQueueInfo = addQueueInfo;
+ }
+
public ArrayList<String> getRemoveQueueInfo() {
return removeQueueInfo;
}
+ public void setRemoveQueueInfo(ArrayList<String> removeQueueInfo) {
+ this.removeQueueInfo = removeQueueInfo;
+ }
+
public ArrayList<QueueConfigInfo> getUpdateQueueInfo() {
return updateQueueInfo;
}
+ public void setUpdateQueueInfo(ArrayList<QueueConfigInfo> updateQueueInfo) {
+ this.updateQueueInfo = updateQueueInfo;
+ }
+
@XmlElementWrapper(name = "global-updates")
public HashMap<String, String> getGlobalParams() {
return global;
}
+
+ public void setGlobalParams(HashMap<String, String> global) {
+ this.global = global;
+ }
}