diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java index ff1ca48..741046c 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java @@ -43,7 +43,10 @@ LOST, /** Node has rebooted */ - REBOOTED; + REBOOTED, + + /** Node decommission is in progress */ + DECOMMISSIONING; public boolean isUnusable() { return (this == UNHEALTHY || this == DECOMMISSIONED || this == LOST); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java index 8777e00..a0f301d 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java @@ -32,8 +32,14 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; @@ -142,4 +148,25 @@ public ReplaceLabelsOnNodeResponse replaceLabelsOnNode( @Idempotent public UpdateNodeLabelsResponse updateNodeLabels( UpdateNodeLabelsRequest request) throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public RefreshNodesGracefullyResponse refreshNodesGracefully( + RefreshNodesGracefullyRequest refreshNodesGracefullyRequest) + throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public CheckForDecommissioningNodesResponse checkForDecommissioningNodes( + CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest) + throws YarnException, IOException; + + @Public + @Evolving + @Idempotent + public RefreshNodesForcefullyResponse refreshNodesForcefully( + RefreshNodesForcefullyRequest refreshNodesForcefullyRequest) + throws YarnException, IOException; } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/CheckForDecommissioningNodesRequest.java 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/CheckForDecommissioningNodesRequest.java new file mode 100644 index 0000000..3fb9fa6 --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/CheckForDecommissioningNodesRequest.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +@Private +@Unstable +public abstract class CheckForDecommissioningNodesRequest { + @Public + @Unstable + public static CheckForDecommissioningNodesRequest newInstance() { + CheckForDecommissioningNodesRequest request = Records + .newRecord(CheckForDecommissioningNodesRequest.class); + return request; + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/CheckForDecommissioningNodesResponse.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/CheckForDecommissioningNodesResponse.java new file mode 100644 index 0000000..d44da33 --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/CheckForDecommissioningNodesResponse.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +@Private +@Unstable +public abstract class CheckForDecommissioningNodesResponse { + @Private + @Unstable + public static CheckForDecommissioningNodesResponse newInstance( + boolean nodesDecommissioning) { + CheckForDecommissioningNodesResponse response = Records + .newRecord(CheckForDecommissioningNodesResponse.class); + response.setNodesDecommissioning(nodesDecommissioning); + return response; + } + + public abstract void setNodesDecommissioning(boolean nodesDecommissioning); + + public abstract boolean getNodesDecommissioning(); +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesForcefullyRequest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesForcefullyRequest.java new file mode 100644 index 0000000..78cdcdc --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesForcefullyRequest.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +@Private +@Unstable +public abstract class RefreshNodesForcefullyRequest { + @Public + @Unstable + public static RefreshNodesForcefullyRequest newInstance() { + RefreshNodesForcefullyRequest request = Records + .newRecord(RefreshNodesForcefullyRequest.class); + return request; + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesForcefullyResponse.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesForcefullyResponse.java new file mode 100644 index 0000000..eadac12 --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesForcefullyResponse.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +@Private +@Unstable +public abstract class RefreshNodesForcefullyResponse { + @Private + @Unstable + public static RefreshNodesForcefullyResponse newInstance() { + RefreshNodesForcefullyResponse response = Records + .newRecord(RefreshNodesForcefullyResponse.class); + return response; + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesGracefullyRequest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesGracefullyRequest.java new file mode 100644 index 0000000..2a17479 --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesGracefullyRequest.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +@Private +@Unstable +public abstract class RefreshNodesGracefullyRequest { + @Public + @Unstable + public static RefreshNodesGracefullyRequest newInstance() { + RefreshNodesGracefullyRequest request = Records + .newRecord(RefreshNodesGracefullyRequest.class); + return request; + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesGracefullyResponse.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesGracefullyResponse.java new file mode 100644 index 0000000..cc1769c --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesGracefullyResponse.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +@Private +@Unstable +public abstract class RefreshNodesGracefullyResponse { + @Private + @Unstable + public static RefreshNodesGracefullyResponse newInstance() { + RefreshNodesGracefullyResponse response = Records + .newRecord(RefreshNodesGracefullyResponse.class); + return response; + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto index 6646718..39589e0 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto @@ -43,4 +43,7 @@ service ResourceManagerAdministrationProtocolService { rpc removeFromClusterNodeLabels(RemoveFromClusterNodeLabelsRequestProto) returns (RemoveFromClusterNodeLabelsResponseProto); rpc replaceLabelsOnNodes(ReplaceLabelsOnNodeRequestProto) returns (ReplaceLabelsOnNodeResponseProto); rpc updateNodeLabels(UpdateNodeLabelsRequestProto) returns (UpdateNodeLabelsResponseProto); + rpc refreshNodesGracefully(RefreshNodesGracefullyRequestProto) returns (RefreshNodesGracefullyResponseProto); + rpc checkForDecommissioningNodes(CheckForDecommissioningNodesRequestProto) returns (CheckForDecommissioningNodesResponseProto); + rpc refreshNodesForcefully(RefreshNodesForcefullyRequestProto) returns (RefreshNodesForcefullyResponseProto); } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto index 0d5b5c7..fb9e39d 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto @@ -105,6 +105,22 @@ message UpdateNodeLabelsRequestProto { message UpdateNodeLabelsResponseProto { } +message RefreshNodesGracefullyRequestProto { +} +message RefreshNodesGracefullyResponseProto { +} + +message CheckForDecommissioningNodesRequestProto { +} +message CheckForDecommissioningNodesResponseProto { + required bool nodesDecommissioning = 1; +} + +message RefreshNodesForcefullyRequestProto { +} +message RefreshNodesForcefullyResponseProto { +} + ////////////////////////////////////////////////////////////////// ///////////// RM Failover related records //////////////////////// ////////////////////////////////////////////////////////////////// diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 194be82..872cf02 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -214,6 +214,7 @@ enum NodeStateProto { NS_DECOMMISSIONED = 4; NS_LOST = 5; NS_REBOOTED = 6; + NS_DECOMMISSIONING = 7; } message NodeIdProto { diff --git 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java index 4642add..7b5b909 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java @@ -50,7 +50,11 @@ import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; @@ -75,6 +79,8 @@ "No cluster node-labels are specified"; private static final String NO_MAPPING_ERR_MSG = "No node-to-labels mappings are specified"; + private static final String INVALID_TIMEOUT_ERR_MSG = + "Invalid timeout specified : "; protected final static Map ADMIN_USAGE = ImmutableMap.builder() @@ -82,8 +88,11 @@ "Reload the queues' acls, states and scheduler specific " + "properties. \n\t\tResourceManager will reload the " + "mapred-queues configuration file.")) - .put("-refreshNodes", new UsageInfo("", - "Refresh the hosts information at the ResourceManager.")) + .put("-refreshNodes", new UsageInfo("[-g [timeout in seconds]]", + "Refresh the hosts information at the ResourceManager. 
Here " + + "[-g [timeout in seconds] is optional, if we specify the " + + "timeout then ResourceManager will wait for timeout before " + + "marking the NodeManager as decommissioned.")) .put("-refreshSuperUserGroupsConfiguration", new UsageInfo("", "Refresh superuser proxy groups mappings")) .put("-refreshUserToGroupsMappings", new UsageInfo("", @@ -201,7 +210,7 @@ private static void printHelp(String cmd, boolean isHAEnabled) { summary.append("The full syntax is: \n\n" + "yarn rmadmin" + " [-refreshQueues]" + - " [-refreshNodes]" + + " [-refreshNodes [-g [timeout in seconds]]]" + " [-refreshSuperUserGroupsConfiguration]" + " [-refreshUserToGroupsMappings]" + " [-refreshAdminAcls]" + @@ -279,7 +288,42 @@ private int refreshNodes() throws IOException, YarnException { adminProtocol.refreshNodes(request); return 0; } - + + private int refreshNodes(long timeout) throws IOException, YarnException { + // Graceful decommissioning with timeout + ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); + RefreshNodesGracefullyRequest refreshNodesGracefullyRequest = recordFactory + .newRecordInstance(RefreshNodesGracefullyRequest.class); + adminProtocol.refreshNodesGracefully(refreshNodesGracefullyRequest); + CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest = recordFactory + .newRecordInstance(CheckForDecommissioningNodesRequest.class); + boolean nodesDecommissioning = true; + long waitingTime; + for (waitingTime = 0; waitingTime < timeout && nodesDecommissioning; waitingTime++) { + // wait for one second to check nodes decommissioning status + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + // Ignore the InterruptedException + } + CheckForDecommissioningNodesResponse checkForDecommissioningNodes = adminProtocol + .checkForDecommissioningNodes(checkForDecommissioningNodesRequest); + nodesDecommissioning = checkForDecommissioningNodes + .getNodesDecommissioning(); + } + if (nodesDecommissioning) { + System.out.println("Graceful decommissioning not completed in " + timeout + + " seconds, issueing forceful decommissioning command."); + RefreshNodesForcefullyRequest refreshNodesForcefullyRequest = recordFactory + .newRecordInstance(RefreshNodesForcefullyRequest.class); + adminProtocol.refreshNodesForcefully(refreshNodesForcefullyRequest); + } else { + System.out.println("Graceful decommissioning completed in " + waitingTime + + " seconds."); + } + return 0; + } + private int refreshUserToGroupsMappings() throws IOException, YarnException { // Refresh the user-to-groups mappings @@ -517,7 +561,7 @@ public int run(String[] args) throws Exception { // verify that we have enough command line parameters // if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) || - "-refreshNodes".equals(cmd) || "-refreshServiceAcl".equals(cmd) || + "-refreshServiceAcl".equals(cmd) || "-refreshUserToGroupsMappings".equals(cmd) || "-refreshSuperUserGroupsConfiguration".equals(cmd)) { if (args.length != 1) { @@ -530,7 +574,21 @@ public int run(String[] args) throws Exception { if ("-refreshQueues".equals(cmd)) { exitCode = refreshQueues(); } else if ("-refreshNodes".equals(cmd)) { - exitCode = refreshNodes(); + if (args.length == 1) { + exitCode = refreshNodes(); + } else if (args.length == 3) { + // if the graceful timeout specified + if ("-g".equals(args[1])) { + long timeout = validateTimeout(args[2]); + exitCode = refreshNodes(timeout); + } else { + printUsage(cmd, isHAEnabled); + return -1; + } + } else { + printUsage(cmd, isHAEnabled); + return 
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java index c22494c..647be68 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java @@ -46,12 +46,17 @@ import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import org.apache.hadoop.yarn.util.Records; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentMatcher; @@ -176,6 +181,41 @@ public void testRefreshNodes() throws Exception { verify(admin).refreshNodes(any(RefreshNodesRequest.class)); } + @Test + public void testRefreshNodesWithGracefulTimeout() throws Exception { + //graceful decommission before timeout + String[] args = { "-refreshNodes", "-g", "1" }; + CheckForDecommissioningNodesResponse response = + Records.newRecord(CheckForDecommissioningNodesResponse.class); + response.setNodesDecommissioning(false); + when(admin.checkForDecommissioningNodes(any( + CheckForDecommissioningNodesRequest.class))).thenReturn(response); + assertEquals(0, rmAdminCLI.run(args)); + verify(admin).refreshNodesGracefully( + any(RefreshNodesGracefullyRequest.class)); + + //Forceful decommission when timeout occurs + String[] forcefulDecomArgs = { "-refreshNodes", "-g", "1" }; + response.setNodesDecommissioning(true); + when(admin.checkForDecommissioningNodes(any( + CheckForDecommissioningNodesRequest.class))).thenReturn(response); + assertEquals(0, rmAdminCLI.run(forcefulDecomArgs)); +
verify(admin).refreshNodesForcefully( + any(RefreshNodesForcefullyRequest.class)); + + //invalid graceful timeout parameter + String[] invalidArgs = { "-refreshNodes", "-ginvalid", "invalid" }; + assertEquals(-1, rmAdminCLI.run(invalidArgs)); + + //invalid timeout + String[] invalidTimeoutArgs = { "-refreshNodes", "-g", "invalid" }; + assertEquals(-1, rmAdminCLI.run(invalidTimeoutArgs)); + + // negative timeout + String[] negativeTimeoutArgs = { "-refreshNodes", "-g", "-1000" }; + assertEquals(-1, rmAdminCLI.run(negativeTimeoutArgs)); + } + @Test(timeout=500) public void testGetGroups() throws Exception { when(admin.getGroupsForUser(eq("admin"))).thenReturn( @@ -284,7 +324,7 @@ public void testHelp() throws Exception { assertTrue(dataOut .toString() .contains( - "yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" + + "yarn rmadmin [-refreshQueues] [-refreshNodes [-g [timeout in seconds]]] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" + " [username]] [[-addToClusterNodeLabels [label1,label2,label3]]" + @@ -299,7 +339,7 @@ public void testHelp() throws Exception { assertTrue(dataOut .toString() .contains( - "-refreshNodes: Refresh the hosts information at the " + + "-refreshNodes [-g [timeout in seconds]]: Refresh the hosts information at the " + "ResourceManager.")); assertTrue(dataOut.toString().contains( "-refreshUserToGroupsMappings: Refresh user-to-groups mappings")); @@ -327,7 +367,7 @@ public void testHelp() throws Exception { testError(new String[] { "-help", "-refreshQueues" }, "Usage: yarn rmadmin [-refreshQueues]", dataErr, 0); testError(new String[] { "-help", "-refreshNodes" }, - "Usage: yarn rmadmin [-refreshNodes]", dataErr, 0); + "Usage: yarn rmadmin [-refreshNodes [-g [timeout in seconds]]]", dataErr, 0); testError(new String[] { "-help", "-refreshUserToGroupsMappings" }, "Usage: yarn rmadmin [-refreshUserToGroupsMappings]", dataErr, 0); testError( @@ -364,7 +404,7 @@ public void testHelp() throws Exception { assertEquals(0, rmAdminCLIWithHAEnabled.run(args)); oldOutPrintStream.println(dataOut); String expectedHelpMsg = - "yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" + + "yarn rmadmin [-refreshQueues] [-refreshNodes [-g [timeout in seconds]]] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" + " [username]] [[-addToClusterNodeLabels [label1,label2,label3]]" + diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java index 8cb225f..7cd11a8 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java @@ -30,9 +30,12 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto; +import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesForcefullyRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesGracefullyRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto; @@ -46,8 +49,14 @@ import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; @@ -68,8 +77,14 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesForcefullyRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesForcefullyResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesGracefullyRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesGracefullyResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl; import 
org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl; @@ -282,4 +297,52 @@ public UpdateNodeLabelsResponse updateNodeLabels( return null; } } -} + + @Override + public RefreshNodesGracefullyResponse refreshNodesGracefully( + RefreshNodesGracefullyRequest refreshNodesGracefullyRequest) + throws YarnException, IOException { + RefreshNodesGracefullyRequestProto requestProto = + ((RefreshNodesGracefullyRequestPBImpl) refreshNodesGracefullyRequest) + .getProto(); + try { + return new RefreshNodesGracefullyResponsePBImpl( + proxy.refreshNodesGracefully(null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public CheckForDecommissioningNodesResponse checkForDecommissioningNodes( + CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest) + throws YarnException, IOException { + CheckForDecommissioningNodesRequestProto requestProto = + ((CheckForDecommissioningNodesRequestPBImpl) checkForDecommissioningNodesRequest) + .getProto(); + try { + return new CheckForDecommissioningNodesResponsePBImpl( + proxy.checkForDecommissioningNodes(null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } + + @Override + public RefreshNodesForcefullyResponse refreshNodesForcefully( + RefreshNodesForcefullyRequest refreshNodesForcefullyRequest) + throws YarnException, IOException { + RefreshNodesForcefullyRequestProto requestProto = + ((RefreshNodesForcefullyRequestPBImpl) refreshNodesForcefullyRequest) + .getProto(); + try { + return new RefreshNodesForcefullyResponsePBImpl( + proxy.refreshNodesForcefully(null, requestProto)); + } catch (ServiceException e) { + RPCUtil.unwrapAndThrowException(e); + return null; + } + } +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java index fe4c812..a4583c6 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java @@ -24,10 +24,16 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.GetGroupsForUserResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto; +import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesForcefullyRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesForcefullyResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesGracefullyRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesGracefullyResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto; @@ -49,7 +55,13 @@ import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; @@ -62,8 +74,14 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesForcefullyRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesForcefullyResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesGracefullyRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesGracefullyResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl; @@ -291,4 +309,55 @@ public UpdateNodeLabelsResponseProto updateNodeLabels( throw new ServiceException(e); } } + + @Override + public RefreshNodesGracefullyResponseProto refreshNodesGracefully( + RpcController controller, 
RefreshNodesGracefullyRequestProto proto) + throws ServiceException { + RefreshNodesGracefullyRequest request = + new RefreshNodesGracefullyRequestPBImpl(proto); + try { + RefreshNodesGracefullyResponse response = real + .refreshNodesGracefully(request); + return ((RefreshNodesGracefullyResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public CheckForDecommissioningNodesResponseProto checkForDecommissioningNodes( + RpcController controller, CheckForDecommissioningNodesRequestProto proto) + throws ServiceException { + CheckForDecommissioningNodesRequest request = + new CheckForDecommissioningNodesRequestPBImpl(proto); + try { + CheckForDecommissioningNodesResponse response = real + .checkForDecommissioningNodes(request); + return ((CheckForDecommissioningNodesResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public RefreshNodesForcefullyResponseProto refreshNodesForcefully( + RpcController controller, RefreshNodesForcefullyRequestProto proto) + throws ServiceException { + RefreshNodesForcefullyRequest request = new RefreshNodesForcefullyRequestPBImpl( + proto); + try { + RefreshNodesForcefullyResponse response = real + .refreshNodesForcefully(request); + return ((RefreshNodesForcefullyResponsePBImpl) response).getProto(); + } catch (YarnException e) { + throw new ServiceException(e); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/CheckForDecommissioningNodesRequestPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/CheckForDecommissioningNodesRequestPBImpl.java new file mode 100644 index 0000000..b7ad954 --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/CheckForDecommissioningNodesRequestPBImpl.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class CheckForDecommissioningNodesRequestPBImpl extends + CheckForDecommissioningNodesRequest { + + CheckForDecommissioningNodesRequestProto proto = CheckForDecommissioningNodesRequestProto + .getDefaultInstance(); + CheckForDecommissioningNodesRequestProto.Builder builder = null; + boolean viaProto = false; + + public CheckForDecommissioningNodesRequestPBImpl() { + builder = CheckForDecommissioningNodesRequestProto.newBuilder(); + } + + public CheckForDecommissioningNodesRequestPBImpl( + CheckForDecommissioningNodesRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public CheckForDecommissioningNodesRequestProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/CheckForDecommissioningNodesResponsePBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/CheckForDecommissioningNodesResponsePBImpl.java new file mode 100644 index 0000000..97d0f24 --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/CheckForDecommissioningNodesResponsePBImpl.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProtoOrBuilder; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class CheckForDecommissioningNodesResponsePBImpl extends + CheckForDecommissioningNodesResponse { + + CheckForDecommissioningNodesResponseProto proto = CheckForDecommissioningNodesResponseProto + .getDefaultInstance(); + CheckForDecommissioningNodesResponseProto.Builder builder = null; + boolean viaProto = false; + + private boolean nodesDecommissioning; + + public CheckForDecommissioningNodesResponsePBImpl() { + builder = CheckForDecommissioningNodesResponseProto.newBuilder(); + } + + public CheckForDecommissioningNodesResponsePBImpl( + CheckForDecommissioningNodesResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public CheckForDecommissioningNodesResponseProto getProto() { + mergeLocalToProto(); + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + private void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = CheckForDecommissioningNodesResponseProto.newBuilder(proto); + } + viaProto = false; + } + + private void mergeLocalToBuilder() { + builder.setNodesDecommissioning(this.nodesDecommissioning); + } + + @Override + public void setNodesDecommissioning(boolean nodesDecommissioning) { + maybeInitBuilder(); + this.nodesDecommissioning = nodesDecommissioning; + mergeLocalToBuilder(); + } + + @Override + public boolean getNodesDecommissioning() { + CheckForDecommissioningNodesResponseProtoOrBuilder p = viaProto ? proto + : builder; + return p.getNodesDecommissioning(); + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesForcefullyRequestPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesForcefullyRequestPBImpl.java new file mode 100644 index 0000000..8835535 --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesForcefullyRequestPBImpl.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesForcefullyRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyRequest; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class RefreshNodesForcefullyRequestPBImpl extends + RefreshNodesForcefullyRequest { + + RefreshNodesForcefullyRequestProto proto = RefreshNodesForcefullyRequestProto + .getDefaultInstance(); + RefreshNodesForcefullyRequestProto.Builder builder = null; + boolean viaProto = false; + + public RefreshNodesForcefullyRequestPBImpl() { + builder = RefreshNodesForcefullyRequestProto.newBuilder(); + } + + public RefreshNodesForcefullyRequestPBImpl( + RefreshNodesForcefullyRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public RefreshNodesForcefullyRequestProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesForcefullyResponsePBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesForcefullyResponsePBImpl.java new file mode 100644 index 0000000..1cf28bf --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesForcefullyResponsePBImpl.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesForcefullyResponseProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyResponse; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class RefreshNodesForcefullyResponsePBImpl extends + RefreshNodesForcefullyResponse { + + RefreshNodesForcefullyResponseProto proto = RefreshNodesForcefullyResponseProto + .getDefaultInstance(); + RefreshNodesForcefullyResponseProto.Builder builder = null; + boolean viaProto = false; + + public RefreshNodesForcefullyResponsePBImpl() { + builder = RefreshNodesForcefullyResponseProto.newBuilder(); + } + + public RefreshNodesForcefullyResponsePBImpl( + RefreshNodesForcefullyResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public RefreshNodesForcefullyResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesGracefullyRequestPBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesGracefullyRequestPBImpl.java new file mode 100644 index 0000000..40ca38a --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesGracefullyRequestPBImpl.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesGracefullyRequestProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyRequest; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class RefreshNodesGracefullyRequestPBImpl extends + RefreshNodesGracefullyRequest { + + RefreshNodesGracefullyRequestProto proto = RefreshNodesGracefullyRequestProto + .getDefaultInstance(); + RefreshNodesGracefullyRequestProto.Builder builder = null; + boolean viaProto = false; + + public RefreshNodesGracefullyRequestPBImpl() { + builder = RefreshNodesGracefullyRequestProto.newBuilder(); + } + + public RefreshNodesGracefullyRequestPBImpl( + RefreshNodesGracefullyRequestProto proto) { + this.proto = proto; + viaProto = true; + } + + public RefreshNodesGracefullyRequestProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesGracefullyResponsePBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesGracefullyResponsePBImpl.java new file mode 100644 index 0000000..b3d2bc0 --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesGracefullyResponsePBImpl.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesGracefullyResponseProto; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyResponse; + +import com.google.protobuf.TextFormat; + +@Private +@Unstable +public class RefreshNodesGracefullyResponsePBImpl extends + RefreshNodesGracefullyResponse { + + RefreshNodesGracefullyResponseProto proto = RefreshNodesGracefullyResponseProto + .getDefaultInstance(); + RefreshNodesGracefullyResponseProto.Builder builder = null; + boolean viaProto = false; + + public RefreshNodesGracefullyResponsePBImpl() { + builder = RefreshNodesGracefullyResponseProto.newBuilder(); + } + + public RefreshNodesGracefullyResponsePBImpl( + RefreshNodesGracefullyResponseProto proto) { + this.proto = proto; + viaProto = true; + } + + public RefreshNodesGracefullyResponseProto getProto() { + proto = viaProto ? proto : builder.build(); + viaProto = true; + return proto; + } + + @Override + public int hashCode() { + return getProto().hashCode(); + } + + @Override + public boolean equals(Object other) { + if (other == null) + return false; + if (other.getClass().isAssignableFrom(this.getClass())) { + return this.getProto().equals(this.getClass().cast(other).getProto()); + } + return false; + } + + @Override + public String toString() { + return TextFormat.shortDebugString(getProto()); + } +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java index 80299c0..d699d02 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java @@ -214,8 +214,14 @@ import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.AddToClusterNodeLabelsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesForcefullyRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesForcefullyResponseProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesGracefullyRequestProto; +import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesGracefullyResponseProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto; import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto; @@ -290,8 +296,14 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommissioningNodesResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesForcefullyRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesForcefullyResponsePBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesGracefullyRequestPBImpl; +import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesGracefullyResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl; @@ -1299,4 +1311,40 @@ public void testUpdateNodeLabelsResponsePBImpl() throws Exception { validatePBImplRecord(UpdateNodeLabelsResponsePBImpl.class, UpdateNodeLabelsResponseProto.class); } -} + + @Test + public void testRefreshNodesGracefullyRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshNodesGracefullyRequestPBImpl.class, + RefreshNodesGracefullyRequestProto.class); + } + + @Test + public void testRefreshNodesGracefullyResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshNodesGracefullyResponsePBImpl.class, + RefreshNodesGracefullyResponseProto.class); + } + + @Test + public void testCheckForDecommissioningNodesRequestPBImpl() throws Exception { + validatePBImplRecord(CheckForDecommissioningNodesRequestPBImpl.class, + CheckForDecommissioningNodesRequestProto.class); + } + + @Test + public void testCheckForDecommissioningNodesResponsePBImpl() throws Exception { + validatePBImplRecord(CheckForDecommissioningNodesResponsePBImpl.class, + CheckForDecommissioningNodesResponseProto.class); + } + + @Test + public void testRefreshNodesForcefullyRequestPBImpl() throws Exception { + validatePBImplRecord(RefreshNodesForcefullyRequestPBImpl.class, + RefreshNodesForcefullyRequestProto.class); + } + + @Test + public void testRefreshNodesForcefullyResponsePBImpl() throws Exception { + validatePBImplRecord(RefreshNodesForcefullyResponsePBImpl.class, + RefreshNodesForcefullyResponseProto.class); + } +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java index 22b92c2..306ee34 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java +++ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java @@ -61,8 +61,14 @@ import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyResponse; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyRequest; +import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; @@ -704,4 +710,63 @@ private YarnException logAndWrapException(Exception exception, String user, "AdminService", "Exception " + msg); return RPCUtil.getRemoteException(exception); } + + @Override + public RefreshNodesGracefullyResponse refreshNodesGracefully( + RefreshNodesGracefullyRequest refreshNodesGracefullyRequest) + throws YarnException, IOException { + String argName = "refreshNodesGracefully"; + final String msg = "refresh nodes gracefully."; + UserGroupInformation user = checkAcls("refreshNodesGracefully"); + + checkRMStatus(user.getShortUserName(), argName, msg); + + try { + Configuration conf = getConfiguration(new Configuration(false), + YarnConfiguration.YARN_SITE_CONFIGURATION_FILE); + rmContext.getNodesListManager().refreshNodesGracefully(conf); + RMAuditLogger + .logSuccess(user.getShortUserName(), argName, "AdminService"); + return recordFactory + .newRecordInstance(RefreshNodesGracefullyResponse.class); + } catch (IOException ioe) { + throw logAndWrapException(ioe, user.getShortUserName(), argName, msg); + } + } + + @Override + public CheckForDecommissioningNodesResponse checkForDecommissioningNodes( + CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest) + throws IOException, YarnException { + String argName = "checkForDecommissioningNodes"; + final String msg = "check for decommissioning nodes."; + UserGroupInformation user = checkAcls("checkForDecommissioningNodes"); + + checkRMStatus(user.getShortUserName(), argName, msg); + + boolean nodesDecommissioning = rmContext.getNodesListManager() + .checkForDecommissioningNodes(); + RMAuditLogger.logSuccess(user.getShortUserName(), argName, "AdminService"); + CheckForDecommissioningNodesResponse response = recordFactory + .newRecordInstance(CheckForDecommissioningNodesResponse.class); + response.setNodesDecommissioning(nodesDecommissioning); + return response; + } + + @Override + public RefreshNodesForcefullyResponse refreshNodesForcefully( + RefreshNodesForcefullyRequest refreshNodesForcefullyRequest) + throws YarnException, IOException { + String argName = "refreshNodesForcefully"; + final String msg = "refresh nodes forcefully."; + 
UserGroupInformation user = checkAcls("refreshNodesForcefully"); + + checkRMStatus(user.getShortUserName(), argName, msg); + + rmContext.getNodesListManager().refreshNodesForcefully(); + RMAuditLogger + .logSuccess(user.getShortUserName(), argName, "AdminService"); + return recordFactory + .newRecordInstance(RefreshNodesForcefullyResponse.class); + } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java index 786bf8c..84bcb39 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/NodesListManager.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; +import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -31,6 +32,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.HostsFileReader; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -107,6 +109,18 @@ private void printConfiguredHosts() { public void refreshNodes(Configuration yarnConf) throws IOException, YarnException { + refreshHostsReader(yarnConf); + + for (NodeId nodeId: rmContext.getRMNodes().keySet()) { + if (!isValidNode(nodeId.getHost())) { + this.rmContext.getDispatcher().getEventHandler().handle( + new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION)); + } + } + } + + private void refreshHostsReader(Configuration yarnConf) throws IOException, + YarnException { synchronized (hostsReader) { if (null == yarnConf) { yarnConf = new YarnConfiguration(); @@ -126,13 +140,6 @@ public void refreshNodes(Configuration yarnConf) throws IOException, .getConfigurationInputStream(this.conf, excludesFile)); printConfiguredHosts(); } - - for (NodeId nodeId: rmContext.getRMNodes().keySet()) { - if (!isValidNode(nodeId.getHost())) { - this.rmContext.getDispatcher().getEventHandler().handle( - new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION)); - } - } } private void setDecomissionedNMsMetrics() { @@ -236,4 +243,58 @@ private HostsFileReader createHostsFileReader(String includesFile, .getConfigurationInputStream(this.conf, excludesFile)); return hostsReader; } -} + + /** + * Gracefully refresh the nodes: newly excluded nodes are moved to DECOMMISSIONING and re-included nodes are recommissioned + * + * @param conf refreshed YarnConfiguration used to re-read the include/exclude host files + * @throws IOException + * @throws YarnException + */ + public void refreshNodesGracefully(Configuration conf) throws IOException, + YarnException { + refreshHostsReader(conf); + for (Entry<NodeId, RMNode> entry : rmContext.getRMNodes().entrySet()) { + NodeId nodeId = entry.getKey(); + if (!isValidNode(nodeId.getHost())) { + this.rmContext.getDispatcher().getEventHandler().handle( + new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION_WITH_TIMEOUT)); + } else { + // Recommissioning the nodes + if (entry.getValue().getState() == NodeState.DECOMMISSIONING + || entry.getValue().getState() == NodeState.DECOMMISSIONED) {
this.rmContext.getDispatcher().getEventHandler() + .handle(new RMNodeEvent(nodeId, RMNodeEventType.RECOMMISSION)); + } + } + } + } + + /** + * Checks whether any nodes are currently in the DECOMMISSIONING state. + * + * @return true if at least one node is in DECOMMISSIONING state + */ + public boolean checkForDecommissioningNodes() { + boolean nodesDecommissioning = false; + for (Entry<NodeId, RMNode> entry : rmContext.getRMNodes().entrySet()) { + if (entry.getValue().getState() == NodeState.DECOMMISSIONING) { + nodesDecommissioning = true; + break; + } + } + return nodesDecommissioning; + } + + /** + * Forcefully decommission any nodes that are still in DECOMMISSIONING state. + */ + public void refreshNodesForcefully() { + for (Entry<NodeId, RMNode> entry : rmContext.getRMNodes().entrySet()) { + if (entry.getValue().getState() == NodeState.DECOMMISSIONING) { + this.rmContext.getDispatcher().getEventHandler().handle( + new RMNodeEvent(entry.getKey(), RMNodeEventType.DECOMMISSION)); + } + } + } +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java index b4d0b8b..3ab54a7 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeEventType.java @@ -24,6 +24,8 @@ // Source: AdminService DECOMMISSION, + DECOMMISSION_WITH_TIMEOUT, + RECOMMISSION, // Source: AdminService, ResourceTrackerService RESOURCE_UPDATE, diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java index 4e10a2b..a002db7 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java @@ -40,7 +40,7 @@ public class TestNodesPage { final int numberOfRacks = 2; - final int numberOfNodesPerRack = 6; + final int numberOfNodesPerRack = 7; // The following is because of the way TestRMWebApp.mockRMContext creates // nodes. final int numberOfLostNodesPerRack = numberOfNodesPerRack
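For reference, a minimal client-side sketch (not part of the patch) of how the three new admin calls could be sequenced by an operator tool: refresh gracefully, poll checkForDecommissioningNodes until the excluded nodes drain, then refresh forcefully once a drain timeout expires. ClientRMProxy.createRMProxy and Records.newRecord are existing YARN utilities; the getNodesDecommissioning() accessor is assumed to mirror the setNodesDecommissioning(boolean) setter used in AdminService above, and the timeout and polling interval are arbitrary.

// Illustrative sketch only -- not part of this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesForcefullyRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesGracefullyRequest;
import org.apache.hadoop.yarn.util.Records;

public class GracefulDecommissionDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    ResourceManagerAdministrationProtocol admin =
        ClientRMProxy.createRMProxy(conf, ResourceManagerAdministrationProtocol.class);

    // 1. Re-read include/exclude files; excluded nodes move to DECOMMISSIONING,
    //    re-included DECOMMISSIONING/DECOMMISSIONED nodes are recommissioned.
    admin.refreshNodesGracefully(
        Records.newRecord(RefreshNodesGracefullyRequest.class));

    // 2. Poll until no node is left in DECOMMISSIONING, or the (arbitrary) timeout expires.
    long deadline = System.currentTimeMillis() + 10 * 60 * 1000L;
    while (System.currentTimeMillis() < deadline) {
      CheckForDecommissioningNodesResponse check =
          admin.checkForDecommissioningNodes(
              Records.newRecord(CheckForDecommissioningNodesRequest.class));
      // getNodesDecommissioning() is assumed here; only the setter appears in the patch.
      if (!check.getNodesDecommissioning()) {
        return; // every excluded node has reached DECOMMISSIONED
      }
      Thread.sleep(5000L);
    }

    // 3. Timeout: force any node still DECOMMISSIONING to DECOMMISSIONED.
    admin.refreshNodesForcefully(
        Records.newRecord(RefreshNodesForcefullyRequest.class));
  }
}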