diff --git hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java index 459fd56..b483486 100644 --- hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java +++ hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java @@ -350,15 +350,14 @@ protected ContainerManager getCMProxy(ContainerId containerID, final InetSocketAddress cmAddr = NetUtils.createSocketAddr(containerManagerBindAddr); - UserGroupInformation user = UserGroupInformation.getCurrentUser(); - - if (UserGroupInformation.isSecurityEnabled()) { - Token token = - ProtoUtils.convertFromProtoFormat(containerToken, cmAddr); - // the user in createRemoteUser in this context has to be ContainerID - user = UserGroupInformation.createRemoteUser(containerID.toString()); - user.addToken(token); - } + + // the user in createRemoteUser in this context has to be ContainerID + UserGroupInformation user = + UserGroupInformation.createRemoteUser(containerID.toString()); + + Token token = + ProtoUtils.convertFromProtoFormat(containerToken, cmAddr); + user.addToken(token); ContainerManager proxy = user .doAs(new PrivilegedAction() { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index b5abecd..65bd057 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -135,13 +135,11 @@ public void init(Configuration conf) { conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); - // Create the secretManager if need be. - NMContainerTokenSecretManager containerTokenSecretManager = null; if (UserGroupInformation.isSecurityEnabled()) { - LOG.info("Security is enabled on NodeManager. 
" - + "Creating ContainerTokenSecretManager"); - containerTokenSecretManager = new NMContainerTokenSecretManager(conf); + LOG.info("Security is Enabled."); } + NMContainerTokenSecretManager containerTokenSecretManager = + new NMContainerTokenSecretManager(conf); this.context = createNMContext(containerTokenSecretManager); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 284cd94..6aeb600 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -193,16 +193,12 @@ protected void rebootNodeStatusUpdater() { throw new AvroRuntimeException(e); } } - - private boolean isSecurityEnabled() { - return UserGroupInformation.isSecurityEnabled(); - } @Private protected boolean isTokenKeepAliveEnabled(Configuration conf) { return conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED) - && isSecurityEnabled(); + && UserGroupInformation.isSecurityEnabled(); } protected ResourceTracker getRMClient() { @@ -298,16 +294,13 @@ protected void registerWithRM() throws YarnRemoteException { "Recieved SHUTDOWN signal from Resourcemanager ,Registration of NodeManager failed"); } - if (UserGroupInformation.isSecurityEnabled()) { - MasterKey masterKey = regNMResponse.getMasterKey(); - // do this now so that its set before we start heartbeating to RM - LOG.info("Security enabled - updating secret keys now"); - // It is expected that status updater is started by this point and - // RM gives the shared secret in registration during - // StatusUpdater#start(). - if (masterKey != null) { - this.context.getContainerTokenSecretManager().setMasterKey(masterKey); - } + MasterKey masterKey = regNMResponse.getMasterKey(); + // do this now so that its set before we start heartbeating to RM + // It is expected that status updater is started by this point and + // RM gives the shared secret in registration during + // StatusUpdater#start(). 
+ if (masterKey != null) { + this.context.getContainerTokenSecretManager().setMasterKey(masterKey); } LOG.info("Registered with ResourceManager as " + this.nodeId @@ -438,10 +431,8 @@ public void run() { NodeHeartbeatRequest request = recordFactory .newRecordInstance(NodeHeartbeatRequest.class); request.setNodeStatus(nodeStatus); - if (isSecurityEnabled()) { - request.setLastKnownMasterKey(NodeStatusUpdaterImpl.this.context - .getContainerTokenSecretManager().getCurrentKey()); - } + request.setLastKnownMasterKey(NodeStatusUpdaterImpl.this.context + .getContainerTokenSecretManager().getCurrentKey()); while (!isStopped) { try { rmRetryCount++; @@ -470,13 +461,11 @@ public void run() { //get next heartbeat interval from response nextHeartBeatInterval = response.getNextHeartBeatInterval(); // See if the master-key has rolled over - if (isSecurityEnabled()) { - MasterKey updatedMasterKey = response.getMasterKey(); - if (updatedMasterKey != null) { - // Will be non-null only on roll-over on RM side - context.getContainerTokenSecretManager().setMasterKey( - updatedMasterKey); - } + MasterKey updatedMasterKey = response.getMasterKey(); + if (updatedMasterKey != null) { + // Will be non-null only on roll-over on RM side + context.getContainerTokenSecretManager().setMasterKey( + updatedMasterKey); } if (response.getNodeAction() == NodeAction.SHUTDOWN) { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 6889110..0814bea 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -20,6 +20,8 @@ import static org.apache.hadoop.yarn.service.Service.STATE.STARTED; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; @@ -29,6 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataInputByteBuffer; @@ -40,7 +43,6 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse; @@ -102,6 +104,8 @@ import org.apache.hadoop.yarn.service.Service; import org.apache.hadoop.yarn.service.ServiceStateChangeListener; +import com.google.common.annotations.VisibleForTesting; + public class ContainerManagerImpl extends CompositeService implements ServiceStateChangeListener, ContainerManager, EventHandler { @@ -300,6 +304,40 @@ private ContainerTokenIdentifier selectContainerTokenIdentifier( return resultId; 
} + @Private + @VisibleForTesting + protected ContainerTokenIdentifier getContainerTokenIdentifier( + UserGroupInformation remoteUgi, + org.apache.hadoop.yarn.api.records.Container container) + throws YarnRemoteException { + if (UserGroupInformation.isSecurityEnabled()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Number of TokenIdentifiers in the UGI from RPC: " + + remoteUgi.getTokenIdentifiers().size()); + } + // Get the tokenId from the remote user ugi + return selectContainerTokenIdentifier(remoteUgi); + } else { + return selectContainerTokenIdentifier(container); + } + } + + private ContainerTokenIdentifier selectContainerTokenIdentifier( + org.apache.hadoop.yarn.api.records.Container container) + throws YarnRemoteException { + ContainerTokenIdentifier tokenId = new ContainerTokenIdentifier(); + try { + ByteBuffer buffer = container.getContainerToken().getIdentifier(); + tokenId.readFields(new DataInputStream(new ByteArrayInputStream(buffer + .array()))); + } catch (Exception e) { + String message = "Invalid container token received :"; + LOG.error(message, e); + throw RPCUtil.getRemoteException(message + e.getMessage()); + } + return tokenId; + } + /** * Authorize the request. * @@ -311,16 +349,14 @@ private ContainerTokenIdentifier selectContainerTokenIdentifier( * ugi corresponding to the remote end making the api-call * @throws YarnRemoteException */ - private void authorizeRequest(String containerIDStr, + @Private + @VisibleForTesting + protected void authorizeRequest(String containerIDStr, ContainerLaunchContext launchContext, org.apache.hadoop.yarn.api.records.Container container, - UserGroupInformation remoteUgi) + UserGroupInformation remoteUgi, ContainerTokenIdentifier tokenId) throws YarnRemoteException { - if (!UserGroupInformation.isSecurityEnabled()) { - return; - } - boolean unauthorized = false; StringBuilder messageBuilder = new StringBuilder("Unauthorized request to start container. "); @@ -332,37 +368,30 @@ private void authorizeRequest(String containerIDStr, } else if (launchContext != null) { // Verify other things also for startContainer() request. - if (LOG.isDebugEnabled()) { - LOG.debug("Number of TokenIdentifiers in the UGI from RPC: " - + remoteUgi.getTokenIdentifiers().size()); - } - - // Get the tokenId from the remote user ugi - ContainerTokenIdentifier tokenId = - selectContainerTokenIdentifier(remoteUgi); if (tokenId == null) { unauthorized = true; messageBuilder - .append("\nContainerTokenIdentifier cannot be null! Null found for " - + containerIDStr); + .append("\nContainerTokenIdentifier cannot be null! Null found for " + + containerIDStr); } else { // Is the container coming in with correct user-name? - if (!tokenId.getApplicationSubmitter().equals(launchContext.getUser())) { + if (!launchContext.getUser().equals(tokenId.getApplicationSubmitter())) { unauthorized = true; messageBuilder.append("\n Expected user-name " + tokenId.getApplicationSubmitter() + " but found " + launchContext.getUser()); } + // Is the container being relaunched? Or RPC layer let startCall with // tokens generated off old-secret through? if (!this.context.getContainerTokenSecretManager() - .isValidStartContainerRequest(tokenId)) { + .isValidStartContainerRequest(tokenId.getContainerID())) { unauthorized = true; - messageBuilder.append("\n Attempt to relaunch the same " + - "container with id " + containerIDStr + "."); + messageBuilder.append("\n Attempt to relaunch the same " + + "container with id " + containerIDStr + "."); } // Ensure the token is not expired. 
@@ -375,7 +404,7 @@ private void authorizeRequest(String containerIDStr, } Resource resource = tokenId.getResource(); - if (!resource.equals(container.getResource())) { + if (resource == null || !resource.equals(container.getResource())) { unauthorized = true; messageBuilder.append("\nExpected resource " + resource + " but found " + container.getResource()); @@ -411,7 +440,10 @@ public StartContainerResponse startContainer(StartContainerRequest request) String containerIDStr = containerID.toString(); UserGroupInformation remoteUgi = getRemoteUgi(containerIDStr); - authorizeRequest(containerIDStr, launchContext, lauchContainer, remoteUgi); + ContainerTokenIdentifier tokenId = + getContainerTokenIdentifier(remoteUgi, lauchContainer); + authorizeRequest(containerIDStr, launchContext, lauchContainer, remoteUgi, + tokenId); // Is the container coming from unknown RM if (lauchContainer.getRMIdentifer() != nodeStatusUpdater @@ -476,13 +508,9 @@ public StartContainerResponse startContainer(StartContainerRequest request) // TODO: Validate the request dispatcher.getEventHandler().handle( new ApplicationContainerInitEvent(container)); - if (UserGroupInformation.isSecurityEnabled()) { - ContainerTokenIdentifier tokenId = - selectContainerTokenIdentifier(remoteUgi); - this.context.getContainerTokenSecretManager().startContainerSuccessful( - tokenId); - } - + + this.context.getContainerTokenSecretManager().startContainerSuccessful( + tokenId); NMAuditLogger.logSuccess(launchContext.getUser(), AuditConstants.START_CONTAINER, "ContainerManageImpl", applicationID, containerID); @@ -511,12 +539,10 @@ public StopContainerResponse stopContainer(StopContainerRequest request) // TODO: Only the container's owner can kill containers today. UserGroupInformation remoteUgi = getRemoteUgi(containerIDStr); - authorizeRequest(containerIDStr, null, null, remoteUgi); - + Container container = this.context.getContainers().get(containerID); StopContainerResponse response = recordFactory.newRecordInstance(StopContainerResponse.class); - Container container = this.context.getContainers().get(containerID); if (container == null) { LOG.warn("Trying to stop unknown container " + containerID); NMAuditLogger.logFailure("UnknownUser", @@ -526,6 +552,8 @@ public StopContainerResponse stopContainer(StopContainerRequest request) containerID); return response; // Return immediately. } + authorizeRequest(containerIDStr, null, null, remoteUgi, + getContainerTokenIdentifier(remoteUgi, container.getContainer())); dispatcher.getEventHandler().handle( new ContainerKillEvent(containerID, @@ -554,21 +582,21 @@ public GetContainerStatusResponse getContainerStatus( // TODO: Only the container's owner can get containers' status today. 
UserGroupInformation remoteUgi = getRemoteUgi(containerIDStr); - authorizeRequest(containerIDStr, null, null, remoteUgi); - LOG.info("Getting container-status for " + containerIDStr); Container container = this.context.getContainers().get(containerID); - if (container != null) { - ContainerStatus containerStatus = container.cloneAndGetContainerStatus(); - LOG.info("Returning " + containerStatus); - GetContainerStatusResponse response = recordFactory - .newRecordInstance(GetContainerStatusResponse.class); - response.setStatus(containerStatus); - return response; + if (container == null) { + throw RPCUtil.getRemoteException("Container " + containerIDStr + + " is not handled by this NodeManager"); } - - throw RPCUtil.getRemoteException("Container " + containerIDStr - + " is not handled by this NodeManager"); + authorizeRequest(containerIDStr, null, null, remoteUgi, + getContainerTokenIdentifier(remoteUgi, container.getContainer())); + + ContainerStatus containerStatus = container.cloneAndGetContainerStatus(); + LOG.info("Returning " + containerStatus); + GetContainerStatusResponse response = + recordFactory.newRecordInstance(GetContainerStatusResponse.class); + response.setStatus(containerStatus); + return response; } class ContainerEventDispatcher implements EventHandler { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java index 4e4cf0a..e9dda5d 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java @@ -28,7 +28,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -394,9 +393,8 @@ public ApplicationState transition(ApplicationImpl app, public void transition(ApplicationImpl app, ApplicationEvent event) { // Inform the ContainerTokenSecretManager - if (UserGroupInformation.isSecurityEnabled()) { - app.context.getContainerTokenSecretManager().appFinished(app.appId); - } + app.context.getContainerTokenSecretManager().appFinished(app.appId); + // Inform the logService app.dispatcher.getEventHandler().handle( new LogHandlerAppFinishedEvent(app.appId)); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java index bc70f26..d704e7d 100644 --- 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java @@ -27,7 +27,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -35,6 +34,8 @@ import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; +import com.google.common.annotations.VisibleForTesting; + /** * The NM maintains only two master-keys. The current key that RM knows and the * key from the previous rolling-interval. @@ -134,10 +135,6 @@ public synchronized void setMasterKey(MasterKey masterKeyRecord) { */ public synchronized void startContainerSuccessful( ContainerTokenIdentifier tokenId) { - if (!UserGroupInformation.isSecurityEnabled()) { - return; - } - int keyId = tokenId.getMasterKeyId(); if (currentMasterKey.getMasterKey().getKeyId() == keyId) { addKeyForContainerId(tokenId.getContainerID(), currentMasterKey); @@ -154,8 +151,7 @@ public synchronized void startContainerSuccessful( * via retrievePassword. */ public synchronized boolean isValidStartContainerRequest( - ContainerTokenIdentifier tokenId) { - ContainerId containerID = tokenId.getContainerID(); + ContainerId containerID) { ApplicationId applicationId = containerID.getApplicationAttemptId().getApplicationId(); return !this.oldMasterKeys.containsKey(applicationId) diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java index 3f74c29..9512c23 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java @@ -26,7 +26,11 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.exceptions.YarnRemoteException; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; @@ -173,4 +177,23 @@ public void handle(LogHandlerEvent event) { public void setBlockNewContainerRequests(boolean blockNewContainerRequests) { // do nothing } + + @Override + 
protected void authorizeRequest(String containerIDStr, + ContainerLaunchContext launchContext, + org.apache.hadoop.yarn.api.records.Container container, + UserGroupInformation remoteUgi, ContainerTokenIdentifier tokenId) + throws YarnRemoteException { + // do Nothing + } + + @Override + protected ContainerTokenIdentifier getContainerTokenIdentifier( + UserGroupInformation remoteUgi, + org.apache.hadoop.yarn.api.records.Container container) + throws YarnRemoteException { + return new ContainerTokenIdentifier(container.getId(), + container.getNodeHttpAddress(), remoteUgi.getUserName(), + container.getResource(), System.currentTimeMillis(), 123); + } } \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java index d71334e..9daab34 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockNodeStatusUpdater.java @@ -18,6 +18,8 @@ package org.apache.hadoop.yarn.server.nodemanager; +import java.nio.ByteBuffer; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.event.Dispatcher; @@ -29,7 +31,9 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeStatus; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils; @@ -64,6 +68,11 @@ public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerRequest request) throws YarnRemoteException { RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + response.setMasterKey(masterKey); return response; } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index 4353d21..2785ba4 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -18,8 +18,12 @@ package org.apache.hadoop.yarn.server.nodemanager; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + import java.io.File; 
import java.io.IOException; +import java.nio.ByteBuffer; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; @@ -39,13 +43,14 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.ResourceTracker; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.junit.Test; -import static org.mockito.Mockito.*; public class TestEventFlow { @@ -75,7 +80,17 @@ public void testSuccessfulContainerLaunch() throws InterruptedException, remoteLogDir.mkdir(); YarnConfiguration conf = new YarnConfiguration(); - Context context = new NMContext(new NMContainerTokenSecretManager(conf)); + + NMContainerTokenSecretManager containerTokenSecretMgr = + new NMContainerTokenSecretManager(conf); + + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + containerTokenSecretMgr.setMasterKey(masterKey); + + Context context = new NMContext(containerTokenSecretMgr); conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath()); @@ -132,7 +147,6 @@ public long getRMIdentifier() { when(mockContainer.getResource()).thenReturn(recordFactory .newRecordInstance(Resource.class)); when(mockContainer.getRMIdentifer()).thenReturn(SIMULATED_RM_IDENTIFIER); - launchContext.setUser("testing"); StartContainerRequest request = recordFactory.newRecordInstance(StartContainerRequest.class); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java index 1e4c155..50ede46 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java @@ -18,10 +18,17 @@ package org.apache.hadoop.yarn.server.nodemanager; -import static org.mockito.Mockito.*; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.isNull; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import java.io.File; import java.io.IOException; +import java.net.InetSocketAddress; +import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -29,28 +36,37 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import 
org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.ContainerToken; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; +import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; import org.junit.After; @@ -69,7 +85,8 @@ static final String user = System.getProperty("user.name"); private FileContext localFS; - + private Configuration conf = new Configuration(); + private MyNodeManager nm; private DeletionService delService; static final Log LOG = LogFactory.getLog(TestNodeManagerReboot.class); @@ -87,25 +104,26 @@ public void tearDown() throws IOException, InterruptedException { } } - @Test(timeout = 20000) + @Test(timeout = 20000000) public void testClearLocalDirWhenNodeReboot() throws IOException, YarnRemoteException { nm = new MyNodeManager(); nm.start(); + // create files under fileCache createFiles(nmLocalDir.getAbsolutePath(), ContainerLocalizer.FILECACHE, 100); localResourceDir.mkdirs(); - ContainerManagerImpl containerManager = nm.getContainerManager(); + ContainerLaunchContext containerLaunchContext = Records.newRecord(ContainerLaunchContext.class); // Construct the Container-id ContainerId cId = createContainerId(); org.apache.hadoop.yarn.api.records.Container mockContainer = - mock(org.apache.hadoop.yarn.api.records.Container.class); - when(mockContainer.getId()).thenReturn(cId); + new ContainerPBImpl(); + mockContainer.setId(cId); - containerLaunchContext.setUser(user); + containerLaunchContext.setUser(cId.toString()); URL localResourceUri = ConverterUtils.getYarnUrlFromPath(localFS @@ -123,17 +141,34 @@ public void testClearLocalDirWhenNodeReboot() throws IOException, new HashMap(); localResources.put(destinationFile, localResource); containerLaunchContext.setLocalResources(localResources); - 
containerLaunchContext.setUser(containerLaunchContext.getUser()); List commands = new ArrayList(); containerLaunchContext.setCommands(commands); Resource resource = Records.newRecord(Resource.class); resource.setMemory(1024); - when(mockContainer.getResource()).thenReturn(resource); + mockContainer.setResource(resource); + NodeId nodeId = BuilderUtils.newNodeId("127.0.0.1", 12345); + mockContainer.setContainerToken(nm.getContainerToken(cId, nodeId, + cId.toString(), resource)); + mockContainer.setNodeHttpAddress("127.0.0.1"); + mockContainer.setNodeId(nodeId); + StartContainerRequest startRequest = Records.newRecord(StartContainerRequest.class); startRequest.setContainerLaunchContext(containerLaunchContext); startRequest.setContainer(mockContainer); - containerManager.startContainer(startRequest); + UserGroupInformation currentUser = UserGroupInformation + .createRemoteUser(cId.toString()); + + currentUser.doAs(new PrivilegedAction() { + @Override + public ContainerManager run() { + YarnRPC rpc = YarnRPC.create(conf); + InetSocketAddress containerManagerBindAddress = + NetUtils.createSocketAddrForHost("127.0.0.1", 12345); + return (ContainerManager) rpc.getProxy(ContainerManager.class, + containerManagerBindAddress, conf); + } + }).startContainer(startRequest); GetContainerStatusRequest request = Records.newRecord(GetContainerStatusRequest.class); @@ -190,17 +225,16 @@ public void testClearLocalDirWhenNodeReboot() throws IOException, ContainerLocalizer.FILECACHE) == 0 && numOfLocalDirs(nmLocalDir .getAbsolutePath(), ResourceLocalizationService.NM_PRIVATE_DIR) == 0); - verify(delService, times(1)).delete(eq(user), - argThat(new PathInclude(user))); + verify(delService, times(1)).delete((String) isNull(), + argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_"))); verify(delService, times(1)).delete( (String) isNull(), argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_"))); verify(delService, times(1)).delete((String) isNull(), - argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_"))); - verify(delService, times(1)).delete((String) isNull(), argThat(new PathInclude(ContainerLocalizer.USERCACHE + "_DEL_"))); - + verify(delService, times(1)).delete(eq(user), + argThat(new PathInclude(cId.toString()))); } private int numOfLocalDirs(String localDir, String localSubDir) { @@ -267,6 +301,16 @@ private YarnConfiguration createNMConfig() { conf.set(YarnConfiguration.NM_LOCAL_DIRS, nmLocalDir.getAbsolutePath()); return conf; } + + public void setMasterKey(MasterKey masterKey) { + getNMContext().getContainerTokenSecretManager().setMasterKey(masterKey); + } + + public ContainerToken getContainerToken(ContainerId containerId, + NodeId nodeId, String user, Resource resource) { + return getNMContext().getContainerTokenSecretManager() + .createContainerToken(containerId, nodeId, user, resource); + } } class PathInclude extends ArgumentMatcher { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java index 39f1f01..7f35841 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java +++ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CyclicBarrier; @@ -34,11 +35,16 @@ import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.ContainerToken; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.NMNotYetReadyException; import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics; @@ -89,6 +95,7 @@ public void testKillContainersOnResync() throws IOException, YarnConfiguration conf = createNMConfig(); nm.init(conf); nm.start(); + ContainerId cId = TestNodeManagerShutdown.createContainerId(); TestNodeManagerShutdown.startContainer(nm, cId, localFS, tmpDir, processStartFile); @@ -117,7 +124,7 @@ public void testBlockNewContainerRequestsOnStartAndResync() YarnConfiguration conf = createNMConfig(); nm.init(conf); nm.start(); - + // Start the container in running state ContainerId cId = TestNodeManagerShutdown.createContainerId(); TestNodeManagerShutdown.startContainer(nm, cId, localFS, tmpDir, @@ -159,7 +166,7 @@ protected NodeStatusUpdater createNodeStatusUpdater(Context context, public int getNMRegistrationCount() { return registrationCount; } - + class TestNodeStatusUpdaterImpl1 extends MockNodeStatusUpdater { public TestNodeStatusUpdaterImpl1(Context context, Dispatcher dispatcher, diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java index 1013a89..89bcda8 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java @@ -18,14 +18,14 @@ package org.apache.hadoop.yarn.server.nodemanager; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.security.PrivilegedAction; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -33,10 
+33,14 @@ import junit.framework.Assert; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Shell; +import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -46,18 +50,22 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.ContainerToken; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; +import org.apache.hadoop.yarn.ipc.YarnRPC; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.After; @@ -100,9 +108,16 @@ public void tearDown() throws IOException, InterruptedException { @Test public void testKillContainersOnShutdown() throws IOException, YarnRemoteException { - NodeManager nm = getNodeManager(); + TestNodeManager nm = getNodeManager(); nm.init(createNMConfig()); nm.start(); + + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + nm.setMasterKey(masterKey); + startContainer(nm, cId, localFS, tmpDir, processStartFile); final int MAX_TRIES=20; @@ -147,19 +162,20 @@ public void testKillContainersOnShutdown() throws IOException, public static void startContainer(NodeManager nm, ContainerId cId, FileContext localFS, File scriptFileDir, File processStartFile) throws IOException, YarnRemoteException { - ContainerManagerImpl containerManager = nm.getContainerManager(); File scriptFile = createUnhaltingScriptFile(cId, scriptFileDir, processStartFile); ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); - Container mockContainer = mock(Container.class); - when(mockContainer.getId()).thenReturn(cId); + Container mockContainer = new ContainerPBImpl(); + + mockContainer.setId(cId); NodeId nodeId = BuilderUtils.newNodeId("localhost", 1234); - when(mockContainer.getNodeId()).thenReturn(nodeId); - when(mockContainer.getNodeHttpAddress()).thenReturn("localhost:12345"); - containerLaunchContext.setUser(user); + mockContainer.setNodeId(nodeId); + 
mockContainer.setNodeHttpAddress("localhost:12345"); + + containerLaunchContext.setUser(cId.toString()); URL localResourceUri = ConverterUtils.getYarnUrlFromPath(localFS @@ -180,11 +196,28 @@ public static void startContainer(NodeManager nm, ContainerId cId, List commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile)); containerLaunchContext.setCommands(commands); Resource resource = BuilderUtils.newResource(1024, 1); - when(mockContainer.getResource()).thenReturn(resource); + mockContainer.setResource(resource); + mockContainer.setContainerToken(getContainerToken(nm, cId, nodeId, + cId.toString(), resource)); StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class); startRequest.setContainerLaunchContext(containerLaunchContext); startRequest.setContainer(mockContainer); + UserGroupInformation currentUser = UserGroupInformation + .createRemoteUser(cId.toString()); + + ContainerManager containerManager = + currentUser.doAs(new PrivilegedAction() { + @Override + public ContainerManager run() { + Configuration conf = new Configuration(); + YarnRPC rpc = YarnRPC.create(conf); + InetSocketAddress containerManagerBindAddress = + NetUtils.createSocketAddrForHost("127.0.0.1", 12345); + return (ContainerManager) rpc.getProxy(ContainerManager.class, + containerManagerBindAddress, conf); + } + }); containerManager.startContainer(startRequest); GetContainerStatusRequest request = @@ -249,15 +282,28 @@ private static File createUnhaltingScriptFile(ContainerId cId, return scriptFile; } - private NodeManager getNodeManager() { - return new NodeManager() { - @Override - protected NodeStatusUpdater createNodeStatusUpdater(Context context, - Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { - MockNodeStatusUpdater myNodeStatusUpdater = new MockNodeStatusUpdater( - context, dispatcher, healthChecker, metrics); - return myNodeStatusUpdater; - } - }; + private TestNodeManager getNodeManager() { + return new TestNodeManager(); + } + + public static ContainerToken getContainerToken(NodeManager nm, + ContainerId containerId, NodeId nodeId, String user, Resource resource) { + return nm.getNMContext().getContainerTokenSecretManager() + .createContainerToken(containerId, nodeId, user, resource); + } + + class TestNodeManager extends NodeManager { + + @Override + protected NodeStatusUpdater createNodeStatusUpdater(Context context, + Dispatcher dispatcher, NodeHealthCheckerService healthChecker) { + MockNodeStatusUpdater myNodeStatusUpdater = + new MockNodeStatusUpdater(context, dispatcher, healthChecker, metrics); + return myNodeStatusUpdater; + } + + public void setMasterKey(MasterKey masterKey) { + getNMContext().getContainerTokenSecretManager().setMasterKey(masterKey); + } } -} +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 10dd155..973b205 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -23,6 +23,7 @@ import java.io.IOException; 
import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -42,7 +43,6 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.YarnException; -import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -63,8 +63,10 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; +import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeAction; import org.apache.hadoop.yarn.server.api.records.NodeStatus; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext; import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; @@ -136,6 +138,11 @@ public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + response.setMasterKey(masterKey); return response; } @@ -395,6 +402,11 @@ public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); response.setNodeAction(registerNodeAction ); + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + response.setMasterKey(masterKey); return response; } @Override @@ -429,6 +441,12 @@ public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerResponse response = recordFactory.newRecordInstance(RegisterNodeManagerResponse.class); response.setNodeAction(registerNodeAction); + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + response.setMasterKey(masterKey); + return response; } @@ -478,6 +496,12 @@ public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); response.setNodeAction(registerNodeAction); + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + response.setMasterKey(masterKey); + return response; } @@ -569,6 +593,12 @@ public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); response.setNodeAction(registerNodeAction ); + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + response.setMasterKey(masterKey); + return response; } diff --git 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index ea08ff3..76454d4 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; +import java.nio.ByteBuffer; import junit.framework.Assert; @@ -29,10 +30,13 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -40,7 +44,10 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.ResourceTracker; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; @@ -161,6 +168,11 @@ public void delete(String user, Path subDir, Path[] baseDirs) { nodeHealthChecker = new NodeHealthCheckerService(); nodeHealthChecker.init(conf); dirsHandler = nodeHealthChecker.getDiskHandler(); + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[] { new Integer(123) + .byteValue() })); + context.getContainerTokenSecretManager().setMasterKey(masterKey); containerManager = new ContainerManagerImpl(context, exec, delSrvc, nodeStatusUpdater, metrics, new ApplicationACLsManager(conf), dirsHandler) { @@ -169,6 +181,24 @@ public void setBlockNewContainerRequests( boolean blockNewContainerRequests) { // do nothing } + + @Override + protected void authorizeRequest(String containerIDStr, + ContainerLaunchContext launchContext, Container container, + UserGroupInformation remoteUgi, ContainerTokenIdentifier tokenId) + throws YarnRemoteException { + // do nothing + } + + @Override + protected ContainerTokenIdentifier getContainerTokenIdentifier( + UserGroupInformation remoteUgi, + org.apache.hadoop.yarn.api.records.Container container) + throws YarnRemoteException { + return new ContainerTokenIdentifier(container.getId(), + 
container.getNodeHttpAddress(), remoteUgi.getUserName(), + container.getResource(), System.currentTimeMillis(), 123); + } }; containerManager.init(conf); } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index f2fd09e..743dcb3 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; @@ -54,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.ResourceManagerConstants; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; @@ -427,6 +429,24 @@ public void setBlockNewContainerRequests( boolean blockNewContainerRequests) { // do nothing } + + @Override + protected void authorizeRequest(String containerIDStr, + ContainerLaunchContext launchContext, Container container, + UserGroupInformation remoteUgi, ContainerTokenIdentifier tokenId) + throws YarnRemoteException { + // do nothing + } + + @Override + protected ContainerTokenIdentifier getContainerTokenIdentifier( + UserGroupInformation remoteUgi, + org.apache.hadoop.yarn.api.records.Container container) + throws YarnRemoteException { + return new ContainerTokenIdentifier(container.getId(), + container.getNodeHttpAddress(), remoteUgi.getUserName(), + container.getResource(), System.currentTimeMillis(), 123); + } }; containerManager.init(conf); containerManager.start(); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java index 1050a9e..47c5663 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java @@ -25,10 +25,13 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.nio.ByteBuffer; import 
java.util.ArrayList; import java.util.HashMap; import java.util.List; +import junit.framework.Assert; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -37,6 +40,9 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType; @@ -54,11 +60,13 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType; +import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.BuilderUtils; import org.junit.Test; import org.mockito.ArgumentMatcher; + public class TestApplication { /** @@ -257,6 +265,10 @@ public void testAppFinishedOnRunningContainers() { AuxServicesEventType.APPLICATION_STOP, wa.appId))); wa.appResourcesCleanedup(); + for ( Container container : wa.containers) { + Assert.assertTrue(wa.context.getContainerTokenSecretManager() + .isValidStartContainerRequest(container.getContainer().getId())); + } assertEquals(ApplicationState.FINISHED, wa.app.getApplicationState()); } finally { @@ -293,6 +305,10 @@ public void testAppFinishedOnCompletedContainers() { LocalizationEventType.DESTROY_APPLICATION_RESOURCES, wa.app))); wa.appResourcesCleanedup(); + for ( Container container : wa.containers) { + Assert.assertTrue(wa.context.getContainerTokenSecretManager() + .isValidStartContainerRequest(container.getContainer().getId())); + } assertEquals(ApplicationState.FINISHED, wa.app.getApplicationState()); } finally { if (wa != null) @@ -429,8 +445,10 @@ public boolean matches(Object argument) { final Application app; WrappedApplication(int id, long timestamp, String user, int numContainers) { + Configuration conf = new Configuration(); + dispatcher = new DrainDispatcher(); - dispatcher.init(new Configuration()); + dispatcher.init(conf); localizerBus = mock(EventHandler.class); launcherBus = mock(EventHandler.class); @@ -448,6 +466,15 @@ public boolean matches(Object argument) { context = mock(Context.class); + when(context.getContainerTokenSecretManager()).thenReturn( + new NMContainerTokenSecretManager(conf)); + + // Setting master key + MasterKey masterKey = new MasterKeyPBImpl(); + masterKey.setKeyId(123); + masterKey.setBytes(ByteBuffer.wrap(new byte[]{(new Integer(123).byteValue())})); + context.getContainerTokenSecretManager().setMasterKey(masterKey); + this.user = user; this.appId = BuilderUtils.newApplicationId(timestamp, id); @@ -455,7 +482,13 @@ public boolean matches(Object argument) { new Configuration()), this.user, appId, null, context); containers = new ArrayList(); for (int i = 0; i < numContainers; i++) { - containers.add(createMockedContainer(this.appId, 
i)); + Container container = createMockedContainer(this.appId, i); + containers.add(container); + context.getContainerTokenSecretManager().startContainerSuccessful( + new ContainerTokenIdentifier(container.getContainer().getId(), "", + "", null, System.currentTimeMillis() + 1000, masterKey.getKeyId())); + Assert.assertFalse(context.getContainerTokenSecretManager() + .isValidStartContainerRequest(container.getContainer().getId())); } dispatcher.start(); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index 9e9d656..b9578fc 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -18,7 +18,9 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.BufferedReader; import java.io.File; @@ -26,14 +28,14 @@ import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; -import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import junit.framework.Assert; + import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; @@ -58,7 +60,6 @@ import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -66,9 +67,6 @@ import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; import org.junit.Before; import org.junit.Test; -import static org.mockito.Mockito.*; - -import junit.framework.Assert; public class TestContainerLaunch extends BaseContainerManagerTest { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java index 3c97f43..ddb6e8a 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java +++ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java @@ -18,9 +18,19 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation; -import static org.mockito.Mockito.*; import static junit.framework.Assert.assertEquals; import static junit.framework.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyMap; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -47,15 +57,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; -import org.apache.hadoop.io.DataInputBuffer; -import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; +import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -72,9 +80,9 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat; -import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy; import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey; import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader; +import org.apache.hadoop.yarn.logaggregation.ContainerLogsRetentionPolicy; import org.apache.hadoop.yarn.server.nodemanager.CMgrCompletedAppsEvent; import org.apache.hadoop.yarn.server.nodemanager.DeletionService; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/krb5.conf hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/krb5.conf new file mode 100644 index 0000000..121ac6d --- /dev/null +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/krb5.conf @@ -0,0 +1,28 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +[libdefaults] + default_realm = APACHE.ORG + udp_preference_limit = 1 + extra_addresses = 127.0.0.1 +[realms] + APACHE.ORG = { + admin_server = localhost:88 + kdc = localhost:88 + } +[domain_realm] + localhost = APACHE.ORG diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index c4f0b4c..385af0e 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -43,7 +43,6 @@ import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.api.records.NodeAction; import org.apache.hadoop.yarn.server.api.records.NodeStatus; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; @@ -190,11 +189,9 @@ public RegisterNodeManagerResponse registerNodeManager( return response; } - if (isSecurityEnabled()) { - MasterKey nextMasterKeyForNode = - this.containerTokenSecretManager.getCurrentKey(); - response.setMasterKey(nextMasterKeyForNode); - } + MasterKey nextMasterKeyForNode = + this.containerTokenSecretManager.getCurrentKey(); + response.setMasterKey(nextMasterKeyForNode); RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, resolve(host), capability); @@ -281,26 +278,24 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) getResponseId() + 1, NodeAction.NORMAL, null, null, null, nextHeartBeatInterval); rmNode.updateNodeHeartbeatResponseForCleanup(nodeHeartBeatResponse); + // Check if node's masterKey needs to be updated and if the currentKey has // roller over, send it across - if (isSecurityEnabled()) { - - boolean shouldSendMasterKey = false; - - MasterKey nextMasterKeyForNode = - this.containerTokenSecretManager.getNextKey(); - if (nextMasterKeyForNode != null) { - // nextMasterKeyForNode can be null if there is no outstanding key that - // is in the activation period. 
- MasterKey nodeKnownMasterKey = request.getLastKnownMasterKey(); - if (nodeKnownMasterKey.getKeyId() != nextMasterKeyForNode.getKeyId()) { - shouldSendMasterKey = true; - } - } - if (shouldSendMasterKey) { - nodeHeartBeatResponse.setMasterKey(nextMasterKeyForNode); + boolean shouldSendMasterKey = false; + + MasterKey nextMasterKeyForNode = + this.containerTokenSecretManager.getNextKey(); + if (nextMasterKeyForNode != null) { + // nextMasterKeyForNode can be null if there is no outstanding key that + // is in the activation period. + MasterKey nodeKnownMasterKey = request.getLastKnownMasterKey(); + if (nodeKnownMasterKey.getKeyId() != nextMasterKeyForNode.getKeyId()) { + shouldSendMasterKey = true; } } + if (shouldSendMasterKey) { + nodeHeartBeatResponse.setMasterKey(nextMasterKeyForNode); + } // 4. Send status to RMNode, saving the latest response. this.rmContext.getDispatcher().getEventHandler().handle( @@ -324,8 +319,4 @@ void refreshServiceAcls(Configuration configuration, PolicyProvider policyProvider) { this.server.refreshServiceAcl(configuration, policyProvider); } - - protected boolean isSecurityEnabled() { - return UserGroupInformation.isSecurityEnabled(); - } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 64f7114..f4108c6 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1296,16 +1296,13 @@ private Resource assignContainer(Resource clusterResource, FiCaSchedulerNode nod unreserve(application, priority, node, rmContainer); } - // Create container tokens in secure-mode - if (UserGroupInformation.isSecurityEnabled()) { - ContainerToken containerToken = - createContainerToken(application, container); - if (containerToken == null) { - // Something went wrong... - return Resources.none(); - } - container.setContainerToken(containerToken); + ContainerToken containerToken = + createContainerToken(application, container); + if (containerToken == null) { + // Something went wrong... 
+ return Resources.none(); } + container.setContainerToken(containerToken); // Inform the application RMContainer allocatedContainer = diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java index 4bd6e2b..c1896c3 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java @@ -25,7 +25,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerToken; @@ -35,8 +34,8 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; -import org.apache.hadoop.yarn.server.resourcemanager.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; +import org.apache.hadoop.yarn.server.resourcemanager.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; @@ -159,16 +158,11 @@ public Container createContainer( NodeId nodeId = node.getRMNode().getNodeID(); ContainerId containerId = BuilderUtils.newContainerId(application .getApplicationAttemptId(), application.getNewContainerId()); - ContainerToken containerToken = null; - - // If security is enabled, send the container-tokens too. - if (UserGroupInformation.isSecurityEnabled()) { - containerToken = - containerTokenSecretManager.createContainerToken(containerId, nodeId, + ContainerToken containerToken = + containerTokenSecretManager.createContainerToken(containerId, nodeId, application.getUser(), capability); - if (containerToken == null) { - return null; // Try again later. - } + if (containerToken == null) { + return null; // Try again later. 
} // Create the container diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 15f3eba..44a81dd 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -229,6 +229,7 @@ protected synchronized void update() { // Update recorded capacity of root queue (child queues are updated // when fair share is calculated). rootMetrics.setAvailableResourcesToQueue(clusterCapacity); + getContainerTokenSecretManager().rollMasterKey(); } /** diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 2024e74..c822bf3 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -552,15 +552,12 @@ private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application .getApplicationAttemptId(), application.getNewContainerId()); ContainerToken containerToken = null; - // If security is enabled, send the container-tokens too. - if (UserGroupInformation.isSecurityEnabled()) { - containerToken = - this.rmContext.getContainerTokenSecretManager() + containerToken = + this.rmContext.getContainerTokenSecretManager() .createContainerToken(containerId, nodeId, - application.getUser(), capability); - if (containerToken == null) { - return i; // Try again later. - } + application.getUser(), capability); + if (containerToken == null) { + return i; // Try again later. 
} // Create the container diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index ae6d581..67df982 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -202,15 +202,17 @@ public NodeHeartbeatResponse getLastNodeHeartBeatResponse() { }; private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) { - return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++); + return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++, null); } - private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr, int hostnum) { + private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr, int hostnum, String hostName) { final String rackName = "rack"+ rack; final int nid = hostnum; - final String hostName = "host"+ nid; final String nodeAddr = hostName + ":" + nid; final int port = 123; + if (hostName == null) { + hostName = "host"+ nid; + } final NodeId nodeID = newNodeID(hostName, port); final String httpAddress = httpAddr; final NodeHealthStatus nodeHealthStatus = @@ -233,6 +235,12 @@ public static RMNode newNodeInfo(int rack, final Resource perNode) { } public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) { - return buildRMNode(rack, perNode, null, "localhost:0", hostnum); + return buildRMNode(rack, perNode, null, "localhost:0", hostnum, null); + } + + public static RMNode newNodeInfo(int rack, final Resource perNode, + int hostnum, String hostName) { + return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName); } + } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 2259970..aa68908 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Level; @@ -295,8 +296,12 @@ public void stop() { @Override protected ResourceTrackerService createResourceTrackerService() { + RMContainerTokenSecretManager containerTokenSecretManager = + new 
RMContainerTokenSecretManager(new Configuration()); + containerTokenSecretManager.rollMasterKey(); return new ResourceTrackerService(getRMContext(), nodesListManager, - this.nmLivelinessMonitor, this.containerTokenSecretManager) { + this.nmLivelinessMonitor, containerTokenSecretManager) { + @Override public void start() { // override to not start rpc handler diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java index b316511..20c99bc 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java @@ -59,6 +59,7 @@ @Before public void setUp() { + Configuration conf = new Configuration(); // Dispatcher that processes events inline Dispatcher dispatcher = new InlineDispatcher(); dispatcher.register(SchedulerEventType.class, new EventHandler() { @@ -69,17 +70,16 @@ public void handle(Event event) { }); RMContext context = new RMContextImpl(dispatcher, null, null, null, null, - null, null, null); + null, new RMContainerTokenSecretManager(conf), null); dispatcher.register(RMNodeEventType.class, new ResourceManager.NodeEventDispatcher(context)); NodesListManager nodesListManager = new NodesListManager(context); - Configuration conf = new Configuration(); nodesListManager.init(conf); - RMContainerTokenSecretManager containerTokenSecretManager = - new RMContainerTokenSecretManager(conf); + + context.getContainerTokenSecretManager().rollMasterKey(); resourceTrackerService = new ResourceTrackerService(context, nodesListManager, new NMLivelinessMonitor(dispatcher), - containerTokenSecretManager); + context.getContainerTokenSecretManager()); resourceTrackerService.init(conf); } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java index 5953f84..e9f5313 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -52,6 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -88,6 +89,11 @@ public void 
setUp() throws IOException { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). thenReturn(resourceCalculator); + RMContainerTokenSecretManager containerTokenSecretManager = + new RMContainerTokenSecretManager(conf); + containerTokenSecretManager.rollMasterKey(); + when(csContext.getContainerTokenSecretManager()).thenReturn( + containerTokenSecretManager); Map queues = new HashMap(); CSQueue root = diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 174692b..141b794 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -67,6 +67,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -122,6 +123,11 @@ public void setUp() throws Exception { thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()). 
thenReturn(resourceCalculator); + RMContainerTokenSecretManager containerTokenSecretManager = + new RMContainerTokenSecretManager(conf); + containerTokenSecretManager.rollMasterKey(); + when(csContext.getContainerTokenSecretManager()).thenReturn( + containerTokenSecretManager); root = CapacityScheduler.parseQueue(csContext, csConf, null, @@ -275,7 +281,7 @@ public void testSingleQueueOneUserMetrics() throws Exception { // Setup some nodes - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); final int numNodes = 1; @@ -397,7 +403,7 @@ public void testSingleQueueWithOneUser() throws Exception { // Setup some nodes - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); final int numNodes = 1; @@ -528,9 +534,9 @@ public void testUserLimits() throws Exception { a.submitApplication(app_2, user_1, A); // Setup some nodes - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); - String host_1 = "host_1"; + String host_1 = "127.0.0.2"; FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB); final int numNodes = 2; @@ -622,9 +628,9 @@ public void testHeadroomWithMaxCap() throws Exception { a.submitApplication(app_2, user_1, A); // Setup some nodes - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); - String host_1 = "host_1"; + String host_1 = "127.0.0.2"; FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB); final int numNodes = 2; @@ -740,7 +746,7 @@ public void testSingleQueueWithMultipleUsers() throws Exception { a.submitApplication(app_3, user_2, A); // Setup some nodes - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); final int numNodes = 1; @@ -902,7 +908,7 @@ public void testReservation() throws Exception { a.submitApplication(app_1, user_1, A); // Setup some nodes - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB); final int numNodes = 2; @@ -1002,9 +1008,9 @@ public void testStolenReservedContainer() throws Exception { a.submitApplication(app_1, user_1, A); // Setup some nodes - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB); - String host_1 = "host_1"; + String host_1 = "127.0.0.2"; FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB); final int numNodes = 3; @@ -1102,10 +1108,10 @@ public void testReservationExchange() throws Exception { a.submitApplication(app_1, user_1, A); // Setup some nodes - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB); - String host_1 = "host_1"; + String host_1 = "127.0.0.2"; FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB); final int numNodes = 3; @@ -1214,15 +1220,15 @@ public void testLocalityScheduling() throws Exception { a.submitApplication(app_0, user_0, A); // Setup some nodes and racks - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; String rack_0 = "rack_0"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8*GB); - String 
host_1 = "host_1"; + String host_1 = "127.0.0.2"; String rack_1 = "rack_1"; FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8*GB); - String host_2 = "host_2"; + String host_2 = "127.0.0.3"; String rack_2 = "rack_2"; FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8*GB); @@ -1317,7 +1323,7 @@ public void testLocalityScheduling() throws Exception { app_0.updateResourceRequests(app_0_requests_0); assertEquals(2, app_0.getTotalRequiredResources(priority)); - String host_3 = "host_3"; // on rack_1 + String host_3 = "127.0.0.4"; // on rack_1 FiCaSchedulerNode node_3 = TestUtils.getMockNode(host_3, rack_1, 0, 8*GB); // Rack-delay @@ -1355,15 +1361,15 @@ public void testApplicationPriorityScheduling() throws Exception { a.submitApplication(app_0, user_0, A); // Setup some nodes and racks - String host_0 = "host_0"; + String host_0 = "127.0.0.1"; String rack_0 = "rack_0"; FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8*GB); - String host_1 = "host_1"; + String host_1 = "127.0.0.2"; String rack_1 = "rack_1"; FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8*GB); - String host_2 = "host_2"; + String host_2 = "127.0.0.3"; String rack_2 = "rack_2"; FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8*GB); @@ -1486,14 +1492,14 @@ public void testSchedulingConstraints() throws Exception { a.submitApplication(app_0, user_0, A); // Setup some nodes and racks - String host_0_0 = "host_0_0"; + String host_0_0 = "127.0.0.1"; String rack_0 = "rack_0"; FiCaSchedulerNode node_0_0 = TestUtils.getMockNode(host_0_0, rack_0, 0, 8*GB); - String host_0_1 = "host_0_1"; + String host_0_1 = "127.0.0.2"; FiCaSchedulerNode node_0_1 = TestUtils.getMockNode(host_0_1, rack_0, 0, 8*GB); - String host_1_0 = "host_1_0"; + String host_1_0 = "127.0.0.3"; String rack_1 = "rack_1"; FiCaSchedulerNode node_1_0 = TestUtils.getMockNode(host_1_0, rack_1, 0, 8*GB); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index e6761b9..e16a9fd 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -218,13 +218,16 @@ public void testLoadConfigurationOnInitialize() throws IOException { @Test public void testAggregateCapacityTracking() throws Exception { // Add a node - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); assertEquals(1024, scheduler.getClusterCapacity().getMemory()); // Add another node - RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(512)); + RMNode node2 = + MockNodes.newNodeInfo(1, Resources.createResource(512), 2, "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); assertEquals(1536, 
scheduler.getClusterCapacity().getMemory()); @@ -238,7 +241,9 @@ public void testAggregateCapacityTracking() throws Exception { @Test public void testSimpleFairShareCalculation() { // Add one big node (only care about aggregate capacity) - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(10 * 1024)); + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(10 * 1024), 1, + "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -262,7 +267,9 @@ public void testSimpleFairShareCalculation() { public void testSimpleHierarchicalFairShareCalculation() { // Add one big node (only care about aggregate capacity) int capacity = 10 * 24; - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(capacity)); + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(capacity), 1, + "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -310,12 +317,15 @@ public void testHierarchicalQueuesSimilarParents() { @Test (timeout = 5000) public void testSimpleContainerAllocation() { // Add a node - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); // Add another node - RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(512)); + RMNode node2 = + MockNodes.newNodeInfo(1, Resources.createResource(512), 2, "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); @@ -341,7 +351,9 @@ public void testSimpleContainerAllocation() { @Test (timeout = 5000) public void testSimpleContainerReservation() throws InterruptedException { // Add a node - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -349,6 +361,7 @@ public void testSimpleContainerReservation() throws InterruptedException { createSchedulingRequest(1024, "queue1", "user1", 1); scheduler.update(); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1); + scheduler.handle(updateEvent); // Make sure queue 1 is allocated app capacity @@ -366,7 +379,9 @@ public void testSimpleContainerReservation() throws InterruptedException { assertEquals(1024, scheduler.applications.get(attId).getCurrentReservation().getMemory()); // Now another node checks in with capacity - RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node2 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2); scheduler.handle(nodeEvent2); @@ -432,7 +447,9 @@ public void testFairShareWithMinAlloc() throws Exception { queueManager.initialize(); // Add one big node (only care about aggregate capacity) - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(3 * 1024)); + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(3 * 1024), 1, + "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -789,7 +806,9 @@ 
public void testIsStarvedForMinShare() throws Exception { queueManager.initialize(); // Add one big node (only care about aggregate capacity) - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024)); + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024), 1, + "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -847,7 +866,9 @@ public void testIsStarvedForFairShare() throws Exception { queueManager.initialize(); // Add one big node (only care about aggregate capacity) - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024)); + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024), 1, + "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -916,15 +937,21 @@ public void testChoiceOfPreemptedContainers() throws Exception { queueManager.initialize(); // Create four nodes - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024)); + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024), 1, + "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024)); + RMNode node2 = + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024), 2, + "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); - RMNode node3 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024)); + RMNode node3 = + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024), 3, + "127.0.0.3"); NodeAddedSchedulerEvent nodeEvent3 = new NodeAddedSchedulerEvent(node3); scheduler.handle(nodeEvent3); @@ -1054,15 +1081,21 @@ public void testPreemptionDecision() throws Exception { queueManager.initialize(); // Create four nodes - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024)); + RMNode node1 = + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024), 1, + "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024)); + RMNode node2 = + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024), 2, + "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); - RMNode node3 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024)); + RMNode node3 = + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024), 3, + "127.0.0.3"); NodeAddedSchedulerEvent nodeEvent3 = new NodeAddedSchedulerEvent(node3); scheduler.handle(nodeEvent3); @@ -1143,7 +1176,9 @@ public void testPreemptionDecision() throws Exception { @Test (timeout = 5000) public void testMultipleContainersWaitingForReservation() { // Add a node - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -1186,7 +1221,9 @@ public void testUserMaxRunningApps() throws Exception { queueManager.initialize(); // Add a node - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8192)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(8192), 1, "127.0.0.1"); 
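// Editor's note, not part of the patch: the mock nodes in these scheduler tests now
// register with loopback addresses (127.0.0.1, 127.0.0.2, ...) instead of the old
// host_N placeholders. The likely reason -- stated here as an assumption -- is that
// container tokens are now created for every allocation, and building the token's
// service address from the node id needs a host name that actually resolves.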
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -1223,7 +1260,9 @@ public void testUserMaxRunningApps() throws Exception { @Test (timeout = 5000) public void testReservationWhileMultiplePriorities() { // Add a node - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -1305,9 +1344,15 @@ public void testAclSubmitApplication() throws Exception { @Test (timeout = 5000) public void testMultipleNodesSingleRackRequest() throws Exception { - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); - RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); - RMNode node3 = MockNodes.newNodeInfo(2, Resources.createResource(1024)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); + RMNode node2 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2"); + RMNode node3 = + MockNodes + .newNodeInfo(2, Resources.createResource(1024), 3, "127.0.0.3"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); @@ -1345,7 +1390,9 @@ public void testMultipleNodesSingleRackRequest() throws Exception { @Test (timeout = 5000) public void testFifoWithinQueue() throws Exception { - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(3072)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(3072), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); @@ -1403,8 +1450,12 @@ public void testAssignContainer() throws Exception { final String fairChild1 = fairParent + ".fairChild1"; final String fairChild2 = fairParent + ".fairChild2"; - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(8192)); - RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(8192)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(8192), 1, "127.0.0.1"); + RMNode node2 = + MockNodes + .newNodeInfo(1, Resources.createResource(8192), 2, "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); @@ -1523,7 +1574,9 @@ public void testNotAllowSubmitApplication() throws Exception { @Test public void testReservationThatDoesntFit() { - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node1 = + MockNodes + .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 0ee3a52..7601868 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -18,6 +18,9 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -56,6 +59,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; import org.junit.After; import org.junit.Before; @@ -153,14 +157,17 @@ public void testAppAttemptMetrics() throws Exception { @Test(timeout=2000) public void testNodeLocalAssignment() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); + RMContainerTokenSecretManager containerTokenSecretManager = + new RMContainerTokenSecretManager(new Configuration()); + containerTokenSecretManager.rollMasterKey(); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, - null, null, null); + null, containerTokenSecretManager, null); FifoScheduler scheduler = new FifoScheduler(); scheduler.reinitialize(new Configuration(), rmContext); RMNode node0 = MockNodes.newNodeInfo(1, - Resources.createResource(1024 * 64), 1234); + Resources.createResource(1024 * 64), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0); scheduler.handle(nodeEvent1); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index 5d65dcf..93d514c 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -20,8 +20,6 @@ import static org.junit.Assert.fail; -import java.io.File; -import java.io.FileNotFoundException; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; @@ -39,14 +37,11 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileContext; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -86,8 +81,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.BuilderUtils; import org.apache.hadoop.yarn.util.Records; -import 
org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestContainerManagerSecurity {
@@ -95,39 +88,51 @@ static Log LOG = LogFactory.getLog(TestContainerManagerSecurity.class);
   static final RecordFactory recordFactory = RecordFactoryProvider
       .getRecordFactory(null);
-  private static FileContext localFS = null;
-  private static final File localDir = new File("target",
-      TestContainerManagerSecurity.class.getName() + "-localDir")
-      .getAbsoluteFile();
   private static MiniYARNCluster yarnCluster;
 
   static final Configuration conf = new Configuration();
 
-  @BeforeClass
-  public static void setup() throws AccessControlException,
-      FileNotFoundException, UnsupportedFileSystemException, IOException {
-    localFS = FileContext.getLocalFSFileContext();
-    localFS.delete(new Path(localDir.getAbsolutePath()), true);
-    localDir.mkdir();
-
+  @Test (timeout = 1000000)
+  public void testContainerManagerWithSecurityEnabled() throws Exception {
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "kerberos");
-    // Set AM expiry interval to be very long.
-    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 100000L);
-    UserGroupInformation.setConfiguration(conf);
-    yarnCluster = new MiniYARNCluster(TestContainerManagerSecurity.class
-        .getName(), 1, 1, 1);
-    yarnCluster.init(conf);
-    yarnCluster.start();
+    testContainerManager();
   }
-
-  @AfterClass
-  public static void teardown() {
-    yarnCluster.stop();
+
+  @Test (timeout=1000000)
+  public void testContainerManagerWithSecurityDisabled() throws Exception {
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "simple");
+    testContainerManager();
   }
-
-  @Test
-  public void testAuthenticatedUser() throws IOException,
+
+  private void testContainerManager() throws Exception {
+    try {
+      yarnCluster = new MiniYARNCluster(TestContainerManagerSecurity.class
+          .getName(), 1, 1, 1);
+      conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 100000L);
+      UserGroupInformation.setConfiguration(conf);
+      yarnCluster.init(conf);
+      yarnCluster.start();
+
+      // Testing for authenticated user
+      testAuthenticatedUser();
+
+      // Testing for malicious user
+      testMaliceUser();
+
+      // Testing for unauthorized user
+      testUnauthorizedUser();
+
+    } finally {
+      if (yarnCluster != null) {
+        yarnCluster.stop();
+        yarnCluster = null;
+      }
+    }
+  }
+
+  private void testAuthenticatedUser() throws IOException,
       InterruptedException, YarnRemoteException {
 
     LOG.info("Running test for authenticated user");
@@ -179,8 +184,7 @@ public Void run() throws Exception {
     resourceManager.getClientRMService().forceKillApplication(request);
   }
 
-  @Test
-  public void testMaliceUser() throws IOException, InterruptedException,
+  private void testMaliceUser() throws IOException, InterruptedException,
       YarnRemoteException {
 
     LOG.info("Running test for malice user");
@@ -266,8 +270,7 @@ public Void run() {
     resourceManager.getClientRMService().forceKillApplication(request);
   }
 
-  @Test
-  public void testUnauthorizedUser() throws IOException, InterruptedException,
+  private void testUnauthorizedUser() throws IOException, InterruptedException,
       YarnRemoteException {
 
     LOG.info("\n\nRunning test for malice user");
@@ -318,8 +321,8 @@ public ContainerManager run() {
           LOG.info("Going to contact NM: unauthorized request");
           callWithIllegalContainerID(client, tokenId);
-          callWithIllegalResource(client, tokenId);
-          callWithIllegalUserName(client, tokenId);
+          callWithIllegalResource(client, tokenId, allocatedContainer);
+          callWithIllegalUserName(client, tokenId, allocatedContainer);
           return client;
         }
@@ -337,10 +340,11 @@ public ContainerManager run() {
         resourceManager.getRMContainerTokenSecretManager();
     final ContainerTokenIdentifier newTokenId =
         new ContainerTokenIdentifier(tokenId.getContainerID(),
-            tokenId.getNmHostAddress(), "testUser", tokenId.getResource(),
-            System.currentTimeMillis() - 1,
-            containerTokenSecreteManager.getCurrentKey().getKeyId());
-    byte[] passowrd =
+            tokenId.getNmHostAddress(), tokenId.getApplicationSubmitter(),
+            tokenId.getResource(),
+            System.currentTimeMillis() - 1,
+            containerTokenSecreteManager.getCurrentKey().getKeyId());
+    final byte[] passowrd =
         containerTokenSecreteManager.createPassword(
             newTokenId);
     // Create a valid token by using the key from the RM.
@@ -359,13 +363,12 @@ public Void run() {
         LOG.info("Going to contact NM with expired token");
         ContainerLaunchContext context =
             createContainerLaunchContextForTest(newTokenId);
-        Container container =
-            BuilderUtils.newContainer(newTokenId.getContainerID(), null, null,
-                BuilderUtils.newResource(newTokenId.getResource().getMemory(),
-                    newTokenId.getResource().getVirtualCores()), null, null, 0);
-        StartContainerRequest request = Records.newRecord(StartContainerRequest.class);
+        StartContainerRequest request =
+            Records.newRecord(StartContainerRequest.class);
         request.setContainerLaunchContext(context);
-        request.setContainer(container);
+        allocatedContainer.setContainerToken(BuilderUtils.newContainerToken(
+            allocatedContainer.getNodeId(), passowrd, newTokenId));
+        request.setContainer(allocatedContainer);
 
         //Calling startContainer with an expired token.
         try {
@@ -454,17 +457,19 @@ private AMRMProtocol submitAndRegisterApplication(
     // Ask for a container from the RM
     final InetSocketAddress schedulerAddr =
         resourceManager.getApplicationMasterService().getBindAddress();
-    ApplicationTokenIdentifier appTokenIdentifier = new ApplicationTokenIdentifier(
-        appAttempt.getAppAttemptId());
-    ApplicationTokenSecretManager appTokenSecretManager =
-        new ApplicationTokenSecretManager(conf);
-    appTokenSecretManager.setMasterKey(resourceManager
-        .getApplicationTokenSecretManager().getMasterKey());
-    Token appToken =
-        new Token(appTokenIdentifier,
-            appTokenSecretManager);
-    SecurityUtil.setTokenService(appToken, schedulerAddr);
-    currentUser.addToken(appToken);
+    if (UserGroupInformation.isSecurityEnabled()) {
+      ApplicationTokenIdentifier appTokenIdentifier = new ApplicationTokenIdentifier(
+          appAttempt.getAppAttemptId());
+      ApplicationTokenSecretManager appTokenSecretManager =
+          new ApplicationTokenSecretManager(conf);
+      appTokenSecretManager.setMasterKey(resourceManager
+          .getApplicationTokenSecretManager().getMasterKey());
+      Token appToken =
+          new Token(appTokenIdentifier,
+              appTokenSecretManager);
+      SecurityUtil.setTokenService(appToken, schedulerAddr);
+      currentUser.addToken(appToken);
+    }
 
     AMRMProtocol scheduler = currentUser
         .doAs(new PrivilegedAction() {
@@ -544,15 +549,14 @@ void callWithIllegalContainerID(ContainerManager client,
   }
 
   void callWithIllegalResource(ContainerManager client,
-      ContainerTokenIdentifier tokenId) {
+      ContainerTokenIdentifier tokenId, Container container) {
     StartContainerRequest request = recordFactory
         .newRecordInstance(StartContainerRequest.class);
     // Authenticated but unauthorized, due to wrong resource
     ContainerLaunchContext context =
         createContainerLaunchContextForTest(tokenId);
-    Container container =
-        BuilderUtils.newContainer(tokenId.getContainerID(), null, null,
-            BuilderUtils.newResource(2048, 1), null, null, 0);
+    Resource rsrc = container.getResource();
+    container.setResource(BuilderUtils.newResource(2048, 1));
     request.setContainerLaunchContext(context);
     request.setContainer(container);
     try {
@@ -570,20 +574,17 @@ void callWithIllegalResource(ContainerManager client,
       LOG.info("Got IOException: ",e);
       fail("IOException is not expected.");
     }
+    container.setResource(rsrc);
   }
 
   void callWithIllegalUserName(ContainerManager client,
-      ContainerTokenIdentifier tokenId) {
+      ContainerTokenIdentifier tokenId, Container container) {
    StartContainerRequest request = recordFactory
        .newRecordInstance(StartContainerRequest.class);
     // Authenticated but unauthorized, due to wrong resource
     ContainerLaunchContext context =
         createContainerLaunchContextForTest(tokenId);
     context.setUser("Saruman"); // Set a different user-name.
-    Container container =
-        BuilderUtils.newContainer(tokenId.getContainerID(), null, null,
-            BuilderUtils.newResource(tokenId.getResource().getMemory(), tokenId
-                .getResource().getVirtualCores()), null, null, 0);
     request.setContainerLaunchContext(context);
     request.setContainer(container);
     try {
@@ -607,7 +608,8 @@ private ContainerLaunchContext createContainerLaunchContextForTest(
       ContainerTokenIdentifier tokenId) {
     ContainerLaunchContext context =
         BuilderUtils.newContainerLaunchContext(
-            "testUser", new HashMap(),
+            tokenId.getApplicationSubmitter(),
+            new HashMap(),
             new HashMap(), new ArrayList(),
             new HashMap(), null, new HashMap());
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
index 9d0fb0c..60a73a6 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
@@ -1,20 +1,20 @@
 /**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 package org.apache.hadoop.yarn.server;
 
@@ -22,6 +22,8 @@
 import junit.framework.Assert;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -37,17 +39,27 @@ public class TestRMNMSecretKeys {
 
-  @Test
+  static Log LOG = LogFactory.getLog(TestRMNMSecretKeys.class);
+
+  @Test(timeout = 1000000)
   public void testNMUpdation() throws Exception {
     YarnConfiguration conf = new YarnConfiguration();
+    // validating RM NM keys for Unsecured environment
+    validateRMNMKeyExchange(conf);
+
+    // validating RM NM keys for secured environment
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-        "kerberos");
+      "kerberos");
     UserGroupInformation.setConfiguration(conf);
+    validateRMNMKeyExchange(conf);
+  }
+
+  private void validateRMNMKeyExchange(YarnConfiguration conf) throws Exception {
     // Default rolling and activation intervals are large enough, no need to
     // intervene
-    final DrainDispatcher dispatcher = new DrainDispatcher();
     ResourceManager rm = new ResourceManager() {
+      @Override
       protected void doSecureLogin() throws IOException {
         // Do nothing.
@@ -69,15 +81,15 @@ protected Dispatcher createDispatcher() {
     NodeHeartbeatResponse response = nm.nodeHeartbeat(true);
     Assert.assertNull(
-        "First heartbeat after registration shouldn't get any key updates!",
-        response.getMasterKey());
+      "First heartbeat after registration shouldn't get any key updates!",
+      response.getMasterKey());
     dispatcher.await();
 
     response = nm.nodeHeartbeat(true);
     Assert
-        .assertNull(
-            "Even second heartbeat after registration shouldn't get any key updates!",
-            response.getMasterKey());
+      .assertNull(
+        "Even second heartbeat after registration shouldn't get any key updates!",
+        response.getMasterKey());
     dispatcher.await();
 
     // Let's force a roll-over
@@ -88,17 +100,17 @@ protected Dispatcher createDispatcher() {
     // Heartbeats after roll-over and before activation should be fine.
     response = nm.nodeHeartbeat(true);
     Assert.assertNotNull(
-        "Heartbeats after roll-over and before activation should not err out.",
-        response.getMasterKey());
+      "Heartbeats after roll-over and before activation should not err out.",
+      response.getMasterKey());
     Assert.assertEquals(
-        "Roll-over should have incremented the key-id only by one!",
-        masterKey.getKeyId() + 1, response.getMasterKey().getKeyId());
+      "Roll-over should have incremented the key-id only by one!",
+      masterKey.getKeyId() + 1, response.getMasterKey().getKeyId());
     dispatcher.await();
 
     response = nm.nodeHeartbeat(true);
     Assert.assertNull(
-        "Second heartbeat after roll-over shouldn't get any key updates!",
-        response.getMasterKey());
+      "Second heartbeat after roll-over shouldn't get any key updates!",
+      response.getMasterKey());
     dispatcher.await();
 
     // Let's force activation
@@ -106,15 +118,17 @@ protected Dispatcher createDispatcher() {
     response = nm.nodeHeartbeat(true);
     Assert.assertNull("Activation shouldn't cause any key updates!",
-        response.getMasterKey());
+      response.getMasterKey());
     dispatcher.await();
 
     response = nm.nodeHeartbeat(true);
-    Assert.assertNull(
-        "Even second heartbeat after activation shouldn't get any key updates!",
-        response.getMasterKey());
+    Assert
+      .assertNull(
+        "Even second heartbeat after activation shouldn't get any key updates!",
+        response.getMasterKey());
     dispatcher.await();
 
     rm.stop();
+  }
 }
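For readers following the TestContainerManagerSecurity changes above, the sketch below shows the resulting test pattern in isolation: the same MiniYARNCluster-backed checks are driven once with Kerberos and once with simple authentication by toggling HADOOP_SECURITY_AUTHENTICATION before the cluster starts. This is a minimal illustration and not part of the patch; the class name, method names, and the "sketch" cluster name are invented for the example, and the per-user checks are elided.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.junit.Test;

public class ContainerManagerSecuritySketch {

  private final Configuration conf = new Configuration();
  private MiniYARNCluster yarnCluster;

  @Test(timeout = 1000000)
  public void withSecurityEnabled() throws Exception {
    // Kerberos path: container tokens are created and verified end to end.
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    runContainerManagerChecks();
  }

  @Test(timeout = 1000000)
  public void withSecurityDisabled() throws Exception {
    // Simple-auth path: the same checks must still pass without Kerberos.
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        "simple");
    runContainerManagerChecks();
  }

  private void runContainerManagerChecks() throws Exception {
    try {
      // One NodeManager, one local dir, one log dir, as in the patched test.
      yarnCluster = new MiniYARNCluster("sketch", 1, 1, 1);
      UserGroupInformation.setConfiguration(conf);
      yarnCluster.init(conf);
      yarnCluster.start();
      // ... authenticated, malicious and unauthorized-user checks would go
      // here, mirroring testAuthenticatedUser(), testMaliceUser() and
      // testUnauthorizedUser() in the patch above ...
    } finally {
      if (yarnCluster != null) {
        yarnCluster.stop();
        yarnCluster = null;
      }
    }
  }
}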