diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 88b57f1..a9e6b2b 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -54,9 +54,16 @@ @Public @Stable public static Resource newInstance(int memory, int vCores) { + return newInstance(memory, vCores, 0); + } + + @Public + @Stable + public static Resource newInstance(int memory, int vCores, int vDisks) { Resource resource = Records.newRecord(Resource.class); resource.setMemory(memory); resource.setVirtualCores(vCores); + resource.setVirtualDisks(vDisks); return resource; } @@ -105,12 +112,21 @@ public static Resource newInstance(int memory, int vCores) { @Evolving public abstract void setVirtualCores(int vCores); + @Public + @Evolving + public abstract int getVirtualDisks(); + + @Public + @Evolving + public abstract void setVirtualDisks(int vDisks); + @Override public int hashCode() { final int prime = 263167; int result = 3571; result = 939769357 + getMemory(); // prime * result = 939769357 initially result = prime * result + getVirtualCores(); + result = prime * result + getVirtualDisks(); return result; } @@ -124,7 +140,8 @@ public boolean equals(Object obj) { return false; Resource other = (Resource) obj; if (getMemory() != other.getMemory() || - getVirtualCores() != other.getVirtualCores()) { + getVirtualCores() != other.getVirtualCores() || + getVirtualDisks() != other.getVirtualDisks()) { return false; } return true; @@ -132,6 +149,7 @@ public boolean equals(Object obj) { @Override public String toString() { - return "<memory:" + getMemory() + ", vCores:" + getVirtualCores() + ">"; + return "<memory:" + getMemory() + ", vCores:" + getVirtualCores() + + ", vDisks:" + getVirtualDisks() + ">"; } } diff --git 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index b459ee3..82d2171 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -160,7 +160,10 @@ private static void addDeprecatedKeys() { public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024; public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = YARN_PREFIX + "scheduler.minimum-allocation-vcores"; - public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = 1; + public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = 1; + public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS = + YARN_PREFIX + "scheduler.minimum-allocation-vdisks"; + public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS = 0; /** Maximum request grant-able by the RM scheduler. 
*/ public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = @@ -169,6 +172,9 @@ private static void addDeprecatedKeys() { public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES = YARN_PREFIX + "scheduler.maximum-allocation-vcores"; public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES = 4; + public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS = + YARN_PREFIX + "scheduler.maximum-allocation-vdisks"; + public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS = 20; /** Number of threads to handle scheduler interface.*/ public static final String RM_SCHEDULER_CLIENT_THREAD_COUNT = @@ -790,7 +796,11 @@ private static void addDeprecatedKeys() { NM_PREFIX + "resource.percentage-physical-cpu-limit"; public static final int DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT = 100; - + + /** Number of Virtual Disks which can be allocated for containers. */ + public static final String NM_VDISKS = NM_PREFIX + "resource.vdisks"; + public static final int DEFAULT_NM_VDISKS = 20; + /** NM Webapp address.**/ public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address"; public static final int DEFAULT_NM_WEBAPP_PORT = 8042; diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 5c86c2d..02b57d7 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -56,6 +56,7 @@ message ContainerIdProto { message ResourceProto { optional int32 memory = 1; optional int32 virtual_cores = 2; + optional int32 virtual_disks = 3; } message ResourceOptionProto { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index 7906e6f..85f78a7 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -216,6 +216,8 @@ private int containerMemory = 10; // VirtualCores to request for the container on which the shell command will run private int containerVirtualCores = 1; + // VirtualDisks to request for the container on which the shell command will run + private int containerVirtualDisks = 0; // Priority of the request private int requestPriority; @@ -356,6 +358,8 @@ public boolean init(String[] args) throws ParseException, IOException { "Amount of memory in MB to be requested to run the shell command"); opts.addOption("container_vcores", true, "Amount of virtual cores to be requested to run the shell command"); + opts.addOption("container_vdisks", true, + "Amount of virtual disks to be requested to run the shell command"); opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed"); opts.addOption("priority", true, "Application Priority. 
Default 0"); @@ -488,6 +492,8 @@ public boolean init(String[] args) throws ParseException, IOException { "container_memory", "10")); containerVirtualCores = Integer.parseInt(cliParser.getOptionValue( "container_vcores", "1")); + containerVirtualDisks = Integer.parseInt(cliParser.getOptionValue( + "container_vdisks", "0")); numTotalContainers = Integer.parseInt(cliParser.getOptionValue( "num_containers", "1")); if (numTotalContainers == 0) { @@ -582,6 +588,9 @@ public void run() throws YarnException, IOException { int maxVCores = response.getMaximumResourceCapability().getVirtualCores(); LOG.info("Max vcores capabililty of resources in this cluster " + maxVCores); + int maxVDisks = response.getMaximumResourceCapability().getVirtualDisks(); + LOG.info("Max vdisks capability of resources in this cluster " + maxVDisks); + // A resource ask cannot exceed the max. if (containerMemory > maxMem) { LOG.info("Container memory specified above max threshold of cluster." @@ -597,6 +606,13 @@ public void run() throws YarnException, IOException { containerVirtualCores = maxVCores; } + if (containerVirtualDisks > maxVDisks) { + LOG.info("Container virtual disks specified above max threshold of cluster." + + " Using max value. 
specified=" + containerVirtualDisks + ", max=" + + maxVDisks); + containerVirtualDisks = maxVDisks; + } + List previousAMRunningContainers = response.getContainersFromPreviousAttempts(); LOG.info(appAttemptID + " received " + previousAMRunningContainers.size() @@ -755,10 +771,12 @@ public void onContainersAllocated(List allocatedContainers) { + ", containerNode=" + allocatedContainer.getNodeId().getHost() + ":" + allocatedContainer.getNodeId().getPort() + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress() - + ", containerResourceMemory" + + ", containerResourceMemory=" + allocatedContainer.getResource().getMemory() - + ", containerResourceVirtualCores" - + allocatedContainer.getResource().getVirtualCores()); + + ", containerResourceVirtualCores=" + + allocatedContainer.getResource().getVirtualCores() + + ", containerResourceVirtualDisks=" + + allocatedContainer.getResource().getVirtualDisks()); // + ", containerToken" // +allocatedContainer.getContainerToken().getIdentifier().toString()); @@ -1020,7 +1038,7 @@ private ContainerRequest setupContainerAskForRM() { // Set up resource type requirements // For now, memory and CPU are supported so we set memory and cpu requirements Resource capability = Resource.newInstance(containerMemory, - containerVirtualCores); + containerVirtualCores, containerVirtualDisks); ContainerRequest request = new ContainerRequest(capability, null, null, pri); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 0e9a4e4..1c5818c 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java 
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -129,6 +129,8 @@ private int amMemory = 10; // Amt. of virtual core resource to request for to run the App Master private int amVCores = 1; + // Amt. of virtual disk resource to request for to run the App Master + private int amVDisks = 0; // Application master jar file private String appMasterJar = ""; @@ -150,6 +152,8 @@ private int containerMemory = 10; // Amt. of virtual cores to request for container in which shell script will be executed private int containerVirtualCores = 1; + // Amt. of virtual disks to request for container in which shell script will be executed + private int containerVirtualDisks = 0; // No. of containers in which the shell script needs to be executed private int numContainers = 1; private String nodeLabelExpression = null; @@ -245,6 +249,7 @@ public Client(Configuration conf) throws Exception { opts.addOption("timeout", true, "Application timeout in milliseconds"); opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master"); opts.addOption("master_vcores", true, "Amount of virtual cores to be requested to run the application master"); + opts.addOption("master_vdisks", true, "Amount of virtual disks to be requested to run the application master"); opts.addOption("jar", true, "Jar file containing the application master"); opts.addOption("shell_command", true, "Shell command to be executed by " + "the Application Master. 
Can only specify either --shell_command " + @@ -258,6 +263,7 @@ public Client(Configuration conf) throws Exception { opts.addOption("shell_cmd_priority", true, "Priority for the shell command containers"); opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command"); opts.addOption("container_vcores", true, "Amount of virtual cores to be requested to run the shell command"); + opts.addOption("container_vdisks", true, "Amount of virtual disks to be requested to run the shell command"); opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed"); opts.addOption("log_properties", true, "log4j.properties file"); opts.addOption("keep_containers_across_application_attempts", false, @@ -345,6 +351,7 @@ public boolean init(String[] args) throws ParseException { amQueue = cliParser.getOptionValue("queue", "default"); amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "10")); amVCores = Integer.parseInt(cliParser.getOptionValue("master_vcores", "1")); + amVDisks = Integer.parseInt(cliParser.getOptionValue("master_vdisks", "0")); if (amMemory < 0) { throw new IllegalArgumentException("Invalid memory specified for application master, exiting." @@ -354,6 +361,10 @@ public boolean init(String[] args) throws ParseException { throw new IllegalArgumentException("Invalid virtual cores specified for application master, exiting." + " Specified virtual cores=" + amVCores); } + if (amVDisks < 0) { + throw new IllegalArgumentException("Invalid virtual disks specified for application master, exiting." 
+ + " Specified virtual disks=" + amVDisks); + } if (!cliParser.hasOption("jar")) { throw new IllegalArgumentException("No jar file specified for application master"); @@ -396,14 +407,17 @@ public boolean init(String[] args) throws ParseException { containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10")); containerVirtualCores = Integer.parseInt(cliParser.getOptionValue("container_vcores", "1")); + containerVirtualDisks = Integer.parseInt(cliParser.getOptionValue("container_vdisks", "0")); numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1")); - if (containerMemory < 0 || containerVirtualCores < 0 || numContainers < 1) { - throw new IllegalArgumentException("Invalid no. of containers or container memory/vcores specified," + if (containerMemory < 0 || containerVirtualCores < 0 + || containerVirtualDisks < 0 || numContainers < 1) { + throw new IllegalArgumentException("Invalid no. of containers or container memory/vcores/vdisks specified," + " exiting." + " Specified containerMemory=" + containerMemory + ", containerVirtualCores=" + containerVirtualCores + + ", containerVirtualDisks=" + containerVirtualDisks + ", numContainer=" + numContainers); } @@ -507,6 +521,16 @@ public boolean run() throws IOException, YarnException { + ", max=" + maxVCores); amVCores = maxVCores; } + + int maxVDisks = appResponse.getMaximumResourceCapability().getVirtualDisks(); + LOG.info("Max virtual disks capability of resources in this cluster " + maxVDisks); + + if (amVDisks > maxVDisks) { + LOG.info("AM virtual disks specified above max threshold of cluster. " + + "Using max value." 
+ "specified=" + amVDisks + + ", max=" + maxVDisks); + amVDisks = maxVDisks; + } // set the application name ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext(); @@ -625,6 +649,7 @@ public boolean run() throws IOException, YarnException { // Set params for Application Master vargs.add("--container_memory " + String.valueOf(containerMemory)); vargs.add("--container_vcores " + String.valueOf(containerVirtualCores)); + vargs.add("--container_vdisks " + String.valueOf(containerVirtualDisks)); vargs.add("--num_containers " + String.valueOf(numContainers)); if (null != nodeLabelExpression) { appContext.setNodeLabelExpression(nodeLabelExpression); @@ -658,7 +683,7 @@ public boolean run() throws IOException, YarnException { // Set up resource type requirements // For now, both memory and vcores are supported, so we set memory and // vcores requirements - Resource capability = Resource.newInstance(amMemory, amVCores); + Resource capability = Resource.newInstance(amMemory, amVCores, amVDisks); appContext.setResource(capability); // Service data is a binary blob that can be passed to the application diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index 0ded5bd..57282e8 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -196,10 +196,14 @@ public void 
testDSShell(boolean haveDomain) throws Exception { "512", "--master_vcores", "2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", - "1" + "1", + "--container_vdisks", + "0" }; if (haveDomain) { String[] domainArgs = { @@ -506,10 +510,14 @@ public void testDSShellWithCustomLogPropertyFile() throws Exception { "512", "--master_vcores", "2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", - "1" + "1", + "--container_vdisks", + "0" }; //Before run the DS, the default the log level is INFO @@ -550,10 +558,14 @@ public void testDSShellWithCommands() throws Exception { "512", "--master_vcores", "2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", - "1" + "1", + "--container_vdisks", + "0" }; LOG.info("Initializing DS Client"); @@ -584,10 +596,14 @@ public void testDSShellWithMultipleArgs() throws Exception { "512", "--master_vcores", "2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", - "1" + "1", + "--container_vdisks", + "0" }; LOG.info("Initializing DS Client"); @@ -632,10 +648,14 @@ public void testDSShellWithShellScript() throws Exception { "512", "--master_vcores", "2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", - "1" + "1", + "--container_vdisks", + "0" }; LOG.info("Initializing DS Client"); @@ -736,10 +756,14 @@ public void testDSShellWithInvalidArgs() throws Exception { "512", "--master_vcores", "-2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", - "1" + "1", + "--container_vdisks", + "0" }; client.init(args); Assert.fail("Exception is expected"); @@ -761,10 +785,14 @@ public void testDSShellWithInvalidArgs() throws Exception { "512", "--master_vcores", "2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", "1", + "--container_vdisks", + "0", "--shell_script", "test.sh" }; @@ -787,10 +815,14 @@ public void testDSShellWithInvalidArgs() throws 
Exception { "512", "--master_vcores", "2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", - "1" + "1", + "--container_vdisks", + "0" }; client.init(args); Assert.fail("Exception is expected"); @@ -854,10 +886,14 @@ public void testDebugFlag() throws Exception { "512", "--master_vcores", "2", + "--master_vdisks", + "0", "--container_memory", "128", "--container_vcores", "1", + "--container_vdisks", + "0", "--debug" }; diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 071c1ee..519f23f 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -115,16 +115,24 @@ /** * Class compares Resource by memory then cpu in reverse order */ - class ResourceReverseMemoryThenCpuComparator implements Comparator<Resource> { + class ResourceReverseMemoryThenCpuThenVdisksComparator implements Comparator<Resource> { @Override public int compare(Resource arg0, Resource arg1) { int mem0 = arg0.getMemory(); int mem1 = arg1.getMemory(); int cpu0 = arg0.getVirtualCores(); int cpu1 = arg1.getVirtualCores(); + int vdisks0 = arg0.getVirtualDisks(); + int vdisks1 = arg1.getVirtualDisks(); if(mem0 == mem1) { if(cpu0 == cpu1) { - return 0; + if (vdisks0 == vdisks1) { + return 0; + } + if (vdisks0 < vdisks1) { + return 1; + } + return -1; } if(cpu0 < cpu1) { return 1; @@ -143,8 +151,10 @@ static boolean canFit(Resource arg0, Resource arg1) { int mem1 = arg1.getMemory(); int cpu0 = arg0.getVirtualCores(); int cpu1 = arg1.getVirtualCores(); - - if(mem0 <= mem1 && cpu0 <= cpu1) { + int vdisks0 = arg0.getVirtualDisks(); + int vdisks1 = arg1.getVirtualDisks(); + + if(mem0 <= 
mem1 && cpu0 <= cpu1 && vdisks0 <= vdisks1) { return true; } return false; @@ -623,7 +633,7 @@ private void addResourceRequestToAsk(ResourceRequest remoteRequest) { if (reqMap == null) { // capabilities are stored in reverse sorted order. smallest last. reqMap = new TreeMap<Resource, ResourceRequestInfo>( - new ResourceReverseMemoryThenCpuComparator()); + new ResourceReverseMemoryThenCpuThenVdisksComparator()); remoteRequests.put(resourceName, reqMap); } ResourceRequestInfo resourceRequestInfo = reqMap.get(capability); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java index fa2779e..7aad66b 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java @@ -202,6 +202,11 @@ private void printNodeStatus(String nodeIdStr) throws YarnException, : (nodeReport.getUsed().getVirtualCores() + " vcores")); nodeReportStr.print("\tCPU-Capacity : "); nodeReportStr.println(nodeReport.getCapability().getVirtualCores() + " vcores"); + nodeReportStr.print("\tDisk-I/O-Used : "); + nodeReportStr.println((nodeReport.getUsed() == null) ? 
"0 vdisks" + : (nodeReport.getUsed().getVirtualDisks() + " vdisks")); + nodeReportStr.print("\tDisk-I/O-Capacity : "); + nodeReportStr.println(nodeReport.getCapability().getVirtualDisks() + " vdisks"); nodeReportStr.print("\tNode-Labels : "); // Create a List for node labels since we need it get sorted diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index ec00d45..13fa2b2 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -827,7 +827,7 @@ public AllocateResponse createFakeAllocateResponse() { return AllocateResponse.newInstance(-1, new ArrayList(), new ArrayList(), new ArrayList(), - Resource.newInstance(1024, 2), null, 1, + Resource.newInstance(1024, 2, 2), null, 1, null, new ArrayList()); } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java index bfc6656..0ac7344 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java @@ -169,6 +169,7 @@ public void testSubmitApplicationOnHA() throws Exception { Resource capability = Records.newRecord(Resource.class); capability.setMemory(10); capability.setVirtualCores(1); + capability.setVirtualDisks(1); appContext.setResource(capability); ApplicationId appId = 
client.submitApplication(appContext); Assert.assertTrue(getActiveRM().getRMContext().getRMApps() diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java index 8885769..6a7f8da 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java @@ -55,7 +55,7 @@ public void shutDown() { @Test(timeout = 15000) public void testResourceTrackerOnHA() throws Exception { NodeId nodeId = NodeId.newInstance("localhost", 0); - Resource resource = Resource.newInstance(2048, 4); + Resource resource = Resource.newInstance(2048, 4, 4); // make sure registerNodeManager works when failover happens RegisterNodeManagerRequest request = diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java index e24b5f6..7e51b3a 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java @@ -140,7 +140,7 @@ public static void setup() throws Exception { priority = Priority.newInstance(1); priority2 = Priority.newInstance(2); - capability = Resource.newInstance(1024, 1); + capability = Resource.newInstance(1024, 1, 1); node = nodeReports.get(0).getNodeId().getHost(); rack = nodeReports.get(0).getRackName(); @@ -171,7 +171,7 @@ public void startApp() throws Exception { new HashMap(), null, new HashMap()); 
appContext.setAMContainerSpec(amContainer); - appContext.setResource(Resource.newInstance(1024, 1)); + appContext.setResource(Resource.newInstance(1024, 1, 1)); // Create the request to send to the applications manager SubmitApplicationRequest appRequest = Records .newRecord(SubmitApplicationRequest.class); @@ -230,13 +230,13 @@ public void testAMRMClientMatchingFit() throws YarnException, IOException { amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); - Resource capability1 = Resource.newInstance(1024, 2); - Resource capability2 = Resource.newInstance(1024, 1); - Resource capability3 = Resource.newInstance(1000, 2); - Resource capability4 = Resource.newInstance(2000, 1); - Resource capability5 = Resource.newInstance(1000, 3); - Resource capability6 = Resource.newInstance(2000, 1); - Resource capability7 = Resource.newInstance(2000, 1); + Resource capability1 = Resource.newInstance(1024, 2, 2); + Resource capability2 = Resource.newInstance(1024, 1, 1); + Resource capability3 = Resource.newInstance(1000, 2, 2); + Resource capability4 = Resource.newInstance(2000, 1, 1); + Resource capability5 = Resource.newInstance(1000, 3, 3); + Resource capability6 = Resource.newInstance(2000, 1, 1); + Resource capability7 = Resource.newInstance(2000, 1, 1); ContainerRequest storedContainer1 = new ContainerRequest(capability1, nodes, racks, priority); @@ -264,7 +264,7 @@ public void testAMRMClientMatchingFit() throws YarnException, IOException { List> matches; ContainerRequest storedRequest; // exact match - Resource testCapability1 = Resource.newInstance(1024, 2); + Resource testCapability1 = Resource.newInstance(1024, 2, 2); matches = amClient.getMatchingRequests(priority, node, testCapability1); verifyMatches(matches, 1); storedRequest = matches.get(0).iterator().next(); @@ -272,7 +272,7 @@ public void testAMRMClientMatchingFit() throws YarnException, IOException { amClient.removeContainerRequest(storedContainer1); // exact matching with order maintained - 
Resource testCapability2 = Resource.newInstance(2000, 1); + Resource testCapability2 = Resource.newInstance(2000, 1, 1); matches = amClient.getMatchingRequests(priority, node, testCapability2); verifyMatches(matches, 2); // must be returned in the order they were made @@ -287,11 +287,11 @@ public void testAMRMClientMatchingFit() throws YarnException, IOException { amClient.removeContainerRequest(storedContainer6); // matching with larger container. all requests returned - Resource testCapability3 = Resource.newInstance(4000, 4); + Resource testCapability3 = Resource.newInstance(4000, 4, 4); matches = amClient.getMatchingRequests(priority, node, testCapability3); assert(matches.size() == 4); - Resource testCapability4 = Resource.newInstance(1024, 2); + Resource testCapability4 = Resource.newInstance(1024, 2, 2); matches = amClient.getMatchingRequests(priority, node, testCapability4); assert(matches.size() == 2); // verify non-fitting containers are not returned and fitting ones are @@ -304,13 +304,13 @@ public void testAMRMClientMatchingFit() throws YarnException, IOException { testRequest == storedContainer3); } - Resource testCapability5 = Resource.newInstance(512, 4); + Resource testCapability5 = Resource.newInstance(512, 4, 4); matches = amClient.getMatchingRequests(priority, node, testCapability5); assert(matches.size() == 0); // verify requests without relaxed locality are only returned at specific // locations - Resource testCapability7 = Resource.newInstance(2000, 1); + Resource testCapability7 = Resource.newInstance(2000, 1, 1); matches = amClient.getMatchingRequests(priority2, ResourceRequest.ANY, testCapability7); assert(matches.size() == 0); @@ -344,7 +344,7 @@ public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOExce amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); - Resource capability = Resource.newInstance(1024, 2); + Resource capability = Resource.newInstance(1024, 2, 2); ContainerRequest 
storedContainer1 = new ContainerRequest(capability, nodes, null, priority); @@ -549,7 +549,7 @@ public void testAllocationWithBlacklist() throws YarnException, IOException { // create a invalid ContainerRequest - memory value is minus ContainerRequest invalidContainerRequest = - new ContainerRequest(Resource.newInstance(-1024, 1), + new ContainerRequest(Resource.newInstance(-1024, 1, 1), nodes, racks, priority); amClient.addContainerRequest(invalidContainerRequest); amClient.updateBlacklist(localNodeBlacklist, null); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java index cb8c86a..db510bc 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java @@ -46,7 +46,7 @@ public void testFillInRacks() { MyResolver.class, DNSToSwitchMapping.class); client.init(conf); - Resource capability = Resource.newInstance(1024, 1); + Resource capability = Resource.newInstance(1024, 1, 1); ContainerRequest request = new ContainerRequest(capability, new String[] {"host1", "host2"}, new String[] {"/rack2"}, Priority.newInstance(1)); @@ -68,7 +68,7 @@ public void testDisableLocalityRelaxation() { MyResolver.class, DNSToSwitchMapping.class); client.init(conf); - Resource capability = Resource.newInstance(1024, 1); + Resource capability = Resource.newInstance(1024, 1, 1); ContainerRequest nodeLevelRequest = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); @@ -135,7 +135,7 @@ public void testDifferentLocalityRelaxationSamePriority() { MyResolver.class, 
DNSToSwitchMapping.class); client.init(conf); - Resource capability = Resource.newInstance(1024, 1); + Resource capability = Resource.newInstance(1024, 1, 1); ContainerRequest request1 = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); @@ -156,7 +156,7 @@ public void testInvalidValidWhenOldRemoved() { MyResolver.class, DNSToSwitchMapping.class); client.init(conf); - Resource capability = Resource.newInstance(1024, 1); + Resource capability = Resource.newInstance(1024, 1, 1); ContainerRequest request1 = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); @@ -195,7 +195,7 @@ public void testLocalityRelaxationDifferentLevels() { MyResolver.class, DNSToSwitchMapping.class); client.init(conf); - Resource capability = Resource.newInstance(1024, 1); + Resource capability = Resource.newInstance(1024, 1, 1); ContainerRequest request1 = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java index a28c6ed..6f4cd86 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java @@ -80,10 +80,25 @@ public void setVirtualCores(int vCores) { } @Override + public int getVirtualDisks() { + ResourceProtoOrBuilder p = viaProto ? 
proto : builder; + return (p.getVirtualDisks()); + } + + @Override + public void setVirtualDisks(int vDisks) { + maybeInitBuilder(); + builder.setVirtualDisks((vDisks)); + } + + @Override public int compareTo(Resource other) { int diff = this.getMemory() - other.getMemory(); if (diff == 0) { diff = this.getVirtualCores() - other.getVirtualCores(); + if (diff == 0) { + diff = this.getVirtualDisks() - other.getVirtualDisks(); + } } return diff; } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java index 593bfc3..418ffa9 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java @@ -1,20 +1,20 @@ /** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.yarn.security; @@ -48,7 +48,7 @@ /** * TokenIdentifier for a container. Encodes {@link ContainerId}, * {@link Resource} needed by the container and the target NMs host-address. - * + * */ @Public @Evolving @@ -61,17 +61,17 @@ private ContainerTokenIdentifierProto proto; public ContainerTokenIdentifier(ContainerId containerID, - String hostName, String appSubmitter, Resource r, long expiryTimeStamp, - int masterKeyId, long rmIdentifier, Priority priority, long creationTime) { + String hostName, String appSubmitter, Resource r, long expiryTimeStamp, + int masterKeyId, long rmIdentifier, Priority priority, long creationTime) { this(containerID, hostName, appSubmitter, r, expiryTimeStamp, masterKeyId, rmIdentifier, priority, creationTime, null); } public ContainerTokenIdentifier(ContainerId containerID, String hostName, - String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId, - long rmIdentifier, Priority priority, long creationTime, - LogAggregationContext logAggregationContext) { - ContainerTokenIdentifierProto.Builder builder = + String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId, + long rmIdentifier, Priority priority, long creationTime, + LogAggregationContext logAggregationContext) { + 
ContainerTokenIdentifierProto.Builder builder = ContainerTokenIdentifierProto.newBuilder(); if (containerID != null) { builder.setContainerId(((ContainerIdPBImpl)containerID).getProto()); @@ -88,7 +88,7 @@ public ContainerTokenIdentifier(ContainerId containerID, String hostName, builder.setPriority(((PriorityPBImpl)priority).getProto()); } builder.setCreationTime(creationTime); - + if (logAggregationContext != null) { builder.setLogAggregationContext( ((LogAggregationContextPBImpl)logAggregationContext).getProto()); @@ -149,7 +149,7 @@ public long getCreationTime() { public long getRMIdentifier() { return proto.getRmIdentifier(); } - + public ContainerTokenIdentifierProto getProto() { return proto; } @@ -195,7 +195,7 @@ protected Text getKind() { return KIND; } } - + @Override public int hashCode() { return getProto().hashCode(); @@ -215,4 +215,4 @@ public boolean equals(Object other) { public String toString() { return TextFormat.shortDebugString(getProto()); } -} +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index 6f5b40e..d94f716 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -21,6 +21,9 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; +import java.util.Iterator; +import java.util.TreeSet; + /** * A {@link ResourceCalculator} which uses the concept of * dominant resource to compare multi-dimensional resources. 
@@ -45,32 +48,36 @@ @Private @Unstable public class DominantResourceCalculator extends ResourceCalculator { - + @Override public int compare(Resource clusterResource, Resource lhs, Resource rhs) { if (lhs.equals(rhs)) { return 0; } - - float l = getResourceAsValue(clusterResource, lhs, true); - float r = getResourceAsValue(clusterResource, rhs, true); - - if (l < r) { - return -1; - } else if (l > r) { - return 1; - } else { - l = getResourceAsValue(clusterResource, lhs, false); - r = getResourceAsValue(clusterResource, rhs, false); - if (l < r) { - return -1; - } else if (l > r) { - return 1; + + TreeSet<Float> lhsValues = new TreeSet<Float>(); + lhsValues.add((float) lhs.getMemory() / clusterResource.getMemory()); + lhsValues.add((float) lhs.getVirtualCores() / clusterResource.getVirtualCores()); + lhsValues.add((float) lhs.getVirtualDisks() / clusterResource.getVirtualDisks()); + TreeSet<Float> rhsValues = new TreeSet<Float>(); + rhsValues.add((float) rhs.getMemory() / clusterResource.getMemory()); + rhsValues.add((float) rhs.getVirtualCores() / clusterResource.getVirtualCores()); + rhsValues.add((float) rhs.getVirtualDisks() / clusterResource.getVirtualDisks()); + + Iterator<Float> lhsIter = lhsValues.descendingIterator(); + Iterator<Float> rhsIter = rhsValues.descendingIterator(); + + int diff = 0; + while (diff == 0 && lhsIter.hasNext() && rhsIter.hasNext()) { + float l = lhsIter.next(); + float r = rhsIter.next(); + if (l < r) { + diff = -1; + } else if (l > r) { + diff = 1; } } - - return 0; + + return diff; } /** @@ -94,20 +101,31 @@ protected float getResourceAsValue( (float)resource.getVirtualCores() / clusterResource.getVirtualCores() ); } - + + protected float getResourceAsValueMax( Resource clusterResource, + Resource resource) { + return Math.max((float) resource.getMemory() / clusterResource.getMemory(), + (float) resource.getVirtualCores() / clusterResource.getVirtualCores()); + } + @Override public int computeAvailableContainers(Resource available, Resource required) { - return Math.min( - available.getMemory() /
required.getMemory(), + int min = Math.min( + available.getMemory() / required.getMemory(), available.getVirtualCores() / required.getVirtualCores()); + if (required.getVirtualDisks() != 0) { + min = Math.min(min, + available.getVirtualDisks() / required.getVirtualDisks()); + } + return min; } @Override public float divide(Resource clusterResource, Resource numerator, Resource denominator) { return - getResourceAsValue(clusterResource, numerator, true) / - getResourceAsValue(clusterResource, denominator, true); + getResourceAsValueMax(clusterResource, numerator) / + getResourceAsValueMax(clusterResource, denominator); } @Override @@ -120,17 +138,22 @@ public boolean isInvalidDivisor(Resource r) { @Override public float ratio(Resource a, Resource b) { - return Math.max( - (float)a.getMemory()/b.getMemory(), - (float)a.getVirtualCores()/b.getVirtualCores() - ); + float max = Math.max( + (float) a.getMemory() / b.getMemory(), + (float) a.getVirtualCores() / b.getVirtualCores()); + if (b.getVirtualDisks() != 0) { + max = Math.max(max, + (float) a.getVirtualDisks() / b.getVirtualDisks()); + } + return max; } @Override public Resource divideAndCeil(Resource numerator, int denominator) { return Resources.createResource( divideAndCeil(numerator.getMemory(), denominator), - divideAndCeil(numerator.getVirtualCores(), denominator) + divideAndCeil(numerator.getVirtualCores(), denominator), + divideAndCeil(numerator.getVirtualDisks(), denominator) ); } @@ -147,15 +170,21 @@ public Resource normalize(Resource r, Resource minimumResource, Math.max(r.getVirtualCores(), minimumResource.getVirtualCores()), stepFactor.getVirtualCores()), maximumResource.getVirtualCores()); + int normalizedVdisks = Math.min( + roundUp( + Math.max(r.getVirtualDisks(), minimumResource.getVirtualDisks()), + stepFactor.getVirtualDisks()), + maximumResource.getVirtualDisks()); return Resources.createResource(normalizedMemory, - normalizedCores); + normalizedCores, normalizedVdisks); } @Override public 
Resource roundUp(Resource r, Resource stepFactor) { return Resources.createResource( roundUp(r.getMemory(), stepFactor.getMemory()), - roundUp(r.getVirtualCores(), stepFactor.getVirtualCores()) + roundUp(r.getVirtualCores(), stepFactor.getVirtualCores()), + roundUp(r.getVirtualDisks(), stepFactor.getVirtualDisks()) ); } @@ -163,7 +192,8 @@ public Resource roundUp(Resource r, Resource stepFactor) { public Resource roundDown(Resource r, Resource stepFactor) { return Resources.createResource( roundDown(r.getMemory(), stepFactor.getMemory()), - roundDown(r.getVirtualCores(), stepFactor.getVirtualCores()) + roundDown(r.getVirtualCores(), stepFactor.getVirtualCores()), + roundDown(r.getVirtualDisks(), stepFactor.getVirtualDisks()) ); } @@ -175,7 +205,10 @@ public Resource multiplyAndNormalizeUp(Resource r, double by, (int)Math.ceil(r.getMemory() * by), stepFactor.getMemory()), roundUp( (int)Math.ceil(r.getVirtualCores() * by), - stepFactor.getVirtualCores()) + stepFactor.getVirtualCores()), + roundUp( + (int)Math.ceil(r.getVirtualDisks() * by), + stepFactor.getVirtualDisks()) ); } @@ -190,6 +223,10 @@ public Resource multiplyAndNormalizeDown(Resource r, double by, roundDown( (int)(r.getVirtualCores() * by), stepFactor.getVirtualCores() + ), + roundDown( + (int)(r.getVirtualDisks() * by), + stepFactor.getVirtualDisks() ) ); } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java index a205bd1..473d096 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java @@ -51,10 +51,23 @@ public void setVirtualCores(int cores) { } @Override + public int getVirtualDisks() { + return 0; + } + + @Override +
public void setVirtualDisks(int disks) { + throw new RuntimeException("NONE cannot be modified!"); + } + + @Override public int compareTo(Resource o) { int diff = 0 - o.getMemory(); if (diff == 0) { diff = 0 - o.getVirtualCores(); + if (diff == 0) { + diff = 0 - o.getVirtualDisks(); + } } return diff; } @@ -84,10 +97,23 @@ public void setVirtualCores(int cores) { } @Override + public int getVirtualDisks() { + return Integer.MAX_VALUE; + } + + @Override + public void setVirtualDisks(int disks) { + throw new RuntimeException("UNBOUNDED cannot be modified!"); + } + + @Override public int compareTo(Resource o) { int diff = 0 - o.getMemory(); if (diff == 0) { diff = 0 - o.getVirtualCores(); + if (diff == 0) { + diff = 0 - o.getVirtualDisks(); + } } return diff; } @@ -99,9 +125,14 @@ public static Resource createResource(int memory) { } public static Resource createResource(int memory, int cores) { + return createResource(memory, cores, 0); + } + + public static Resource createResource(int memory, int cores, int vdisks) { Resource resource = Records.newRecord(Resource.class); resource.setMemory(memory); resource.setVirtualCores(cores); + resource.setVirtualDisks(vdisks); return resource; } @@ -114,12 +145,14 @@ public static Resource unbounded() { } public static Resource clone(Resource res) { - return createResource(res.getMemory(), res.getVirtualCores()); + return createResource( + res.getMemory(), res.getVirtualCores(), res.getVirtualDisks()); } public static Resource addTo(Resource lhs, Resource rhs) { lhs.setMemory(lhs.getMemory() + rhs.getMemory()); lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores()); + lhs.setVirtualDisks(lhs.getVirtualDisks() + rhs.getVirtualDisks()); return lhs; } @@ -130,6 +163,7 @@ public static Resource add(Resource lhs, Resource rhs) { public static Resource subtractFrom(Resource lhs, Resource rhs) { lhs.setMemory(lhs.getMemory() - rhs.getMemory()); lhs.setVirtualCores(lhs.getVirtualCores() - rhs.getVirtualCores()); +
lhs.setVirtualDisks(lhs.getVirtualDisks() - rhs.getVirtualDisks()); return lhs; } @@ -144,6 +178,7 @@ public static Resource negate(Resource resource) { public static Resource multiplyTo(Resource lhs, double by) { lhs.setMemory((int)(lhs.getMemory() * by)); lhs.setVirtualCores((int)(lhs.getVirtualCores() * by)); + lhs.setVirtualDisks((int)(lhs.getVirtualDisks() * by)); return lhs; } @@ -165,6 +200,7 @@ public static Resource multiplyAndRoundDown(Resource lhs, double by) { Resource out = clone(lhs); out.setMemory((int)(lhs.getMemory() * by)); out.setVirtualCores((int)(lhs.getVirtualCores() * by)); + out.setVirtualDisks((int)(lhs.getVirtualDisks() * by)); return out; } @@ -253,11 +289,13 @@ public static Resource max( public static boolean fitsIn(Resource smaller, Resource bigger) { return smaller.getMemory() <= bigger.getMemory() && - smaller.getVirtualCores() <= bigger.getVirtualCores(); + smaller.getVirtualCores() <= bigger.getVirtualCores() && + smaller.getVirtualDisks() <= bigger.getVirtualDisks(); } public static Resource componentwiseMin(Resource lhs, Resource rhs) { return createResource(Math.min(lhs.getMemory(), rhs.getMemory()), - Math.min(lhs.getVirtualCores(), rhs.getVirtualCores())); + Math.min(lhs.getVirtualCores(), rhs.getVirtualCores()), + Math.min(lhs.getVirtualDisks(), rhs.getVirtualDisks())); } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 1e7d544..0a76a79 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -8,9 +8,7 @@ The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -23,7 +21,7 @@ - + Factory to create client IPC classes. @@ -45,14 +43,14 @@ yarn.ipc.rpc.class org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC - + The hostname of the RM. yarn.resourcemanager.hostname 0.0.0.0 - - + + The address of the applications manager interface in the RM. yarn.resourcemanager.address @@ -100,14 +98,14 @@ - - This configures the HTTP endpoint for Yarn Daemons.The following - values are supported: - - HTTP_ONLY : Service is provided only on http - - HTTPS_ONLY : Service is provided only on https - - yarn.http.policy - HTTP_ONLY + + This configures the HTTP endpoint for Yarn Daemons.The following + values are supported: + - HTTP_ONLY : Service is provided only on http + - HTTPS_ONLY : Service is provided only on https + + yarn.http.policy + HTTP_ONLY @@ -153,25 +151,25 @@ Maximum time to wait to establish connection to - ResourceManager. + ResourceManager. yarn.resourcemanager.connect.max-wait.ms 900000 How often to try connecting to the - ResourceManager. + ResourceManager. yarn.resourcemanager.connect.retry-interval.ms 30000 The maximum number of application attempts. It's a global - setting for all application masters. Each application master can specify - its individual maximum number of application attempts via the API, but the - individual number cannot be more than the global upper bound. If it is, - the resourcemanager will override it. The default number is set to 2, to - allow at least one retry for AM. + setting for all application masters. Each application master can specify + its individual maximum number of application attempts via the API, but the + individual number cannot be more than the global upper bound. 
If it is, + the resourcemanager will override it. The default number is set to 2, to + allow at least one retry for AM. yarn.resourcemanager.am.max-attempts 2 @@ -190,9 +188,9 @@ Flag to enable override of the default kerberos authentication - filter with the RM authentication filter to allow authentication using - delegation tokens(fallback to kerberos if the tokens are missing). Only - applicable when the http authentication type is kerberos. + filter with the RM authentication filter to allow authentication using + delegation tokens(fallback to kerberos if the tokens are missing). Only + applicable when the http authentication type is kerberos. yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled true @@ -229,38 +227,54 @@ The minimum allocation for every container request at the RM, - in MBs. Memory requests lower than this won't take effect, - and the specified value will get allocated at minimum. + in MBs. Memory requests lower than this won't take effect, + and the specified value will get allocated at minimum. yarn.scheduler.minimum-allocation-mb 1024 The maximum allocation for every container request at the RM, - in MBs. Memory requests higher than this won't take effect, - and will get capped to this value. + in MBs. Memory requests higher than this won't take effect, + and will get capped to this value. yarn.scheduler.maximum-allocation-mb 8192 The minimum allocation for every container request at the RM, - in terms of virtual CPU cores. Requests lower than this won't take effect, - and the specified value will get allocated the minimum. + in terms of virtual CPU cores. Requests lower than this won't take effect, + and the specified value will get allocated the minimum. yarn.scheduler.minimum-allocation-vcores 1 The maximum allocation for every container request at the RM, - in terms of virtual CPU cores. Requests higher than this won't take effect, - and will get capped to this value. + in terms of virtual CPU cores. 
Requests higher than this won't take effect, + and will get capped to this value. yarn.scheduler.maximum-allocation-vcores 32 - Enable RM to recover state after starting. If true, then + The minimum allocation for every container request at the RM, + in terms of virtual disks. Requests lower than this won't take effect, + and the specified value will get allocated the minimum. + yarn.scheduler.minimum-allocation-vdisks + 0 + + + + The maximum allocation for every container request at the RM, + in terms of virtual disks. Requests higher than this won't take effect, + and will get capped to this value. + yarn.scheduler.maximum-allocation-vdisks + 20 + + + + Enable RM to recover state after starting. If true, then yarn.resourcemanager.store.class must be specified. yarn.resourcemanager.recovery.enabled false @@ -268,7 +282,7 @@ Enable RM work preserving recovery. This configuration is private - to YARN for experimenting the feature. + to YARN for experimenting the feature. yarn.resourcemanager.work-preserving-recovery.enabled false @@ -276,9 +290,9 @@ Set the amount of time RM waits before allocating new - containers on work-preserving-recovery. Such wait period gives RM a chance - to settle down resyncing with NMs in the cluster on recovery, before assigning - new containers to applications. + containers on work-preserving-recovery. Such wait period gives RM a chance + to settle down resyncing with NMs in the cluster on recovery, before assigning + new containers to applications. yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms 10000 @@ -299,14 +313,14 @@ The maximum number of completed applications RM state - store keeps, less than or equals to ${yarn.resourcemanager.max-completed-applications}. - By default, it equals to ${yarn.resourcemanager.max-completed-applications}. - This ensures that the applications kept in the state store are consistent with - the applications remembered in RM memory. 
- Any values larger than ${yarn.resourcemanager.max-completed-applications} will - be reset to ${yarn.resourcemanager.max-completed-applications}. - Note that this value impacts the RM recovery performance.Typically, - a smaller value indicates better performance on RM recovery. + store keeps, less than or equals to ${yarn.resourcemanager.max-completed-applications}. + By default, it equals to ${yarn.resourcemanager.max-completed-applications}. + This ensures that the applications kept in the state store are consistent with + the applications remembered in RM memory. + Any values larger than ${yarn.resourcemanager.max-completed-applications} will + be reset to ${yarn.resourcemanager.max-completed-applications}. + Note that this value impacts the RM recovery performance.Typically, + a smaller value indicates better performance on RM recovery. yarn.resourcemanager.state-store.max-completed-applications ${yarn.resourcemanager.max-completed-applications} @@ -339,19 +353,19 @@ Full path of the ZooKeeper znode where RM state will be - stored. This must be supplied when using - org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore - as the value for yarn.resourcemanager.store.class + stored. This must be supplied when using + org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore + as the value for yarn.resourcemanager.store.class yarn.resourcemanager.zk-state-store.parent-path /rmstore ZooKeeper session timeout in milliseconds. Session expiration - is managed by the ZooKeeper cluster itself, not by the client. This value is - used by the cluster to determine when the client's session expires. - Expirations happens when the cluster does not hear from the client within - the specified session timeout period (i.e. no heartbeat). + is managed by the ZooKeeper cluster itself, not by the client. This value is + used by the cluster to determine when the client's session expires. 
+ Expirations happens when the cluster does not hear from the client within + the specified session timeout period (i.e. no heartbeat). yarn.resourcemanager.zk-timeout-ms 10000 @@ -389,21 +403,21 @@ - Specify the auths to be used for the ACL's specified in both the - yarn.resourcemanager.zk-acl and - yarn.resourcemanager.zk-state-store.root-node.acl properties. This - takes a comma-separated list of authentication mechanisms, each of the - form 'scheme:auth' (the same syntax used for the 'addAuth' command in - the ZK CLI). + Specify the auths to be used for the ACL's specified in both the + yarn.resourcemanager.zk-acl and + yarn.resourcemanager.zk-state-store.root-node.acl properties. This + takes a comma-separated list of authentication mechanisms, each of the + form 'scheme:auth' (the same syntax used for the 'addAuth' command in + the ZK CLI). yarn.resourcemanager.zk-auth URI pointing to the location of the FileSystem path where - RM state will be stored. This must be supplied when using - org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore - as the value for yarn.resourcemanager.store.class + RM state will be stored. This must be supplied when using + org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore + as the value for yarn.resourcemanager.store.class yarn.resourcemanager.fs.state-store.uri ${hadoop.tmp.dir}/yarn/system/rmstore @@ -411,9 +425,9 @@ hdfs client retry policy specification. hdfs client retry - is always enabled. Specified in pairs of sleep-time and number-of-retries - and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on - average, the following n1 retries sleep t1 milliseconds on average, and so on. + is always enabled. Specified in pairs of sleep-time and number-of-retries + and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on + average, the following n1 retries sleep t1 milliseconds on average, and so on. 
yarn.resourcemanager.fs.state-store.retry-policy-spec 2000, 500 @@ -554,23 +568,23 @@ - If true, ResourceManager will have proxy-user privileges. - Use case: In a secure cluster, YARN requires the user hdfs delegation-tokens to - do localization and log-aggregation on behalf of the user. If this is set to true, - ResourceManager is able to request new hdfs delegation tokens on behalf of - the user. This is needed by long-running-service, because the hdfs tokens - will eventually expire and YARN requires new valid tokens to do localization - and log-aggregation. Note that to enable this use case, the corresponding - HDFS NameNode has to configure ResourceManager as the proxy-user so that - ResourceManager can itself ask for new tokens on behalf of the user when - tokens are past their max-life-time. + If true, ResourceManager will have proxy-user privileges. + Use case: In a secure cluster, YARN requires the user hdfs delegation-tokens to + do localization and log-aggregation on behalf of the user. If this is set to true, + ResourceManager is able to request new hdfs delegation tokens on behalf of + the user. This is needed by long-running-service, because the hdfs tokens + will eventually expire and YARN requires new valid tokens to do localization + and log-aggregation. Note that to enable this use case, the corresponding + HDFS NameNode has to configure ResourceManager as the proxy-user so that + ResourceManager can itself ask for new tokens on behalf of the user when + tokens are past their max-life-time. yarn.resourcemanager.proxy-user-privileges.enabled false Interval for the roll over for the master key used to generate - application tokens + application tokens yarn.resourcemanager.am-rm-tokens.master-key-rolling-interval-secs 86400 @@ -578,10 +592,10 @@ Interval for the roll over for the master key used to generate - container tokens. 
It is expected to be much greater than - yarn.nm.liveness-monitor.expiry-interval-ms and - yarn.rm.container-allocation.expiry-interval-ms. Otherwise the - behavior is undefined. + container tokens. It is expected to be much greater than + yarn.nm.liveness-monitor.expiry-interval-ms and + yarn.rm.container-allocation.expiry-interval-ms. Otherwise the + behavior is undefined. yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs 86400 @@ -603,26 +617,26 @@ Enable a set of periodic monitors (specified in - yarn.resourcemanager.scheduler.monitor.policies) that affect the - scheduler. + yarn.resourcemanager.scheduler.monitor.policies) that affect the + scheduler. yarn.resourcemanager.scheduler.monitor.enable false The list of SchedulingEditPolicy classes that interact with - the scheduler. A particular module may be incompatible with the - scheduler, other policies, or a configuration of either. + the scheduler. A particular module may be incompatible with the + scheduler, other policies, or a configuration of either. yarn.resourcemanager.scheduler.monitor.policies org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy The class to use as the configuration provider. - If org.apache.hadoop.yarn.LocalConfigurationProvider is used, - the local configuration will be loaded. - If org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider is used, - the configuration which will be loaded should be uploaded to remote File system first. + If org.apache.hadoop.yarn.LocalConfigurationProvider is used, + the local configuration will be loaded. + If org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider is used, + the configuration which will be loaded should be uploaded to remote File system first. 
yarn.resourcemanager.configuration.provider-class org.apache.hadoop.yarn.LocalConfigurationProvider @@ -631,14 +645,14 @@ The setting that controls whether yarn system metrics is - published on the timeline server or not by RM. + published on the timeline server or not by RM. yarn.resourcemanager.system-metrics-publisher.enabled false Number of worker threads that send the yarn system metrics - data. + data. yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size 10 @@ -649,7 +663,7 @@ yarn.nodemanager.hostname 0.0.0.0 - + The address of the container manager in the NM. yarn.nodemanager.address @@ -683,7 +697,7 @@ who will execute(launch) the containers. yarn.nodemanager.container-executor.class org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor - + @@ -700,18 +714,18 @@ - Number of seconds after an application finishes before the nodemanager's + Number of seconds after an application finishes before the nodemanager's DeletionService will delete the application's localized file directory and log directory. - + To diagnose Yarn application problems, set this property's value large enough (for example, to 600 = 10 minutes) to permit examination of these - directories. After changing the property's value, you must restart the + directories. After changing the property's value, you must restart the nodemanager in order for it to have an effect. The roots of Yarn applications' work directories is configurable with the yarn.nodemanager.local-dirs property (see below), and the roots - of the Yarn applications' log directories is configurable with the + of the Yarn applications' log directories is configurable with the yarn.nodemanager.log-dirs property (see also below). yarn.nodemanager.delete.debug-delay-sec @@ -725,12 +739,12 @@ - List of directories to store localized files in. An + List of directories to store localized files in. 
An application's localized file directory will be found in: ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}. Individual containers' work directories, called container_${contid}, will be subdirectories of this. - + yarn.nodemanager.local-dirs ${hadoop.tmp.dir}/nm-local-dir @@ -749,7 +763,7 @@ until it becomes full. If a file is removed from a sub-directory that is marked full, then that sub-directory will be used back again to localize files. - + yarn.nodemanager.local-cache.max-files-per-directory 8192 @@ -768,7 +782,7 @@ Target size of localizer cache in MB, per nodemanager. It is - a target retention size that only includes resources with PUBLIC and + a target retention size that only includes resources with PUBLIC and PRIVATE visibility and excludes resources with APPLICATION visibility yarn.nodemanager.localizer.cache.target-size-mb @@ -789,9 +803,9 @@ - Where to store container logs. An application's localized log directory + Where to store container logs. An application's localized log directory will be found in ${yarn.nodemanager.log-dirs}/application_${appid}. - Individual containers' log directories will be below this, in directories + Individual containers' log directories will be below this, in directories named container_{$contid}. Each container directory will contain the files stderr, stdin, and syslog generated by that container. @@ -813,24 +827,24 @@ - How long to keep aggregation logs before deleting them. -1 disables. - Be careful set this too small and you will spam the name node. + How long to keep aggregation logs before deleting them. -1 disables. + Be careful set this too small and you will spam the name node. yarn.log-aggregation.retain-seconds -1 - - + + How long to wait between aggregated log retention checks. - If set to 0 or a negative value then the value is computed as one-tenth - of the aggregated log retention time. Be careful set this too small and - you will spam the name node. 
+ If set to 0 or a negative value then the value is computed as one-tenth + of the aggregated log retention time. Be careful set this too small and + you will spam the name node. yarn.log-aggregation.retain-check-interval-seconds -1 Time in seconds to retain user logs. Only applicable if - log aggregation is disabled + log aggregation is disabled yarn.nodemanager.log.retain-seconds 10800 @@ -842,7 +856,7 @@ /tmp/logs - The remote log dir will be created at + The remote log dir will be created at {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam} yarn.nodemanager.remote-app-log-dir-suffix @@ -850,31 +864,31 @@ - Amount of physical memory, in MB, that can be allocated - for containers. + Amount of physical memory, in MB, that can be allocated + for containers. yarn.nodemanager.resource.memory-mb 8192 Whether physical memory limits will be enforced for - containers. + containers. yarn.nodemanager.pmem-check-enabled true Whether virtual memory limits will be enforced for - containers. + containers. yarn.nodemanager.vmem-check-enabled true Ratio between virtual memory to physical memory when - setting memory limits for containers. Container allocations are - expressed in terms of physical memory, and virtual memory usage - is allowed to exceed this allocation by this ratio. + setting memory limits for containers. Container allocations are + expressed in terms of physical memory, and virtual memory usage + is allowed to exceed this allocation by this ratio. yarn.nodemanager.vmem-pmem-ratio 2.1 @@ -882,24 +896,31 @@ Number of vcores that can be allocated - for containers. This is used by the RM scheduler when allocating - resources for containers. This is not used to limit the number of - physical cores used by YARN containers. + for containers. This is used by the RM scheduler when allocating + resources for containers. This is not used to limit the number of + physical cores used by YARN containers. 
yarn.nodemanager.resource.cpu-vcores 8 Percentage of CPU that can be allocated - for containers. This setting allows users to limit the amount of - CPU that YARN containers use. Currently functional only - on Linux using cgroups. The default is to use 100% of CPU. + for containers. This setting allows users to limit the amount of + CPU that YARN containers use. Currently functional only + on Linux using cgroups. The default is to use 100% of CPU. yarn.nodemanager.resource.percentage-physical-cpu-limit 100 + Number of virtual disk resources that can be allocated + for containers. + yarn.nodemanager.resource.vdisks + 20 + + + NM Webapp address. yarn.nodemanager.webapp.address ${yarn.nodemanager.hostname}:8042 @@ -948,28 +969,28 @@ The minimum fraction of number of disks to be healthy for the - nodemanager to launch new containers. This correspond to both - yarn-nodemanager.local-dirs and yarn.nodemanager.log-dirs. i.e. If there - are less number of healthy local-dirs (or log-dirs) available, then - new containers will not be launched on this node. + nodemanager to launch new containers. This correspond to both + yarn-nodemanager.local-dirs and yarn.nodemanager.log-dirs. i.e. If there + are less number of healthy local-dirs (or log-dirs) available, then + new containers will not be launched on this node. yarn.nodemanager.disk-health-checker.min-healthy-disks 0.25 - The maximum percentage of disk space utilization allowed after - which a disk is marked as bad. Values can range from 0.0 to 100.0. - If the value is greater than or equal to 100, the nodemanager will check - for full disk. This applies to yarn-nodemanager.local-dirs and - yarn.nodemanager.log-dirs. + The maximum percentage of disk space utilization allowed after + which a disk is marked as bad. Values can range from 0.0 to 100.0. + If the value is greater than or equal to 100, the nodemanager will check + for full disk. This applies to yarn-nodemanager.local-dirs and + yarn.nodemanager.log-dirs. 
yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage 90.0 The minimum space that must be available on a disk for - it to be used. This applies to yarn-nodemanager.local-dirs and - yarn.nodemanager.log-dirs. + it to be used. This applies to yarn-nodemanager.local-dirs and + yarn.nodemanager.log-dirs. yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb 0 @@ -988,27 +1009,27 @@ The cgroups hierarchy under which to place YARN proccesses (cannot contain commas). - If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have - been pre-configured), then this cgroups hierarchy must already exist and be writable by the - NodeManager user, otherwise the NodeManager may fail. - Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler. + If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have + been pre-configured), then this cgroups hierarchy must already exist and be writable by the + NodeManager user, otherwise the NodeManager may fail. + Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler. yarn.nodemanager.linux-container-executor.cgroups.hierarchy /hadoop-yarn Whether the LCE should attempt to mount cgroups if not found. - Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler. + Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler. yarn.nodemanager.linux-container-executor.cgroups.mount false Where the LCE should attempt to mount cgroups if not found. Common locations - include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux - distribution in use. This path must exist before the NodeManager is launched. - Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and - yarn.nodemanager.linux-container-executor.cgroups.mount is true. 
+ include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux + distribution in use. This path must exist before the NodeManager is launched. + Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and + yarn.nodemanager.linux-container-executor.cgroups.mount is true. yarn.nodemanager.linux-container-executor.cgroups.mount-path @@ -1035,18 +1056,18 @@ The allowed pattern for UNIX user names enforced by - Linux-container-executor when used in nonsecure mode (use case for this - is using cgroups). The default value is taken from /usr/sbin/adduser + Linux-container-executor when used in nonsecure mode (use case for this + is using cgroups). The default value is taken from /usr/sbin/adduser yarn.nodemanager.linux-container-executor.nonsecure-mode.user-pattern ^[_.A-Za-z0-9][-@_.A-Za-z0-9]{0,255}?[$]?$ This flag determines whether apps should run with strict resource limits - or be allowed to consume spare resources if they need them. For example, turning the - flag on will restrict apps to use only their share of CPU, even if the node has spare - CPU cycles. The default value is false i.e. use available resources. Please note that - turning this flag on may reduce job throughput on the cluster. + or be allowed to consume spare resources if they need them. For example, turning the + flag on will restrict apps to use only their share of CPU, even if the node has spare + CPU cycles. The default value is false i.e. use available resources. Please note that + turning this flag on may reduce job throughput on the cluster. yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage false @@ -1083,8 +1104,8 @@ - The minimum allowed version of a resourcemanager that a nodemanager will connect to. - The valid values are NONE (no version checking), EqualToNM (the resourcemanager's version is + The minimum allowed version of a resourcemanager that a nodemanager will connect to. 
+ The valid values are NONE (no version checking), EqualToNM (the resourcemanager's version is equal to or greater than the NM version), or a Version String. yarn.nodemanager.resourcemanager.minimum.version NONE @@ -1092,7 +1113,7 @@ Max number of threads in NMClientAsync to process container - management events + management events yarn.client.nodemanager-client-async.thread-pool-max-size 500 @@ -1132,7 +1153,7 @@ yarn.client.max-cached-nodemanagers-proxies 0 - + Enable the node manager to recover after starting yarn.nodemanager.recovery.enabled @@ -1141,7 +1162,7 @@ The local filesystem directory in which the node manager will - store state when recovery is enabled. + store state when recovery is enabled. yarn.nodemanager.recovery.dir ${hadoop.tmp.dir}/yarn-nm-recovery @@ -1163,34 +1184,34 @@ - + The kerberos principal for the proxy, if the proxy is not - running as part of the RM. + running as part of the RM. yarn.web-proxy.principal - + - Keytab for WebAppProxy, if the proxy is not running as part of - the RM. + Keytab for WebAppProxy, if the proxy is not running as part of + the RM. yarn.web-proxy.keytab - + The address for the web proxy as HOST:PORT, if this is not - given then the proxy will run as part of the RM - yarn.web-proxy.address - + given then the proxy will run as part of the RM + yarn.web-proxy.address + - + CLASSPATH for YARN applications. A comma-separated list of CLASSPATH entries. When this value is empty, the following default - CLASSPATH for YARN applications would be used. + CLASSPATH for YARN applications would be used. For Linux: $HADOOP_CONF_DIR, $HADOOP_COMMON_HOME/share/hadoop/common/*, @@ -1216,7 +1237,7 @@ Indicate to clients whether timeline service is enabled or not. - If enabled, clients will put entities and events to the timeline server. + If enabled, clients will put entities and events to the timeline server. 
yarn.timeline-service.enabled false @@ -1230,7 +1251,7 @@ This is default address for the timeline server to start the - RPC server. + RPC server. yarn.timeline-service.address ${yarn.timeline-service.hostname}:10200 @@ -1345,7 +1366,7 @@ - Default maximum number of retires for timeline servive client. + Default maximum number of retries for timeline service client. yarn.timeline-service.client.max-retries 30 @@ -1353,7 +1374,7 @@ - Default retry time interval for timeline servive client. + Default retry time interval for timeline service client. yarn.timeline-service.client.retry-interval-ms 1000 @@ -1374,7 +1395,7 @@ The level of nested directories before getting to the checksum - directories. It must be non-negative. + directories. It must be non-negative. yarn.sharedcache.nested-level 3 @@ -1390,60 +1411,60 @@ yarn.sharedcache.app-checker.class org.apache.hadoop.yarn.server.sharedcachemanager.RemoteAppChecker - + A resource in the in-memory store is considered stale - if the time since the last reference exceeds the staleness period. - This value is specified in minutes. + if the time since the last reference exceeds the staleness period. + This value is specified in minutes. yarn.sharedcache.store.in-memory.staleness-period-mins 10080 - + Initial delay before the in-memory store runs its first check - to remove dead initial applications. Specified in minutes. + to remove dead initial applications. Specified in minutes. yarn.sharedcache.store.in-memory.initial-delay-mins 10 - + The frequency at which the in-memory store checks to remove - dead initial applications. Specified in minutes. + dead initial applications. Specified in minutes. yarn.sharedcache.store.in-memory.check-period-mins 720 The frequency at which a cleaner task runs. - Specified in minutes. + Specified in minutes. yarn.sharedcache.cleaner.period-mins 1440 Initial delay before the first cleaner task is scheduled. - Specified in minutes. + Specified in minutes.
yarn.sharedcache.cleaner.initial-delay-mins 10 The time to sleep between processing each shared cache - resource. Specified in milliseconds. + resource. Specified in milliseconds. yarn.sharedcache.cleaner.resource-sleep-ms 0 The address of the node manager interface in the SCM - (shared cache manager) + (shared cache manager) yarn.sharedcache.uploader.server.address 0.0.0.0:8046 The number of threads used to handle shared cache manager - requests from the node manager (50 by default) + requests from the node manager (50 by default) yarn.sharedcache.uploader.server.thread-count 50 @@ -1451,20 +1472,20 @@ The interval that the yarn client library uses to poll the - completion status of the asynchronous API of application client protocol. + completion status of the asynchronous API of application client protocol. yarn.client.application-client-protocol.poll-interval-ms 200 - RSS usage of a process computed via - /proc/pid/stat is not very accurate as it includes shared pages of a - process. /proc/pid/smaps provides useful information like - Private_Dirty, Private_Clean, Shared_Dirty, Shared_Clean which can be used - for computing more accurate RSS. When this flag is enabled, RSS is computed - as Min(Shared_Dirty, Pss) + Private_Clean + Private_Dirty. It excludes - read-only shared mappings in RSS computation. + RSS usage of a process computed via + /proc/pid/stat is not very accurate as it includes shared pages of a + process. /proc/pid/smaps provides useful information like + Private_Dirty, Private_Clean, Shared_Dirty, Shared_Clean which can be used + for computing more accurate RSS. When this flag is enabled, RSS is computed + as Min(Shared_Dirty, Pss) + Private_Clean + Private_Dirty. It excludes + read-only shared mappings in RSS computation. yarn.nodemanager.container-monitor.procfs-tree.smaps-based-rss.enabled false @@ -1597,12 +1618,12 @@ Defines how often NMs wake up to upload log files. - The default value is -1. 
By default, the logs will be uploaded when - the application is finished. By setting this configure, logs can be uploaded - periodically when the application is running. The minimum rolling-interval-seconds - can be set is 3600. + The default value is -1. By default, the logs will be uploaded when + the application is finished. By setting this configure, logs can be uploaded + periodically when the application is running. The minimum rolling-interval-seconds + can be set is 3600. yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds -1 - + \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java index 45b2a06..61327fd 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestContainerLaunchRPC.java @@ -99,7 +99,7 @@ private void testRPCTimeout(String rpcClass) throws Exception { ContainerId containerId = ContainerId.newInstance(applicationAttemptId, 100); NodeId nodeId = NodeId.newInstance("localhost", 1234); - Resource resource = Resource.newInstance(1234, 2); + Resource resource = Resource.newInstance(1234, 2, 3); ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier(containerId, "localhost", "user", resource, System.currentTimeMillis() + 10000, 42, 42, diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java index f497d27..4a8b2c3 100644 --- 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceDecrease.java @@ -35,7 +35,7 @@ public void testResourceDecreaseContext() { ContainerId containerId = ContainerId .newInstance(ApplicationAttemptId.newInstance( ApplicationId.newInstance(1234, 3), 3), 7); - Resource resource = Resource.newInstance(1023, 3); + Resource resource = Resource.newInstance(1023, 3, 4); ContainerResourceDecrease ctx = ContainerResourceDecrease.newInstance( containerId, resource); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java index d307e39..74a8fe8 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncrease.java @@ -40,7 +40,7 @@ public void testResourceIncreaseContext() { ContainerId containerId = ContainerId .newInstance(ApplicationAttemptId.newInstance( ApplicationId.newInstance(1234, 3), 3), 7); - Resource resource = Resource.newInstance(1023, 3); + Resource resource = Resource.newInstance(1023, 3, 4); ContainerResourceIncrease ctx = ContainerResourceIncrease.newInstance( containerId, resource, token); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java index 0acad00..8359cfc 100644 --- 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerResourceIncreaseRequest.java @@ -35,7 +35,7 @@ public void ContainerResourceIncreaseRequest() { ContainerId containerId = ContainerId .newInstance(ApplicationAttemptId.newInstance( ApplicationId.newInstance(1234, 3), 3), 7); - Resource resource = Resource.newInstance(1023, 3); + Resource resource = Resource.newInstance(1023, 3, 4); ContainerResourceIncreaseRequest context = ContainerResourceIncreaseRequest .newInstance(containerId, resource); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index 8f042a8..82ec525 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -400,6 +400,14 @@ public static Resource newResource(int memory, int vCores) { return resource; } + public static Resource newResource(int memory, int vCores, int vDisks) { + Resource resource = recordFactory.newRecordInstance(Resource.class); + resource.setMemory(memory); + resource.setVirtualCores(vCores); + resource.setVirtualDisks(vDisks); + return resource; + } + public static URL newURL(String scheme, String host, int port, String file) { URL url = recordFactory.newRecordInstance(URL.class); url.setScheme(scheme); diff --git 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java index da25aa2..01c7e88 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java @@ -133,6 +133,7 @@ public void testRegisterNodeManagerRequestPBImpl() { Resource resource = recordFactory.newRecordInstance(Resource.class); resource.setMemory(10000); resource.setVirtualCores(2); + resource.setVirtualDisks(3); original.setResource(resource); RegisterNodeManagerRequestPBImpl copy = new RegisterNodeManagerRequestPBImpl( original.getProto()); @@ -141,7 +142,7 @@ public void testRegisterNodeManagerRequestPBImpl() { assertEquals(9090, copy.getNodeId().getPort()); assertEquals(10000, copy.getResource().getMemory()); assertEquals(2, copy.getResource().getVirtualCores()); - + assertEquals(3, copy.getResource().getVirtualDisks()); } /** diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java index ed902ba..99c2c9a 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/api/protocolrecords/TestProtocolRecords.java @@ -52,7 +52,7 @@ 
public void testNMContainerStatus() { ApplicationId appId = ApplicationId.newInstance(123456789, 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId containerId = ContainerId.newInstance(attemptId, 1); - Resource resource = Resource.newInstance(1000, 200); + Resource resource = Resource.newInstance(1000, 200, 300); NMContainerStatus report = NMContainerStatus.newInstance(containerId, @@ -80,13 +80,13 @@ public void testRegisterNodeManagerRequest() { NMContainerStatus containerReport = NMContainerStatus.newInstance(containerId, - ContainerState.RUNNING, Resource.newInstance(1024, 1), "diagnostics", + ContainerState.RUNNING, Resource.newInstance(1024, 1, 2), "diagnostics", 0, Priority.newInstance(10), 1234); List reports = Arrays.asList(containerReport); RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance( NodeId.newInstance("1.1.1.1", 1000), 8080, - Resource.newInstance(1024, 1), "NM-version-id", reports, + Resource.newInstance(1024, 1, 2), "NM-version-id", reports, Arrays.asList(appId)); RegisterNodeManagerRequest requestProto = new RegisterNodeManagerRequestPBImpl( @@ -97,7 +97,7 @@ public void testRegisterNodeManagerRequest() { Assert.assertEquals("NM-version-id", requestProto.getNMVersion()); Assert.assertEquals(NodeId.newInstance("1.1.1.1", 1000), requestProto.getNodeId()); - Assert.assertEquals(Resource.newInstance(1024, 1), + Assert.assertEquals(Resource.newInstance(1024, 1, 2), requestProto.getResource()); Assert.assertEquals(1, requestProto.getRunningApplications().size()); Assert.assertEquals(appId, requestProto.getRunningApplications().get(0)); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 
ebbe503..2e6a724 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -142,7 +142,12 @@ protected void serviceInit(Configuration conf) throws Exception { conf.getInt( YarnConfiguration.NM_VCORES, YarnConfiguration.DEFAULT_NM_VCORES); - this.totalResource = Resource.newInstance(memoryMb, virtualCores); + int virtualDisks = + conf.getInt( + YarnConfiguration.NM_VDISKS, YarnConfiguration.DEFAULT_NM_VDISKS); + + this.totalResource = + Resource.newInstance(memoryMb, virtualCores, virtualDisks); metrics.addResource(totalResource); this.tokenKeepAliveEnabled = isTokenKeepAliveEnabled(conf); this.tokenRemovalDelayMs = diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 02a63ac..f5c4757 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -20,15 +20,18 @@ import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -321,6 +324,9 @@ public MonitoringThread() { super("Container Monitor"); } + private Set ioContainersToBeMonitored = + new HashSet(); + @Override public void run() { @@ -345,6 +351,7 @@ public void run() { ProcessTreeInfo processTreeInfo = entry.getValue(); LOG.info("Starting resource-monitoring for " + containerId); trackingContainers.put(containerId, processTreeInfo); + ioContainersToBeMonitored.add(containerId); } containersToBeAdded.clear(); } @@ -385,6 +392,24 @@ public void run() { ResourceCalculatorProcessTree.getResourceCalculatorProcessTree(pId, processTreeClass, conf); ptInfo.setPid(pId); ptInfo.setProcessTree(pt); + + // monitor disk I/O usage + if (ioContainersToBeMonitored.contains(containerId)) { + final String[] command = new String[] { "bash", "-c", + "pidstat -d -p " + pId + " -T ALL 1 > logs/userlogs/" + containerId.getApplicationAttemptId().getApplicationId() + "/" + containerId + "/io.log"}; + new Thread() { + @Override + public void run() { + Shell.ShellCommandExecutor shExec = new Shell.ShellCommandExecutor(command); + try { + shExec.execute(); + } catch(Exception e) { + LOG.error("Exception while starting io monitor for " + containerId, e); + } + } + }.start(); + ioContainersToBeMonitored.remove(containerId); + } } } // End of initializing any uninitialized processTrees diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java index
3da21f0..e07cee7 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java @@ -43,6 +43,9 @@ @Metric("Current allocated Virtual Cores") MutableGaugeInt allocatedVCores; @Metric MutableGaugeInt availableVCores; + @Metric("Current allocated Virtual Disks") + MutableGaugeInt allocatedVDisks; + @Metric MutableGaugeInt availableVDisks; public static NodeManagerMetrics create() { return create(DefaultMetricsSystem.instance()); @@ -93,6 +96,8 @@ public void allocateContainer(Resource res) { availableGB.decr(res.getMemory() / 1024); allocatedVCores.incr(res.getVirtualCores()); availableVCores.decr(res.getVirtualCores()); + allocatedVDisks.incr(res.getVirtualDisks()); + availableVDisks.decr(res.getVirtualDisks()); } public void releaseContainer(Resource res) { @@ -101,11 +106,14 @@ public void releaseContainer(Resource res) { availableGB.incr(res.getMemory() / 1024); allocatedVCores.decr(res.getVirtualCores()); availableVCores.incr(res.getVirtualCores()); + allocatedVDisks.decr(res.getVirtualDisks()); + availableVDisks.incr(res.getVirtualDisks()); } public void addResource(Resource res) { availableGB.incr(res.getMemory() / 1024); availableVCores.incr(res.getVirtualCores()); + availableVDisks.incr(res.getVirtualDisks()); } public int getRunningContainers() { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java index 63039d8..10bab9e 100644 --- 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java @@ -57,11 +57,13 @@ private String cgroupMountPath; private boolean cpuWeightEnabled = true; + private boolean vdisksWeightEnabled = true; private boolean strictResourceUsageMode = false; private final String MTAB_FILE = "/proc/mounts"; private final String CGROUPS_FSTYPE = "cgroup"; private final String CONTROLLER_CPU = "cpu"; + private final String CONTROLLER_VDISKS = "blkio"; private final String CPU_PERIOD_US = "cfs_period_us"; private final String CPU_QUOTA_US = "cfs_quota_us"; private final int CPU_DEFAULT_WEIGHT = 1024; // set by kernel @@ -135,6 +137,8 @@ void init(LinuxContainerExecutor lce, ResourceCalculatorPlugin plugin) ArrayList cgroupKVs = new ArrayList(); cgroupKVs.add(CONTROLLER_CPU + "=" + cgroupMountPath + "/" + CONTROLLER_CPU); + cgroupKVs.add(CONTROLLER_VDISKS + "=" + cgroupMountPath + "/" + + CONTROLLER_VDISKS); lce.mountCgroups(cgroupKVs, cgroupPrefix); } @@ -211,6 +215,10 @@ boolean isCpuWeightEnabled() { return this.cpuWeightEnabled; } + boolean isVdisksWeightEnabled() { + return this.vdisksWeightEnabled; + } + /* * Next four functions are for an individual cgroup. 
*/ @@ -318,12 +326,27 @@ private void setupLimits(ContainerId containerId, } } } + + if (isVdisksWeightEnabled()) { + createCgroup(CONTROLLER_VDISKS, containerName); + // The allowed weight is [100, 1000] for cgroups + int nodeVdisks = conf.getInt(YarnConfiguration.NM_VDISKS, + YarnConfiguration.DEFAULT_NM_VDISKS); + int vdisksShares = 100 + + (int)((900.0) * containerResource.getVirtualDisks() / nodeVdisks); + updateCgroup(CONTROLLER_VDISKS, containerName, "weight", + String.valueOf(vdisksShares)); + } } private void clearLimits(ContainerId containerId) { if (isCpuWeightEnabled()) { deleteCgroup(pathForCgroup(CONTROLLER_CPU, containerId.toString())); } + + if (isVdisksWeightEnabled()) { + deleteCgroup(pathForCgroup(CONTROLLER_VDISKS, containerId.toString())); + } } /* @@ -349,6 +372,11 @@ public String getResourcesOption(ContainerId containerId) { sb.append(","); } + if (isVdisksWeightEnabled()) { + sb.append(pathForCgroup(CONTROLLER_VDISKS, containerName) + "/tasks"); + sb.append(","); + } + if (sb.charAt(sb.length() - 1) == ',') { sb.deleteCharAt(sb.length() - 1); } @@ -438,6 +466,24 @@ private void initializeControllerPaths() throws IOException { throw new IOException("Not able to enforce cpu weights; cannot find " + "cgroup for cpu controller in " + getMtabFileName()); } + + // VDisks + + controllerPath = findControllerInMtab(CONTROLLER_VDISKS, parsedMtab); + + if (controllerPath != null) { + File f = new File(controllerPath + "/" + this.cgroupPrefix); + + if (FileUtil.canWrite(f)) { + controllerPaths.put(CONTROLLER_VDISKS, controllerPath); + } else { + throw new IOException("Not able to enforce vdisks weights; cannot write " + + "to cgroup at: " + controllerPath); + } + } else { + throw new IOException("Not able to enforce vdisks weights; cannot find " + + "cgroup for blkio controller in " + getMtabFileName()); + } } @VisibleForTesting diff --git
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 5c2dd2c..ec8b41d 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -225,7 +225,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) ContainerId.newInstance(appAttemptID, heartBeatID); ContainerLaunchContext launchContext = recordFactory .newRecordInstance(ContainerLaunchContext.class); - Resource resource = BuilderUtils.newResource(2, 1); + Resource resource = BuilderUtils.newResource(2, 1, 1); long currentTime = System.currentTimeMillis(); String user = "testUser"; ContainerTokenIdentifier containerToken = BuilderUtils @@ -257,7 +257,7 @@ public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) .newRecordInstance(ContainerLaunchContext.class); long currentTime = System.currentTimeMillis(); String user = "testUser"; - Resource resource = BuilderUtils.newResource(3, 1); + Resource resource = BuilderUtils.newResource(3, 1, 2); ContainerTokenIdentifier containerToken = BuilderUtils .newContainerTokenIdentifier(BuilderUtils.newContainerToken( secondContainerID, InetAddress.getByName("localhost") diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java index a54450d..3f87e40 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/metrics/TestNodeManagerMetrics.java @@ -32,10 +32,11 @@ Resource total = Records.newRecord(Resource.class); total.setMemory(8*GiB); total.setVirtualCores(16); + total.setVirtualDisks(20); Resource resource = Records.newRecord(Resource.class); resource.setMemory(1*GiB); resource.setVirtualCores(2); - + resource.setVirtualDisks(3); metrics.addResource(total); @@ -60,12 +61,14 @@ metrics.initingContainer(); metrics.runningContainer(); - checkMetrics(5, 1, 1, 1, 1, 1, 2, 2, 6, 4, 12); + checkMetrics(5, 1, 1, 1, 1, 1, 2, 2, 6, 4, 12, 6, 14); } private void checkMetrics(int launched, int completed, int failed, int killed, int initing, int running, int allocatedGB, - int allocatedContainers, int availableGB, int allocatedVCores, int availableVCores) { + int allocatedContainers, int availableGB, + int allocatedVCores, int availableVCores, + int allocatedVDisks, int availableVDisks) { MetricsRecordBuilder rb = getMetrics("NodeManagerMetrics"); assertCounter("ContainersLaunched", launched, rb); assertCounter("ContainersCompleted", completed, rb); @@ -75,9 +78,10 @@ private void checkMetrics(int launched, int completed, int failed, int killed, assertGauge("ContainersRunning", running, rb); assertGauge("AllocatedGB", allocatedGB, rb); assertGauge("AllocatedVCores", allocatedVCores, rb); + assertGauge("AllocatedVDisks", allocatedVDisks, rb); assertGauge("AllocatedContainers", allocatedContainers, rb); assertGauge("AvailableGB", availableGB, rb); 
assertGauge("AvailableVCores",availableVCores, rb); - + assertGauge("AvailableVDisks", availableVDisks, rb); } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index 4beb895..996135e 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -99,6 +99,7 @@ private int minAllocMb; private int minAllocVcores; + private int minAllocVdisks; static { resync.setNodeAction(NodeAction.RESYNC); @@ -144,6 +145,9 @@ protected void serviceInit(Configuration conf) throws Exception { minAllocVcores = conf.getInt( YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + minAllocVdisks = conf.getInt( + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); minimumNodeManagerVersion = conf.get( YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION, @@ -285,7 +289,8 @@ public RegisterNodeManagerResponse registerNodeManager( // Check if this node has minimum allocations if (capability.getMemory() < minAllocMb - || capability.getVirtualCores() < minAllocVcores) { + || capability.getVirtualCores() < minAllocVcores + || capability.getVirtualDisks() < minAllocVdisks) { String message = "NodeManager from " + host + " doesn't satisfy minimum allocations, Sending SHUTDOWN" diff --git 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java index 9dd245b..a6ebcf2 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceType.java @@ -24,5 +24,5 @@ @Private @Evolving public enum ResourceType { - MEMORY, CPU + MEMORY, CPU, DISKIO } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java index 230f9a9..827f65f 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java @@ -28,9 +28,10 @@ private float[] weights = new float[ResourceType.values().length]; - public ResourceWeights(float memoryWeight, float cpuWeight) { + public ResourceWeights(float memoryWeight, float cpuWeight, float vdiskWeight) { weights[ResourceType.MEMORY.ordinal()] = memoryWeight; weights[ResourceType.CPU.ordinal()] = cpuWeight; + weights[ResourceType.DISKIO.ordinal()] = vdiskWeight; } public 
ResourceWeights(float weight) { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java index 507b798..c1aa37a 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java @@ -43,7 +43,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.utils.BuilderUtils; -import org.apache.hadoop.yarn.util.resource.Resources; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,16 +60,20 @@ @Metric("Allocated memory in MB") MutableGaugeInt allocatedMB; @Metric("Allocated CPU in virtual cores") MutableGaugeInt allocatedVCores; + @Metric("Allocated disk I/O in virtual disks") MutableGaugeInt allocatedVDisks; @Metric("# of allocated containers") MutableGaugeInt allocatedContainers; @Metric("Aggregate # of allocated containers") MutableCounterLong aggregateContainersAllocated; @Metric("Aggregate # of released containers") MutableCounterLong aggregateContainersReleased; @Metric("Available memory in MB") MutableGaugeInt availableMB; @Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores; + @Metric("Available disk I/O in virtual disks") MutableGaugeInt availableVDisks; @Metric("Pending memory allocation in MB") MutableGaugeInt pendingMB; @Metric("Pending CPU allocation in virtual cores") MutableGaugeInt pendingVCores; + @Metric("Pending disk I/O 
allocation in virtual disks") MutableGaugeInt pendingVDisks; @Metric("# of pending containers") MutableGaugeInt pendingContainers; @Metric("# of reserved memory in MB") MutableGaugeInt reservedMB; @Metric("Reserved CPU in virtual cores") MutableGaugeInt reservedVCores; + @Metric("Reserved disk I/O in virtual disks") MutableGaugeInt reservedVDisks; @Metric("# of reserved containers") MutableGaugeInt reservedContainers; @Metric("# of active users") MutableGaugeInt activeUsers; @Metric("# of active applications") MutableGaugeInt activeApplications; @@ -319,6 +322,7 @@ public void moveAppTo(AppSchedulingInfo app) { public void setAvailableResourcesToQueue(Resource limit) { availableMB.set(limit.getMemory()); availableVCores.set(limit.getVirtualCores()); + availableVDisks.set(limit.getVirtualDisks()); } /** @@ -356,6 +360,7 @@ private void _incrPendingResources(int containers, Resource res) { pendingContainers.incr(containers); pendingMB.incr(res.getMemory() * containers); pendingVCores.incr(res.getVirtualCores() * containers); + pendingVDisks.incr(res.getVirtualDisks() * containers); } public void decrPendingResources(String user, int containers, Resource res) { @@ -373,6 +378,7 @@ private void _decrPendingResources(int containers, Resource res) { pendingContainers.decr(containers); pendingMB.decr(res.getMemory() * containers); pendingVCores.decr(res.getVirtualCores() * containers); + pendingVDisks.decr(res.getVirtualDisks() * containers); } public void allocateResources(String user, int containers, Resource res, @@ -381,6 +387,7 @@ public void allocateResources(String user, int containers, Resource res, aggregateContainersAllocated.incr(containers); allocatedMB.incr(res.getMemory() * containers); allocatedVCores.incr(res.getVirtualCores() * containers); + allocatedVDisks.incr(res.getVirtualDisks() * containers); if (decrPending) { _decrPendingResources(containers, res); } @@ -398,6 +405,7 @@ public void releaseResources(String user, int containers, Resource res) { 
aggregateContainersReleased.incr(containers); allocatedMB.decr(res.getMemory() * containers); allocatedVCores.decr(res.getVirtualCores() * containers); + allocatedVDisks.decr(res.getVirtualDisks() * containers); QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { userMetrics.releaseResources(user, containers, res); @@ -411,6 +419,7 @@ public void reserveResource(String user, Resource res) { reservedContainers.incr(); reservedMB.incr(res.getMemory()); reservedVCores.incr(res.getVirtualCores()); + reservedVDisks.incr(res.getVirtualDisks()); QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { userMetrics.reserveResource(user, res); @@ -424,6 +433,7 @@ public void unreserveResource(String user, Resource res) { reservedContainers.decr(); reservedMB.decr(res.getMemory()); reservedVCores.decr(res.getVirtualCores()); + reservedVDisks.decr(res.getVirtualDisks()); QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { userMetrics.unreserveResource(user, res); @@ -488,7 +498,8 @@ public int getAppsFailed() { } public Resource getAllocatedResources() { - return BuilderUtils.newResource(allocatedMB.value(), allocatedVCores.value()); + return BuilderUtils.newResource( + allocatedMB.value(), allocatedVCores.value(), allocatedVDisks.value()); } public int getAllocatedMB() { @@ -499,6 +510,10 @@ public int getAllocatedVirtualCores() { return allocatedVCores.value(); } + public int getAllocatedVirtualDisks() { + return allocatedVDisks.value(); + } + public int getAllocatedContainers() { return allocatedContainers.value(); } @@ -511,6 +526,10 @@ public int getAvailableVirtualCores() { return availableVCores.value(); } + public int getAvailableVirtualDisks() { + return availableVDisks.value(); + } + public int getPendingMB() { return pendingMB.value(); } @@ -519,6 +538,10 @@ public int getPendingVirtualCores() { return pendingVCores.value(); } + public int getPendingVirtualDisks() { + return pendingVDisks.value(); + 
} + public int getPendingContainers() { return pendingContainers.value(); } @@ -531,6 +554,10 @@ public int getReservedVirtualCores() { return reservedVCores.value(); } + public int getReservedVirtualDisks() { + return reservedVDisks.value(); + } + public int getReservedContainers() { return reservedContainers.value(); } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 84975b6..26d2027 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -86,9 +86,9 @@ private final Multiset reReservations = HashMultiset.create(); - protected final Resource currentReservation = Resource.newInstance(0, 0); - private Resource resourceLimit = Resource.newInstance(0, 0); - protected Resource currentConsumption = Resource.newInstance(0, 0); + protected final Resource currentReservation = Resource.newInstance(0, 0, 0); + private Resource resourceLimit = Resource.newInstance(0, 0, 0); + protected Resource currentConsumption = Resource.newInstance(0, 0, 0); private Resource amResource = Resources.none(); private boolean unmanagedAM = true; private boolean amRunning = false; diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index f4d8731..7fc5284 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -48,8 +48,8 @@ private static final Log LOG = LogFactory.getLog(SchedulerNode.class); - private Resource availableResource = Resource.newInstance(0, 0); - private Resource usedResource = Resource.newInstance(0, 0); + private Resource availableResource = Resource.newInstance(0, 0, 0); + private Resource usedResource = Resource.newInstance(0, 0, 0); private Resource totalResourceCapability; private RMContainer reservedContainer; private volatile int numContainers; diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index 5d00009..8598029 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -190,7 +190,7 @@ public static void normalizeRequest( /** * Utility method to validate a resource request, by insuring that the - * requested memory/vcore is non-negative and not greater than max + * requested 
memory/vcore/vdisks is non-negative and not greater than max * * @throws InvalidResourceRequestException when there is invalid * request @@ -250,6 +250,16 @@ public static void validateResourceRequest(ResourceRequest resReq, .getAccessibleNodeLabels().iterator(), ','))); } } + if (resReq.getCapability().getVirtualDisks() < 0 || + resReq.getCapability().getVirtualDisks() > + maximumResource.getVirtualDisks()) { + throw new InvalidResourceRequestException("Invalid resource request" + + ", requested virtual disks < 0" + + ", or requested virtual disks > max configured" + + ", requestedVirtualDisks=" + + resReq.getCapability().getVirtualDisks() + + ", maxVirtualDisks=" + maximumResource.getVirtualDisks()); + } } public static boolean checkQueueAccessToNode(Set queueLabels, diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index b36172c..bccc3ac 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -590,7 +590,10 @@ public Resource getMinimumAllocation() { int minimumCores = getInt( YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); - return Resources.createResource(minimumMemory, minimumCores); + int minimumVdisks = getInt( + 
YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); + return Resources.createResource(minimumMemory, minimumCores, minimumVdisks); } public Resource getMaximumAllocation() { @@ -600,7 +603,10 @@ public Resource getMaximumAllocation() { int maximumCores = getInt( YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); - return Resources.createResource(maximumMemory, maximumCores); + int maximumVdisks = getInt( + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS); + return Resources.createResource(maximumMemory, maximumCores, maximumVdisks); } public boolean getEnableUserMetrics() { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java index 70a6496..ce76837 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java @@ -27,7 +27,9 @@ import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType; import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights; +import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; import org.apache.hadoop.yarn.util.resource.Resources; import com.google.common.annotations.VisibleForTesting; @@ -79,6 +81,10 @@ private final Map schedulingPolicies; private final SchedulingPolicy defaultSchedulingPolicy; + + // Which types of resources are used to calculate DRF + private final Set drfPolicyResourcesDefault; + private final Map> drfPolicyResourcesMap; // Policy for mapping apps to queues @VisibleForTesting @@ -96,6 +102,8 @@ public AllocationConfiguration(Map minQueueResources, int queueMaxAppsDefault, float queueMaxAMShareDefault, Map schedulingPolicies, SchedulingPolicy defaultSchedulingPolicy, + Map> drfPolicyResourcesMap, + Set drfPolicyResourcesDefault, Map minSharePreemptionTimeouts, Map fairSharePreemptionTimeouts, Map fairSharePreemptionThresholds, @@ -112,6 +120,8 @@ public AllocationConfiguration(Map minQueueResources, this.queueMaxAppsDefault = queueMaxAppsDefault; this.queueMaxAMShareDefault = queueMaxAMShareDefault; this.defaultSchedulingPolicy = defaultSchedulingPolicy; + this.drfPolicyResourcesMap = drfPolicyResourcesMap; + this.drfPolicyResourcesDefault = drfPolicyResourcesDefault; this.schedulingPolicies = schedulingPolicies; this.minSharePreemptionTimeouts = minSharePreemptionTimeouts; this.fairSharePreemptionTimeouts = fairSharePreemptionTimeouts; @@ -143,6 +153,10 @@ public AllocationConfiguration(Configuration conf) { } placementPolicy = QueuePlacementPolicy.fromConfiguration(conf, configuredQueues); + drfPolicyResourcesDefault = new HashSet(); + drfPolicyResourcesDefault.add(ResourceType.CPU); + drfPolicyResourcesDefault.add(ResourceType.MEMORY); + drfPolicyResourcesMap = new HashMap>(); } /** @@ -246,9 +260,19 @@ public boolean hasAccess(String queueName, QueueACL acl, return false; } - public SchedulingPolicy getSchedulingPolicy(String queueName) { - SchedulingPolicy policy = schedulingPolicies.get(queueName); - return 
(policy == null) ? defaultSchedulingPolicy : policy; + public SchedulingPolicy getSchedulingPolicy(String queueName) + throws AllocationConfigurationException { + SchedulingPolicy policy = schedulingPolicies.containsKey(queueName) ? + schedulingPolicies.get(queueName) : defaultSchedulingPolicy; + if (policy instanceof DominantResourceFairnessPolicy) { + Set enabledResources = getDRFPolicyEnabledResources(queueName); + if (enabledResources == null || enabledResources.isEmpty()) { + enabledResources = drfPolicyResourcesDefault; + } + ((DominantResourceFairnessPolicy) policy) + .setEnabledResourceTypes(enabledResources); + } + return policy; } public SchedulingPolicy getDefaultSchedulingPolicy() { @@ -262,4 +286,9 @@ public SchedulingPolicy getDefaultSchedulingPolicy() { public QueuePlacementPolicy getPlacementPolicy() { return placementPolicy; } + + private Set getDRFPolicyEnabledResources(String queueName) { + return drfPolicyResourcesMap.containsKey(queueName) ? + drfPolicyResourcesMap.get(queueName) : drfPolicyResourcesDefault; + } } \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index 2022510..4b8ccd3 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -1,20 +1,20 @@ /** -* Licensed to the Apache Software Foundation (ASF) under one -* or more 
contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; import java.io.File; @@ -40,6 +40,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType; import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.SystemClock; @@ -56,10 +57,10 @@ @Public @Unstable public class AllocationFileLoaderService extends AbstractService { - + public static final Log LOG = LogFactory.getLog( AllocationFileLoaderService.class.getName()); - + /** Time to wait between checks of the allocation file */ public static final long ALLOC_RELOAD_INTERVAL_MS = 10 * 1000; @@ -75,28 +76,28 @@ private long lastSuccessfulReload; // Last time we successfully reloaded queues private boolean lastReloadAttemptFailed = false; - + // Path to XML file containing allocations. 
private File allocFile; - + private Listener reloadListener; - + @VisibleForTesting long reloadIntervalMs = ALLOC_RELOAD_INTERVAL_MS; - + private Thread reloadThread; private volatile boolean running = true; - + public AllocationFileLoaderService() { this(new SystemClock()); } - + public AllocationFileLoaderService(Clock clock) { super(AllocationFileLoaderService.class.getName()); this.clock = clock; - + } - + @Override public void serviceInit(Configuration conf) throws Exception { this.allocFile = getAllocationFile(conf); @@ -140,7 +141,7 @@ public void run() { } super.serviceInit(conf); } - + @Override public void serviceStart() throws Exception { if (reloadThread != null) { @@ -148,7 +149,7 @@ public void serviceStart() throws Exception { } super.serviceStart(); } - + @Override public void serviceStop() throws Exception { running = false; @@ -162,7 +163,7 @@ public void serviceStop() throws Exception { } super.serviceStop(); } - + /** * Path to XML file containing allocations. If the * path is relative, it is searched for in the @@ -187,11 +188,11 @@ public File getAllocationFile(Configuration conf) { } return allocFile; } - + public synchronized void setReloadListener(Listener reloadListener) { this.reloadListener = reloadListener; } - + /** * Updates the allocation list from the allocation config file. This file is * expected to be in the XML format specified in the design doc. 
@@ -216,6 +217,8 @@ public synchronized void reloadAllocations() throws IOException, Map queueMaxAMShares = new HashMap(); Map queueWeights = new HashMap(); Map queuePolicies = new HashMap(); + Map> drfPolicyResourcesMap = + new HashMap>(); Map minSharePreemptionTimeouts = new HashMap(); Map fairSharePreemptionTimeouts = new HashMap(); Map fairSharePreemptionThresholds = @@ -229,6 +232,9 @@ public synchronized void reloadAllocations() throws IOException, long defaultMinSharePreemptionTimeout = Long.MAX_VALUE; float defaultFairSharePreemptionThreshold = 0.5f; SchedulingPolicy defaultSchedPolicy = SchedulingPolicy.DEFAULT_POLICY; + Set drfPolicyResourcesDefault = new HashSet(); + drfPolicyResourcesDefault.add(ResourceType.CPU); + drfPolicyResourcesDefault.add(ResourceType.MEMORY); QueuePlacementPolicy newPlacementPolicy = null; @@ -244,7 +250,7 @@ public synchronized void reloadAllocations() throws IOException, // Read and parse the allocations file. DocumentBuilderFactory docBuilderFactory = - DocumentBuilderFactory.newInstance(); + DocumentBuilderFactory.newInstance(); docBuilderFactory.setIgnoringComments(true); DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); Document doc = builder.parse(allocFile); @@ -260,7 +266,7 @@ public synchronized void reloadAllocations() throws IOException, if (node instanceof Element) { Element element = (Element)node; if ("queue".equals(element.getTagName()) || - "pool".equals(element.getTagName())) { + "pool".equals(element.getTagName())) { queueElements.add(element); } else if ("user".equals(element.getTagName())) { String userName = element.getAttribute("name"); @@ -317,6 +323,13 @@ public synchronized void reloadAllocations() throws IOException, defaultSchedPolicy = SchedulingPolicy.parse(text); } else if ("queuePlacementPolicy".equals(element.getTagName())) { placementPolicyElement = element; + } else if ("DRFPolicyResourcesDefault".equals(element.getTagName())) { + String text = 
((Text)element.getFirstChild()).getData().trim(); + drfPolicyResourcesDefault = getResourceTypes(text); + if (drfPolicyResourcesDefault.isEmpty()) { + throw new AllocationConfigurationException("The default DRF policy " + + "resources cannot be empty."); + } } else { LOG.warn("Bad element in allocations file: " + element.getTagName()); } @@ -336,8 +349,9 @@ public synchronized void reloadAllocations() throws IOException, } loadQueue(parent, element, minQueueResources, maxQueueResources, queueMaxApps, userMaxApps, queueMaxAMShares, queueWeights, - queuePolicies, minSharePreemptionTimeouts, fairSharePreemptionTimeouts, - fairSharePreemptionThresholds, queueAcls, configuredQueues); + queuePolicies, drfPolicyResourcesMap, minSharePreemptionTimeouts, + fairSharePreemptionTimeouts, fairSharePreemptionThresholds, queueAcls, + configuredQueues); } // Load placement policy and pass it configured queues @@ -370,30 +384,32 @@ public synchronized void reloadAllocations() throws IOException, maxQueueResources, queueMaxApps, userMaxApps, queueWeights, queueMaxAMShares, userMaxAppsDefault, queueMaxAppsDefault, queueMaxAMShareDefault, queuePolicies, defaultSchedPolicy, + drfPolicyResourcesMap, drfPolicyResourcesDefault, minSharePreemptionTimeouts, fairSharePreemptionTimeouts, fairSharePreemptionThresholds, queueAcls, newPlacementPolicy, configuredQueues); - + lastSuccessfulReload = clock.getTime(); lastReloadAttemptFailed = false; reloadListener.onReload(info); } - + /** * Loads a queue from a queue element in the configuration file */ private void loadQueue(String parentName, Element element, - Map minQueueResources, - Map maxQueueResources, Map queueMaxApps, - Map userMaxApps, Map queueMaxAMShares, - Map queueWeights, - Map queuePolicies, - Map minSharePreemptionTimeouts, - Map fairSharePreemptionTimeouts, - Map fairSharePreemptionThresholds, - Map> queueAcls, - Map> configuredQueues) + Map minQueueResources, + Map maxQueueResources, Map queueMaxApps, + Map userMaxApps, Map 
queueMaxAMShares, + Map queueWeights, + Map queuePolicies, + Map> drfResourcesMap, + Map minSharePreemptionTimeouts, + Map fairSharePreemptionTimeouts, + Map fairSharePreemptionThresholds, + Map> queueAcls, + Map> configuredQueues) throws AllocationConfigurationException { String queueName = element.getAttribute("name"); if (parentName != null) { @@ -454,15 +470,21 @@ private void loadQueue(String parentName, Element element, } else if ("aclAdministerApps".equals(field.getTagName())) { String text = ((Text)field.getFirstChild()).getData(); acls.put(QueueACL.ADMINISTER_QUEUE, new AccessControlList(text)); - } else if ("queue".endsWith(field.getTagName()) || + } else if ("queue".endsWith(field.getTagName()) || "pool".equals(field.getTagName())) { loadQueue(queueName, field, minQueueResources, maxQueueResources, queueMaxApps, userMaxApps, queueMaxAMShares, queueWeights, - queuePolicies, minSharePreemptionTimeouts, + queuePolicies, drfResourcesMap, minSharePreemptionTimeouts, fairSharePreemptionTimeouts, fairSharePreemptionThresholds, queueAcls, configuredQueues); configuredQueues.get(FSQueueType.PARENT).add(queueName); isLeaf = false; + } else if ("DRFPolicyResources".equals(field.getTagName())) { + String text = ((Text) field.getFirstChild()).getData(); + Set<ResourceType> drfResources = getResourceTypes(text); + if (drfResources != null && !drfResources.isEmpty()) { + drfResourcesMap.put(queueName, drfResources); + } } } if (isLeaf) { @@ -478,16 +500,28 @@ private void loadQueue(String parentName, Element element, if (maxQueueResources.containsKey(queueName) && minQueueResources.containsKey(queueName) && !Resources.fitsIn(minQueueResources.get(queueName), - maxQueueResources.get(queueName))) { + maxQueueResources.get(queueName))) { LOG.warn( String.format( "Queue %s has max resources %s less than min resources %s", - queueName, maxQueueResources.get(queueName), + queueName, maxQueueResources.get(queueName), minQueueResources.get(queueName))); } } - + public 
interface Listener { public void onReload(AllocationConfiguration info); } -} + + protected Set getResourceTypes(String value) { + Set resourceTypes = new HashSet(); + String[] types = value.split(","); + for (String type : types) { + try { + ResourceType rType = ResourceType.valueOf(type.trim()); + resourceTypes.add(rType); + } catch (Exception e) {} + } + return resourceTypes; + } +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java index b9966e7..c69cb25 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java @@ -408,6 +408,7 @@ public void resetPreemptedResources() { public void clearPreemptedResources() { preemptedResources.setMemory(0); preemptedResources.setVirtualCores(0); + preemptedResources.setVirtualDisks(0); } /** diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index 345ea8b..bd8d9dd 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -68,7 +68,7 @@ public FSLeafQueue(String name, FairScheduler scheduler, this.lastTimeAtMinShare = scheduler.getClock().getTime(); this.lastTimeAtFairShareThreshold = scheduler.getClock().getTime(); activeUsersManager = new ActiveUsersManager(getMetrics()); - amResourceUsage = Resource.newInstance(0, 0); + amResourceUsage = Resource.newInstance(0, 0, 0); } public void addApp(FSAppAttempt app, boolean runnable) { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java index 82c422b..a2cd6ba 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java @@ -33,12 +33,16 @@ @Metric("Fair share of memory in MB") MutableGaugeInt fairShareMB; @Metric("Fair share of CPU in vcores") MutableGaugeInt fairShareVCores; + @Metric("Fair share of disk IO in vdisks") MutableGaugeInt fairShareVDisks; @Metric("Steady fair share of memory in MB") MutableGaugeInt steadyFairShareMB; @Metric("Steady fair share of CPU in vcores") MutableGaugeInt steadyFairShareVCores; + @Metric("Steady fair share of disk IO in vdisks") MutableGaugeInt steadyFairShareVDisks; @Metric("Minimum share of memory in MB") MutableGaugeInt minShareMB; @Metric("Minimum share of CPU in vcores") MutableGaugeInt 
minShareVCores; + @Metric("Minimum share of disk IO in vdisks") MutableGaugeInt minShareVDisks; @Metric("Maximum share of memory in MB") MutableGaugeInt maxShareMB; @Metric("Maximum share of CPU in vcores") MutableGaugeInt maxShareVCores; + @Metric("Maximum share of disk IO in vdisks") MutableGaugeInt maxShareVDisks; FSQueueMetrics(MetricsSystem ms, String queueName, Queue parent, boolean enableUserMetrics, Configuration conf) { @@ -48,6 +52,7 @@ public void setFairShare(Resource resource) { fairShareMB.set(resource.getMemory()); fairShareVCores.set(resource.getVirtualCores()); + fairShareVDisks.set(resource.getVirtualDisks()); } public int getFairShareMB() { @@ -61,6 +66,7 @@ public int getFairShareVirtualCores() { public void setSteadyFairShare(Resource resource) { steadyFairShareMB.set(resource.getMemory()); steadyFairShareVCores.set(resource.getVirtualCores()); + steadyFairShareVDisks.set(resource.getVirtualDisks()); } public int getSteadyFairShareMB() { @@ -71,9 +77,14 @@ public int getSteadyFairShareVCores() { return steadyFairShareVCores.value(); } + public int getSteadyFairShareVDisks() { + return steadyFairShareVDisks.value(); + } + public void setMinShare(Resource resource) { minShareMB.set(resource.getMemory()); minShareVCores.set(resource.getVirtualCores()); + minShareVDisks.set(resource.getVirtualDisks()); } public int getMinShareMB() { @@ -83,10 +94,15 @@ public int getMinShareMB() { public int getMinShareVirtualCores() { return minShareVCores.value(); } - + + public int getMinShareVirtualDisks() { + return minShareVDisks.value(); + } + public void setMaxShare(Resource resource) { maxShareMB.set(resource.getMemory()); maxShareVCores.set(resource.getVirtualCores()); + maxShareVDisks.set(resource.getVirtualDisks()); } public int getMaxShareMB() { @@ -96,7 +112,11 @@ public int getMaxShareMB() { public int getMaxShareVirtualCores() { return maxShareVCores.value(); } - + + public int getMaxShareVirtualDisks() { + return maxShareVDisks.value(); + } + 
public synchronized static FSQueueMetrics forQueue(String queueName, Queue parent, boolean enableUserMetrics, Configuration conf) { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 3fc3019..a72f789 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -231,6 +231,24 @@ private void validateConf(Configuration conf) { + "=" + maxVcores + ", min should equal greater than 0" + ", max should be no smaller than min."); } + + // validate scheduler vdisks allocation setting + int minVdisks = conf.getInt( + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); + int maxVdisks = conf.getInt( + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS); + + if (minVdisks < 0 || minVdisks > maxVdisks) { + throw new YarnRuntimeException("Invalid resource scheduler vdisks" + + " allocation configuration" + + ", " + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS + + "=" + minVdisks + + ", " + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS + + "=" + maxVdisks + ", min should equal greater than 0" + + ", max should be no smaller than min."); + } } public FairSchedulerConfiguration getConf() { diff --git 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java index 32ef906..7504814 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java @@ -48,6 +48,9 @@ public static final String RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES = YarnConfiguration.YARN_PREFIX + "scheduler.increment-allocation-vcores"; public static final int DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES = 1; + public static final String RM_SCHEDULER_INCREMENT_ALLOCATION_VDISKS = + YarnConfiguration.YARN_PREFIX + "scheduler.increment-allocation-vdisks"; + public static final int DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VDISKS = 1; private static final String CONF_PREFIX = "yarn.scheduler.fair."; @@ -143,7 +146,10 @@ public Resource getMinimumAllocation() { int cpu = getInt( YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); - return Resources.createResource(mem, cpu); + int vdisks = getInt( + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); + return Resources.createResource(mem, cpu, vdisks); } public Resource getMaximumAllocation() { @@ -153,7 +159,10 @@ public Resource getMaximumAllocation() { int cpu = getInt( YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); - return Resources.createResource(mem, cpu); + int vdisks = getInt( + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS); + return Resources.createResource(mem, cpu, vdisks); } public Resource getIncrementAllocation() { @@ -163,7 +172,11 @@ public Resource getIncrementAllocation() { int incrementCores = getInt( RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES); - return Resources.createResource(incrementMemory, incrementCores); + int incrementVdisks = getInt( + RM_SCHEDULER_INCREMENT_ALLOCATION_VDISKS, + DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_VDISKS); + return Resources.createResource( + incrementMemory, incrementCores, incrementVdisks); } public float getLocalityThresholdNode() { @@ -234,7 +247,8 @@ public boolean getUsePortForNodeName() { /** * Parses a resource config value of a form like "1024", "1024 mb", - * or "1024 mb, 3 vcores". If no units are given, megabytes are assumed. + * "1024 mb, 3 vcores", or "1024mb, 3 vcores, 1 vdisks". If no units are + * given, megabytes are assumed. 
* * @throws AllocationConfigurationException */ @@ -244,7 +258,8 @@ public static Resource parseResourceConfigValue(String val) val = val.toLowerCase(); int memory = findResource(val, "mb"); int vcores = findResource(val, "vcores"); - return BuilderUtils.newResource(memory, vcores); + int vdisks = findResource(val, "vdisks"); + return BuilderUtils.newResource(memory, vcores, vdisks); } catch (AllocationConfigurationException ex) { throw ex; } catch (Exception ex) { @@ -261,9 +276,11 @@ private static int findResource(String val, String units) throws AllocationConfigurationException { Pattern pattern = Pattern.compile("(\\d+)\\s*" + units); Matcher matcher = pattern.matcher(val); - if (!matcher.find()) { + + if (!matcher.find() && !units.equals("vdisks")) { throw new AllocationConfigurationException("Missing resource: " + units); + } else { + return matcher.find(0) ? Integer.parseInt(matcher.group(1)) : 0; } - return Integer.parseInt(matcher.group(1)); } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java index 3bea985..c981e1b 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java @@ -257,6 +257,8 @@ private static int getResourceValue(Resource resource, ResourceType type) { return resource.getMemory(); case CPU: return resource.getVirtualCores(); + case DISKIO: + return 
resource.getVirtualDisks(); default: throw new IllegalArgumentException("Invalid resource"); } @@ -270,6 +272,9 @@ private static void setResourceValue(int val, Resource resource, ResourceType ty case CPU: resource.setVirtualCores(val); break; + case DISKIO: + resource.setVirtualDisks(val); + break; default: throw new IllegalArgumentException("Invalid resource"); } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java index 3f6cbd1..c17560e 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java @@ -20,6 +20,7 @@ import java.util.Collection; import java.util.Comparator; +import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; @@ -47,6 +48,8 @@ private DominantResourceFairnessComparator comparator = new DominantResourceFairnessComparator(); + private Set enabledResourceTypes; + @Override public String getName() { return NAME; @@ -108,9 +111,20 @@ public void initialize(Resource clusterCapacity) { comparator.setClusterCapacity(clusterCapacity); } - public static class DominantResourceFairnessComparator implements Comparator { - private static final int NUM_RESOURCES = ResourceType.values().length; - + /** + * Set the resources 
(cpu/memory/diskio) to be considered in DRF. + * + * @param enabledResourceTypes the resources to be considered in DRF. + */ + public void setEnabledResourceTypes(Set enabledResourceTypes) { + this.enabledResourceTypes = enabledResourceTypes; + } + + public Set getEnabledResourceTypes() { + return this.enabledResourceTypes; + } + + public class DominantResourceFairnessComparator implements Comparator { private Resource clusterCapacity; public void setClusterCapacity(Resource clusterCapacity) { @@ -119,13 +133,16 @@ public void setClusterCapacity(Resource clusterCapacity) { @Override public int compare(Schedulable s1, Schedulable s2) { + if (enabledResourceTypes == null || enabledResourceTypes.isEmpty()) { + return (int)(s1.getStartTime() - s2.getStartTime()); + } + ResourceWeights sharesOfCluster1 = new ResourceWeights(); ResourceWeights sharesOfCluster2 = new ResourceWeights(); ResourceWeights sharesOfMinShare1 = new ResourceWeights(); ResourceWeights sharesOfMinShare2 = new ResourceWeights(); - ResourceType[] resourceOrder1 = new ResourceType[NUM_RESOURCES]; - ResourceType[] resourceOrder2 = new ResourceType[NUM_RESOURCES]; - + ResourceType[] resourceOrder1 = new ResourceType[enabledResourceTypes.size()]; + ResourceType[] resourceOrder2 = new ResourceType[enabledResourceTypes.size()]; // Calculate shares of the cluster for each resource both schedulables. calculateShares(s1.getResourceUsage(), clusterCapacity, sharesOfCluster1, resourceOrder1, s1.getWeights()); @@ -141,7 +158,6 @@ public int compare(Schedulable s1, Schedulable s2) { // for that resource boolean s1Needy = sharesOfMinShare1.getWeight(resourceOrder1[0]) < 1.0f; boolean s2Needy = sharesOfMinShare2.getWeight(resourceOrder2[0]) < 1.0f; - int res = 0; if (!s2Needy && !s1Needy) { res = compareShares(sharesOfCluster1, sharesOfCluster2, @@ -167,22 +183,57 @@ public int compare(Schedulable s1, Schedulable s2) { * it takes up. The resourceOrder vector contains an ordering of resources * by largest share. 
So if resource=<10 MB, 5 CPU>, and pool=<100 MB, 10 CPU>, * shares will be [.1, .5] and resourceOrder will be [CPU, MEMORY]. + * // FIXME: update the example */ void calculateShares(Resource resource, Resource pool, ResourceWeights shares, ResourceType[] resourceOrder, ResourceWeights weights) { - shares.setWeight(MEMORY, (float)resource.getMemory() / - (pool.getMemory() * weights.getWeight(MEMORY))); - shares.setWeight(CPU, (float)resource.getVirtualCores() / - (pool.getVirtualCores() * weights.getWeight(CPU))); - // sort order vector by resource share - if (resourceOrder != null) { - if (shares.getWeight(MEMORY) > shares.getWeight(CPU)) { - resourceOrder[0] = MEMORY; - resourceOrder[1] = CPU; - } else { + if (enabledResourceTypes.contains(ResourceType.MEMORY)) { + shares.setWeight(MEMORY, (float)resource.getMemory() / + (pool.getMemory() * weights.getWeight(MEMORY))); + } + if (enabledResourceTypes.contains(ResourceType.CPU)) { + shares.setWeight(CPU, (float) resource.getVirtualCores() / + (pool.getVirtualCores() * weights.getWeight(CPU))); + } + if (enabledResourceTypes.contains(ResourceType.DISKIO)) { + shares.setWeight(DISKIO, (float) resource.getVirtualDisks() / + (pool.getVirtualDisks() * weights.getWeight(DISKIO))); + } + if (resourceOrder == null) { + return; + } + + int position = 0; + if (enabledResourceTypes.contains(ResourceType.MEMORY)) { + resourceOrder[0] = MEMORY; + position ++; + } + if (enabledResourceTypes.contains(ResourceType.CPU)) { + if (position == 0) { resourceOrder[0] = CPU; - resourceOrder[1] = MEMORY; + } else { + if (shares.getWeight(MEMORY) >= shares.getWeight(CPU)) { + resourceOrder[1] = CPU; + } else { + resourceOrder[0] = CPU; + resourceOrder[1] = MEMORY; + } + } + position ++; + } + if (enabledResourceTypes.contains(ResourceType.DISKIO)) { + int startIndex = 0; + while (startIndex < position) { + if (shares.getWeight(DISKIO) >= + shares.getWeight(resourceOrder[startIndex])) { + break; + } + startIndex ++; + } + for (int i = 
position; i > startIndex; i --) { + resourceOrder[i] = resourceOrder[i-1]; } + resourceOrder[startIndex] = DISKIO; } } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java index 7e41e53..deeab86 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/MetricsOverviewTable.java @@ -75,6 +75,9 @@ protected void render(Block html) { th().$class("ui-state-default")._("VCores Used")._(). th().$class("ui-state-default")._("VCores Total")._(). th().$class("ui-state-default")._("VCores Reserved")._(). + th().$class("ui-state-default")._("VDisks Used")._(). + th().$class("ui-state-default")._("VDisks Total")._(). + th().$class("ui-state-default")._("VDisks Reserved")._(). th().$class("ui-state-default")._("Active Nodes")._(). th().$class("ui-state-default")._("Decommissioned Nodes")._(). th().$class("ui-state-default")._("Lost Nodes")._(). @@ -100,6 +103,9 @@ protected void render(Block html) { td(String.valueOf(clusterMetrics.getAllocatedVirtualCores())). td(String.valueOf(clusterMetrics.getTotalVirtualCores())). td(String.valueOf(clusterMetrics.getReservedVirtualCores())). + td(String.valueOf(clusterMetrics.getAllocatedVirtualDisks())). + td(String.valueOf(clusterMetrics.getAvailableVirtualDisks() + clusterMetrics.getAllocatedVirtualDisks())). + td(String.valueOf(clusterMetrics.getReservedVirtualDisks())). td().a(url("nodes"),String.valueOf(clusterMetrics.getActiveNodes()))._(). 
td().a(url("nodes/decommissioned"),String.valueOf(clusterMetrics.getDecommissionedNodes()))._(). td().a(url("nodes/lost"),String.valueOf(clusterMetrics.getLostNodes()))._(). @@ -129,6 +135,9 @@ protected void render(Block html) { th().$class("ui-state-default")._("VCores Used")._(). th().$class("ui-state-default")._("VCores Pending")._(). th().$class("ui-state-default")._("VCores Reserved")._(). + th().$class("ui-state-default")._("VDisks Used")._(). + th().$class("ui-state-default")._("VDisks Pending")._(). + th().$class("ui-state-default")._("VDisks Reserved")._(). _(). _(). tbody().$class("ui-widget-content"). @@ -151,6 +160,9 @@ protected void render(Block html) { td(String.valueOf(userMetrics.getAllocatedVirtualCores())). td(String.valueOf(userMetrics.getPendingVirtualCores())). td(String.valueOf(userMetrics.getReservedVirtualCores())). + td(String.valueOf(userMetrics.getAllocatedVirtualDisks())). + td(String.valueOf(userMetrics.getPendingVirtualDisks())). + td(String.valueOf(userMetrics.getReservedVirtualDisks())). _(). 
_()._(); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index db553ae..7b3a711 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -46,6 +46,10 @@ protected long availableVirtualCores; protected long allocatedVirtualCores; + protected long reservedVirtualDisks; + protected long availableVirtualDisks; + protected long allocatedVirtualDisks; + protected int containersAllocated; protected int containersReserved; protected int containersPending; @@ -82,6 +86,10 @@ public ClusterMetricsInfo(final ResourceManager rm, final RMContext rmContext) { this.availableVirtualCores = metrics.getAvailableVirtualCores(); this.allocatedVirtualCores = metrics.getAllocatedVirtualCores(); + this.reservedVirtualDisks = metrics.getReservedVirtualDisks(); + this.availableVirtualDisks = metrics.getAvailableVirtualDisks(); + this.allocatedVirtualDisks = metrics.getAllocatedVirtualDisks(); + this.containersAllocated = metrics.getAllocatedContainers(); this.containersPending = metrics.getPendingContainers(); this.containersReserved = metrics.getReservedContainers(); @@ -145,6 +153,18 @@ public long getAllocatedVirtualCores() { return this.allocatedVirtualCores; } + public long getReservedVirtualDisks() { + return this.reservedVirtualDisks; + } + + public long getAvailableVirtualDisks() { + return this.availableVirtualDisks; + } + + public long 
getAllocatedVirtualDisks() { + return this.allocatedVirtualDisks; + } + public int getContainersAllocated() { return this.containersAllocated; } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java index e05cd85..c42c9ba 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java @@ -46,6 +46,9 @@ protected long reservedVirtualCores; protected long pendingVirtualCores; protected long allocatedVirtualCores; + protected long reservedVirtualDisks; + protected long pendingVirtualDisks; + protected long allocatedVirtualDisks; @XmlTransient protected boolean userMetricsAvailable; @@ -81,6 +84,10 @@ public UserMetricsInfo(final ResourceManager rm, final RMContext rmContext, this.reservedVirtualCores = userMetrics.getReservedVirtualCores(); this.pendingVirtualCores = userMetrics.getPendingVirtualCores(); this.allocatedVirtualCores = userMetrics.getAllocatedVirtualCores(); + + this.reservedVirtualDisks = userMetrics.getReservedVirtualDisks(); + this.pendingVirtualDisks = userMetrics.getPendingVirtualDisks(); + this.allocatedVirtualDisks = userMetrics.getAllocatedVirtualDisks(); } } @@ -136,6 +143,18 @@ public long getPendingVirtualCores() { return this.pendingVirtualCores; } + public long getReservedVirtualDisks() { + return this.reservedVirtualDisks; + } + + public long getAllocatedVirtualDisks() { + return this.allocatedVirtualDisks; + } + + public long 
getPendingVirtualDisks() { + return this.pendingVirtualDisks; + } + public int getReservedContainers() { return this.reservedContainers; } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java index 76ede39..5ec3f2a 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java @@ -328,6 +328,7 @@ public synchronized void assign(List containers) throws IOException, YarnException { int numContainers = containers.size(); + // Schedule in priority order for (Priority priority : requests.keySet()) { assign(priority, NodeType.NODE_LOCAL, containers); @@ -353,8 +354,7 @@ private synchronized void assign(Priority priority, NodeType type, for (Iterator i=containers.iterator(); i.hasNext();) { Container container = i.next(); String host = container.getNodeId().toString(); - - if (Resources.equals(requestSpec.get(priority), container.getResource())) { + if (Resources.equals(requestSpec.get(priority), container.getResource())) { // See which task can use this container for (Iterator t=tasks.get(priority).iterator(); t.hasNext();) { Task task = t.next(); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java index 5f53805..aacce49 100644 --- 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java @@ -50,6 +50,7 @@ private NodeId nodeId; private final int memory; private final int vCores; + private final int vDisks; private ResourceTrackerService resourceTracker; private final int httpPort = 2; private MasterKey currentContainerTokenMasterKey; @@ -65,14 +66,33 @@ public MockNM(String nodeIdStr, int memory, ResourceTrackerService resourceTrack } public MockNM(String nodeIdStr, int memory, int vcores, + ResourceTrackerService resourceTracker) { + // scale vdisks based on the requested memory + this(nodeIdStr, memory, vcores, + Math.max(1, (memory * YarnConfiguration.DEFAULT_NM_VDISKS) / + YarnConfiguration.DEFAULT_NM_PMEM_MB), + resourceTracker); + } + + public MockNM(String nodeIdStr, int memory, int vcores, int vdisks, ResourceTrackerService resourceTracker) { - this(nodeIdStr, memory, vcores, resourceTracker, YarnVersionInfo.getVersion()); + this(nodeIdStr, memory, vcores, vdisks, resourceTracker, + YarnVersionInfo.getVersion()); } public MockNM(String nodeIdStr, int memory, int vcores, ResourceTrackerService resourceTracker, String version) { + this(nodeIdStr, memory, vcores, + Math.max(1, (memory * YarnConfiguration.DEFAULT_NM_VDISKS) / + YarnConfiguration.DEFAULT_NM_PMEM_MB), + resourceTracker, version); + } + + public MockNM(String nodeIdStr, int memory, int vcores, int vDisks, + ResourceTrackerService resourceTracker, String version) { this.memory = memory; this.vCores = vcores; + this.vDisks = vDisks; this.resourceTracker = resourceTracker; this.version = version; String[] splits = nodeIdStr.split(":"); @@ -115,7 +135,7 @@ public RegisterNodeManagerResponse registerNode( RegisterNodeManagerRequest.class); req.setNodeId(nodeId); 
req.setHttpPort(httpPort); - Resource resource = BuilderUtils.newResource(memory, vCores); + Resource resource = BuilderUtils.newResource(memory, vCores, vDisks); req.setResource(resource); req.setContainerStatuses(containerReports); req.setNMVersion(version); @@ -198,4 +218,8 @@ public int getMemory() { public int getvCores() { return vCores; } + + public int getVDisks() { + return vDisks; + } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index d877e25..9f322ef 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -463,7 +463,7 @@ private RMNodeImpl getRunningNode() { private RMNodeImpl getRunningNode(String nmVersion) { NodeId nodeId = BuilderUtils.newNodeId("localhost", 0); - Resource capability = Resource.newInstance(4096, 4); + Resource capability = Resource.newInstance(4096, 4, 4); RMNodeImpl node = new RMNodeImpl(nodeId, rmContext,null, 0, 0, null, capability, nmVersion); node.handle(new RMNodeStartedEvent(node.getNodeID(), null, null)); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java index 6735575..6c93a05 100644 --- 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java @@ -90,18 +90,19 @@ public void testResourceAllocation() throws IOException, final int memory = 4 * 1024; final int vcores = 4; + final int vdisks = 4; // Register node1 String host1 = "host1"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm1 = registerNode(host1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(memory, vcores)); + Resources.createResource(memory, vcores, vdisks)); // Register node2 String host2 = "host2"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm2 = registerNode(host2, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(memory/2, vcores/2)); + Resources.createResource(memory/2, vcores/2, vcores/2)); // Submit an application Application application = new Application("user1", resourceManager); @@ -131,10 +132,10 @@ public void testResourceAllocation() throws IOException, // Send a heartbeat to kick the tires on the Scheduler nodeUpdate(nm1); - + // Get allocations from the scheduler application.schedule(); - + checkResourceUsage(nm1, nm2); LOG.info("Adding new tasks..."); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java index 28d1d63..ce0ab27 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java 
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java @@ -382,6 +382,7 @@ public void testNodeRegistrationWithMinimumAllocations() throws Exception { Configuration conf = new Configuration(); conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, "2048"); conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, "4"); + conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, "4"); rm = new MockRM(conf); rm.start(); @@ -392,7 +393,7 @@ public void testNodeRegistrationWithMinimumAllocations() throws Exception { NodeId nodeId = BuilderUtils.newNodeId("host", 1234); req.setNodeId(nodeId); - Resource capability = BuilderUtils.newResource(1024, 1); + Resource capability = BuilderUtils.newResource(1024, 1, 1); req.setResource(capability); RegisterNodeManagerResponse response1 = resourceTrackerService.registerNodeManager(req); @@ -400,6 +401,7 @@ capability.setMemory(2048); capability.setVirtualCores(1); + capability.setVirtualDisks(2); req.setResource(capability); RegisterNodeManagerResponse response2 = resourceTrackerService.registerNodeManager(req); @@ -407,6 +409,7 @@ capability.setMemory(1024); capability.setVirtualCores(4); + capability.setVirtualDisks(4); req.setResource(capability); RegisterNodeManagerResponse response3 = resourceTrackerService.registerNodeManager(req); @@ -414,6 +417,7 @@ capability.setMemory(2048); capability.setVirtualCores(4); + capability.setVirtualDisks(4); req.setResource(capability); RegisterNodeManagerResponse response4 = resourceTrackerService.registerNodeManager(req); @@ -512,7 +516,7 @@ public void testHandleContainerStatusInvalidCompletions() throws Exception 
{ NMContainerStatus.newInstance( ContainerId.newInstance( ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), - ContainerState.COMPLETE, Resource.newInstance(1024, 1), + ContainerState.COMPLETE, Resource.newInstance(1024, 1, 1), "Dummy Completed", 0, Priority.newInstance(10), 1234); rm.getResourceTrackerService().handleNMContainerStatus(report, null); verify(handler, never()).handle((Event) any()); @@ -523,7 +527,7 @@ public void testHandleContainerStatusInvalidCompletions() throws Exception { currentAttempt.setMasterContainer(null); report = NMContainerStatus.newInstance( ContainerId.newInstance(currentAttempt.getAppAttemptId(), 0), - ContainerState.COMPLETE, Resource.newInstance(1024, 1), + ContainerState.COMPLETE, Resource.newInstance(1024, 1, 1), "Dummy Completed", 0, Priority.newInstance(10), 1234); rm.getResourceTrackerService().handleNMContainerStatus(report, null); verify(handler, never()).handle((Event)any()); @@ -535,7 +539,7 @@ public void testHandleContainerStatusInvalidCompletions() throws Exception { report = NMContainerStatus.newInstance( ContainerId.newInstance( ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), - ContainerState.COMPLETE, Resource.newInstance(1024, 1), + ContainerState.COMPLETE, Resource.newInstance(1024, 1, 1), "Dummy Completed", 0, Priority.newInstance(10), 1234); try { rm.getResourceTrackerService().handleNMContainerStatus(report, null); @@ -550,7 +554,7 @@ public void testHandleContainerStatusInvalidCompletions() throws Exception { currentAttempt.setMasterContainer(null); report = NMContainerStatus.newInstance( ContainerId.newInstance(currentAttempt.getAppAttemptId(), 0), - ContainerState.COMPLETE, Resource.newInstance(1024, 1), + ContainerState.COMPLETE, Resource.newInstance(1024, 1, 1), "Dummy Completed", 0, Priority.newInstance(10), 1234); try { rm.getResourceTrackerService().handleNMContainerStatus(report, null); diff --git 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceWeights.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceWeights.java index f420b9e..f14372c 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceWeights.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResourceWeights.java @@ -30,19 +30,25 @@ public void testWeights() { rw1.getWeight(ResourceType.CPU), 0.00001f); Assert.assertEquals("Default memory weight should be 0.0f", 0.0f, rw1.getWeight(ResourceType.MEMORY), 0.00001f); + Assert.assertEquals("Default Disk I/O weight should be 0.0f", 0.0f, + rw1.getWeight(ResourceType.DISKIO), 0.00001f); ResourceWeights rw2 = new ResourceWeights(2.0f); Assert.assertEquals("The CPU weight should be 2.0f.", 2.0f, rw2.getWeight(ResourceType.CPU), 0.00001f); Assert.assertEquals("The memory weight should be 2.0f", 2.0f, rw2.getWeight(ResourceType.MEMORY), 0.00001f); + Assert.assertEquals("Default Disk I/O weight should be 2.0f", 2.0f, + rw2.getWeight(ResourceType.DISKIO), 0.00001f); // set each individually - ResourceWeights rw3 = new ResourceWeights(1.5f, 2.0f); + ResourceWeights rw3 = new ResourceWeights(1.5f, 2.0f, 2.5f); Assert.assertEquals("The CPU weight should be 2.0f", 2.0f, rw3.getWeight(ResourceType.CPU), 0.00001f); Assert.assertEquals("The memory weight should be 1.5f", 1.5f, rw3.getWeight(ResourceType.MEMORY), 0.00001f); + Assert.assertEquals("Default Disk I/O weight should be 2.5f", 2.5f, + rw3.getWeight(ResourceType.DISKIO), 0.00001f); // reset weights rw3.setWeight(ResourceType.CPU, 2.5f); @@ 
-51,5 +57,8 @@ public void testWeights() { rw3.setWeight(ResourceType.MEMORY, 4.0f); Assert.assertEquals("The memory weight should be set to 4.0f.", 4.0f, rw3.getWeight(ResourceType.MEMORY), 0.00001f); + rw3.setWeight(ResourceType.DISKIO, 5.0f); + Assert.assertEquals("Default Disk I/O weight should be 5.0f", 5.0f, + rw3.getWeight(ResourceType.DISKIO), 0.00001f); } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java index ae98660..9fc9ccc 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resource/TestResources.java @@ -24,20 +24,20 @@ public class TestResources { @Test(timeout=1000) public void testFitsIn() { - assertTrue(fitsIn(createResource(1, 1), createResource(2, 2))); - assertTrue(fitsIn(createResource(2, 2), createResource(2, 2))); - assertFalse(fitsIn(createResource(2, 2), createResource(1, 1))); - assertFalse(fitsIn(createResource(1, 2), createResource(2, 1))); - assertFalse(fitsIn(createResource(2, 1), createResource(1, 2))); + assertTrue(fitsIn(createResource(1, 1, 1), createResource(2, 2, 2))); + assertTrue(fitsIn(createResource(2, 2, 2), createResource(2, 2, 2))); + assertFalse(fitsIn(createResource(2, 2, 2), createResource(1, 1, 1))); + assertFalse(fitsIn(createResource(1, 2, 3), createResource(3, 2, 1))); + assertFalse(fitsIn(createResource(2, 1, 3), createResource(1, 2, 3))); } @Test(timeout=1000) public void testComponentwiseMin() { - assertEquals(createResource(1, 
1), - componentwiseMin(createResource(1, 1), createResource(2, 2))); - assertEquals(createResource(1, 1), - componentwiseMin(createResource(2, 2), createResource(1, 1))); - assertEquals(createResource(1, 1), - componentwiseMin(createResource(1, 2), createResource(2, 1))); + assertEquals(createResource(1, 1, 1), + componentwiseMin(createResource(1, 1, 1), createResource(2, 2, 2))); + assertEquals(createResource(1, 1, 1), + componentwiseMin(createResource(2, 2, 2), createResource(1, 1, 1))); + assertEquals(createResource(1, 1, 1), + componentwiseMin(createResource(1, 2, 1), createResource(2, 1, 2))); } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index c837450..c18ed8f 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -132,7 +132,7 @@ public void testNMExpiry() throws Exception { String hostname1 = "localhost1"; String hostname2 = "localhost2"; String hostname3 = "localhost3"; - Resource capability = BuilderUtils.newResource(1024, 1); + Resource capability = BuilderUtils.newResource(1024, 1, 1); RegisterNodeManagerRequest request1 = recordFactory .newRecordInstance(RegisterNodeManagerRequest.class); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java index d16d551..a381b1b 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java @@ -102,7 +102,7 @@ public void setUp() { @Test public void testReconnect() throws Exception { String hostname1 = "localhost1"; - Resource capability = BuilderUtils.newResource(1024, 1); + Resource capability = BuilderUtils.newResource(1024, 1, 1); RegisterNodeManagerRequest request1 = recordFactory .newRecordInstance(RegisterNodeManagerRequest.class); @@ -121,7 +121,7 @@ public void testReconnect() throws Exception { rmNodeEvents.clear(); resourceTrackerService.registerNodeManager(request1); - capability = BuilderUtils.newResource(1024, 2); + capability = BuilderUtils.newResource(1024, 2, 2); request1.setResource(capability); Assert.assertEquals(RMNodeEventType.RECONNECTED, rmNodeEvents.get(0).getType()); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java index 4f94695..afe55b6 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java +++ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java @@ -94,7 +94,7 @@ public void tearDown() { @Test public void testRPCResponseId() throws IOException, YarnException { String node = "localhost"; - Resource capability = BuilderUtils.newResource(1024, 1); + Resource capability = BuilderUtils.newResource(1024, 1, 1); RegisterNodeManagerRequest request = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); nodeId = NodeId.newInstance(node, 1234); request.setNodeId(nodeId); @@ -137,4 +137,4 @@ public void testRPCResponseId() throws IOException, YarnException { Assert.assertEquals("Too far behind rm response id:2 nm response id:0", response.getDiagnosticsMessage()); } -} \ No newline at end of file +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index e5daf6f..a6fa95d 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -311,7 +311,7 @@ public void setUp() throws Exception { final String queue = MockApps.newQueue(); submissionContext = mock(ApplicationSubmissionContext.class); when(submissionContext.getQueue()).thenReturn(queue); - Resource resource = BuilderUtils.newResource(1536, 1); + Resource resource = 
BuilderUtils.newResource(1536, 1, 1); ContainerLaunchContext amContainerSpec = BuilderUtils.newContainerLaunchContext(null, null, null, null, null, null); @@ -639,7 +639,7 @@ private Container allocateApplicationAttempt() { // Mock the allocation of AM container Container container = mock(Container.class); - Resource resource = BuilderUtils.newResource(2048, 1); + Resource resource = BuilderUtils.newResource(2048, 1, 2); when(container.getId()).thenReturn( BuilderUtils.newContainerId(applicationAttempt.getAppAttemptId(), 1)); when(container.getResource()).thenReturn(resource); diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java index 553587e..2a26033 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java @@ -88,7 +88,7 @@ public void testReleaseWhileRunning() { ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1); ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class); - Resource resource = BuilderUtils.newResource(512, 1); + Resource resource = BuilderUtils.newResource(512, 1, 1); Priority priority = BuilderUtils.newPriority(5); Container container = BuilderUtils.newContainer(containerId, nodeId, @@ -183,7 +183,7 @@ public void testExpireWhileRunning() { ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1); ContainerAllocationExpirer expirer = 
mock(ContainerAllocationExpirer.class); - Resource resource = BuilderUtils.newResource(512, 1); + Resource resource = BuilderUtils.newResource(512, 1, 1); Priority priority = BuilderUtils.newPriority(5); Container container = BuilderUtils.newContainer(containerId, nodeId, diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java index 8ad71d2..913e513 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java @@ -57,7 +57,8 @@ public void setUp() { QueueMetrics.clearQueueMetrics(); } - @Test public void testDefaultSingleQueueMetrics() { + @Test + public void testDefaultSingleQueueMetrics() { String queueName = "single"; String user = "alice"; @@ -72,20 +73,23 @@ public void setUp() { metrics.submitAppAttempt(user); checkApps(queueSource, 1, 1, 0, 0, 0, 0, true); - metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100)); - metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3)); + metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100, 100)); + metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3, 3)); // Available resources is set externally, as it depends on dynamic // configurable cluster/queue resources - checkResources(queueSource, 0, 0, 0, 0, 0, 100*GB, 100, 15*GB, 15, 5, 0, 0, 0); + checkResources(queueSource, 0, 0, 0, 0, 0, 0, 100*GB, 100, 100, 15*GB, 15, + 15, 5, 0, 0, 0, 0); 
metrics.runAppAttempt(app.getApplicationId(), user); checkApps(queueSource, 1, 0, 1, 0, 0, 0, true); - metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2), true); - checkResources(queueSource, 6*GB, 6, 3, 3, 0, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0); + metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2, 2), true); + checkResources(queueSource, 6*GB, 6, 6, 3, 3, 0, 100*GB, 100, 100, 9*GB, 9, + 9, 2, 0, 0, 0, 0); - metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2)); - checkResources(queueSource, 4*GB, 4, 2, 3, 1, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0); + metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2, 2)); + checkResources(queueSource, 4*GB, 4, 4, 2, 3, 1, 100*GB, 100, 100, 9*GB, 9, + 9, 2, 0, 0, 0, 0); metrics.finishAppAttempt( app.getApplicationId(), app.isPending(), app.getUser()); @@ -169,25 +173,31 @@ public void testQueueAppMetricsForMultipleFailures() { checkApps(queueSource, 1, 1, 0, 0, 0, 0, true); checkApps(userSource, 1, 1, 0, 0, 0, 0, true); - metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100)); - metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10)); - metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3)); + metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100, 100)); + metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10, 10)); + metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3, 3)); // Available resources is set externally, as it depends on dynamic // configurable cluster/queue resources - checkResources(queueSource, 0, 0, 0, 0, 0, 100*GB, 100, 15*GB, 15, 5, 0, 0, 0); - checkResources(userSource, 0, 0, 0, 0, 0, 10*GB, 10, 15*GB, 15, 5, 0, 0, 0); + checkResources(queueSource, 0, 0, 0, 0, 0, 0, 100*GB, 100, 100, 15*GB, 15, + 15, 5, 0, 0, 0, 0); + checkResources(userSource, 0, 0, 0, 0, 0, 0, 10*GB, 10, 10, 15*GB, 15, 15, + 5, 0, 0, 0, 0); 
metrics.runAppAttempt(app.getApplicationId(), user); checkApps(queueSource, 1, 0, 1, 0, 0, 0, true); checkApps(userSource, 1, 0, 1, 0, 0, 0, true); - metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2), true); - checkResources(queueSource, 6*GB, 6, 3, 3, 0, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0); - checkResources(userSource, 6*GB, 6, 3, 3, 0, 10*GB, 10, 9*GB, 9, 2, 0, 0, 0); + metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2, 2), true); + checkResources(queueSource, 6*GB, 6, 6, 3, 3, 0, 100*GB, 100, 100, 9*GB, 9, + 9, 2, 0, 0, 0, 0); + checkResources(userSource, 6*GB, 6, 6, 3, 3, 0, 10*GB, 10, 10, 9*GB, 9, 9, + 2, 0, 0, 0, 0); - metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2)); - checkResources(queueSource, 4*GB, 4, 2, 3, 1, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0); - checkResources(userSource, 4*GB, 4, 2, 3, 1, 10*GB, 10, 9*GB, 9, 2, 0, 0, 0); + metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2, 2)); + checkResources(queueSource, 4*GB, 4, 4, 2, 3, 1, 100*GB, 100, 100, 9*GB, 9, + 9, 2, 0, 0, 0, 0); + checkResources(userSource, 4*GB, 4, 4, 2, 3, 1, 10*GB, 10, 10, 9*GB, 9, 9, + 2, 0, 0, 0, 0); metrics.finishAppAttempt( app.getApplicationId(), app.isPending(), app.getUser()); @@ -228,35 +238,47 @@ public void testQueueAppMetricsForMultipleFailures() { checkApps(userSource, 1, 1, 0, 0, 0, 0, true); checkApps(parentUserSource, 1, 1, 0, 0, 0, 0, true); - parentMetrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100)); - metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100)); - parentMetrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10)); - metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10)); - metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3)); - checkResources(queueSource, 0, 0, 0, 0, 0, 100*GB, 100, 15*GB, 15, 5, 0, 0, 0); - checkResources(parentQueueSource, 0, 0, 0, 0, 0, 100*GB, 100, 15*GB, 15, 5, 
0, 0, 0); - checkResources(userSource, 0, 0, 0, 0, 0, 10*GB, 10, 15*GB, 15, 5, 0, 0, 0); - checkResources(parentUserSource, 0, 0, 0, 0, 0, 10*GB, 10, 15*GB, 15, 5, 0, 0, 0); + parentMetrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100, 100)); + metrics.setAvailableResourcesToQueue(Resources.createResource(100*GB, 100, 100)); + parentMetrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10, 10)); + metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB, 10, 10)); + metrics.incrPendingResources(user, 5, Resources.createResource(3*GB, 3, 3)); + checkResources(queueSource, 0, 0, 0, 0, 0, 0, 100*GB, 100, 100, 15*GB, 15, + 15, 5, 0, 0, 0, 0); + checkResources(parentQueueSource, 0, 0, 0, 0, 0, 0, 100*GB, 100, 100, 15*GB, + 15, 15, 5, 0, 0, 0, 0); + checkResources(userSource, 0, 0, 0, 0, 0, 0, 10*GB, 10, 10, 15*GB, 15, 15, + 5, 0, 0, 0, 0); + checkResources(parentUserSource, 0, 0, 0, 0, 0, 0, 10*GB, 10, 10, 15*GB, 15, + 15, 5, 0, 0, 0, 0); metrics.runAppAttempt(app.getApplicationId(), user); checkApps(queueSource, 1, 0, 1, 0, 0, 0, true); checkApps(userSource, 1, 0, 1, 0, 0, 0, true); - metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2), true); - metrics.reserveResource(user, Resources.createResource(3*GB, 3)); + metrics.allocateResources(user, 3, Resources.createResource(2*GB, 2, 2), true); + metrics.reserveResource(user, Resources.createResource(3*GB, 3, 3)); // Available resources is set externally, as it depends on dynamic // configurable cluster/queue resources - checkResources(queueSource, 6*GB, 6, 3, 3, 0, 100*GB, 100, 9*GB, 9, 2, 3*GB, 3, 1); - checkResources(parentQueueSource, 6*GB, 6, 3, 3, 0, 100*GB, 100, 9*GB, 9, 2, 3*GB, 3, 1); - checkResources(userSource, 6*GB, 6, 3, 3, 0, 10*GB, 10, 9*GB, 9, 2, 3*GB, 3, 1); - checkResources(parentUserSource, 6*GB, 6, 3, 3, 0, 10*GB, 10, 9*GB, 9, 2, 3*GB, 3, 1); - - metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2)); - 
metrics.unreserveResource(user, Resources.createResource(3*GB, 3)); - checkResources(queueSource, 4*GB, 4, 2, 3, 1, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0); - checkResources(parentQueueSource, 4*GB, 4, 2, 3, 1, 100*GB, 100, 9*GB, 9, 2, 0, 0, 0); - checkResources(userSource, 4*GB, 4, 2, 3, 1, 10*GB, 10, 9*GB, 9, 2, 0, 0, 0); - checkResources(parentUserSource, 4*GB, 4, 2, 3, 1, 10*GB, 10, 9*GB, 9, 2, 0, 0, 0); + checkResources(queueSource, 6*GB, 6, 6, 3, 3, 0, 100*GB, 100, 100, 9*GB, 9, + 9, 2, 3*GB, 3, 3, 1); + checkResources(parentQueueSource, 6*GB, 6, 6, 3, 3, 0, 100*GB, 100, 100, + 9*GB, 9, 9, 2, 3*GB, 3, 3, 1); + checkResources(userSource, 6*GB, 6, 6, 3, 3, 0, 10*GB, 10, 10, 9*GB, 9, 9, + 2, 3*GB, 3, 3, 1); + checkResources(parentUserSource, 6*GB, 6, 6, 3, 3, 0, 10*GB, 10, 10, 9*GB, + 9, 9, 2, 3*GB, 3, 3, 1); + + metrics.releaseResources(user, 1, Resources.createResource(2*GB, 2, 2)); + metrics.unreserveResource(user, Resources.createResource(3*GB, 3, 3)); + checkResources(queueSource, 4*GB, 4, 4, 2, 3, 1, 100*GB, 100, 100, 9*GB, 9, + 9, 2, 0, 0, 0, 0); + checkResources(parentQueueSource, 4*GB, 4, 4, 2, 3, 1, 100*GB, 100, 100, + 9*GB, 9, 9, 2, 0, 0, 0, 0); + checkResources(userSource, 4*GB, 4, 4, 2, 3, 1, 10*GB, 10, 10, 9*GB, 9, 9, + 2, 0, 0, 0, 0); + checkResources(parentUserSource, 4*GB, 4, 4, 2, 3, 1, 10*GB, 10, 10, 9*GB, + 9, 9, 2, 0, 0, 0, 0); metrics.finishAppAttempt( app.getApplicationId(), app.isPending(), app.getUser()); @@ -347,23 +369,28 @@ public static void checkApps(MetricsSource source, int submitted, int pending, } public static void checkResources(MetricsSource source, int allocatedMB, - int allocatedCores, int allocCtnrs, long aggreAllocCtnrs, - long aggreReleasedCtnrs, int availableMB, int availableCores, int pendingMB, - int pendingCores, int pendingCtnrs, int reservedMB, int reservedCores, - int reservedCtnrs) { + int allocatedCores, int allocatedVdisks, int allocCtnrs, + long aggreAllocCtnrs, long aggreReleasedCtnrs, int availableMB, + int 
availableCores, int availableVdisks, int pendingMB, int pendingCores, + int pendingVdisks, int pendingCtnrs, int reservedMB, int reservedCores, + int reservedVdisks, int reservedCtnrs) { MetricsRecordBuilder rb = getMetrics(source); assertGauge("AllocatedMB", allocatedMB, rb); assertGauge("AllocatedVCores", allocatedCores, rb); + assertGauge("AllocatedVDisks", allocatedVdisks, rb); assertGauge("AllocatedContainers", allocCtnrs, rb); assertCounter("AggregateContainersAllocated", aggreAllocCtnrs, rb); assertCounter("AggregateContainersReleased", aggreReleasedCtnrs, rb); assertGauge("AvailableMB", availableMB, rb); assertGauge("AvailableVCores", availableCores, rb); + assertGauge("AvailableVDisks", availableVdisks, rb); assertGauge("PendingMB", pendingMB, rb); assertGauge("PendingVCores", pendingCores, rb); + assertGauge("PendingVDisks", pendingVdisks, rb); assertGauge("PendingContainers", pendingCtnrs, rb); assertGauge("ReservedMB", reservedMB, rb); assertGauge("ReservedVCores", reservedCores, rb); + assertGauge("ReservedVDisks", reservedVdisks, rb); assertGauge("ReservedContainers", reservedCtnrs, rb); } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index c168b95..ed79066 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -72,7 +72,7 @@ public void testMove() { 
assertEquals(0x30000000001L, app.getNewContainerId()); // Resource request - Resource requestedResource = Resource.newInstance(1536, 2); + Resource requestedResource = Resource.newInstance(1536, 2, 2); Priority requestedPriority = Priority.newInstance(2); ResourceRequest request = ResourceRequest.newInstance(requestedPriority, ResourceRequest.ANY, requestedResource, 3); @@ -87,36 +87,41 @@ public void testMove() { // Reserved container Priority prio1 = Priority.newInstance(1); - Resource reservedResource = Resource.newInstance(2048, 3); + Resource reservedResource = Resource.newInstance(2048, 3, 3); RMContainer container2 = createReservedRMContainer(appAttId, 1, reservedResource, node.getNodeID(), prio1); Map reservations = new HashMap(); reservations.put(node.getNodeID(), container2); app.reservedContainers.put(prio1, reservations); oldMetrics.reserveResource(user, reservedResource); - - checkQueueMetrics(oldMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4); - checkQueueMetrics(newMetrics, 0, 0, 0, 0, 0, 0, 0, 0); - checkQueueMetrics(parentMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4); + + // reservedDisks is 4, expected 3. 
+ checkQueueMetrics(oldMetrics, 1, 1, 1536, 2, 2, 2048, 3, 3, 3072, 4, 4); + checkQueueMetrics(newMetrics, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + checkQueueMetrics(parentMetrics, 1, 1, 1536, 2, 2, 2048, 3, 3, 3072, 4, 4); app.move(newQueue); - checkQueueMetrics(oldMetrics, 0, 0, 0, 0, 0, 0, 0, 0); - checkQueueMetrics(newMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4); - checkQueueMetrics(parentMetrics, 1, 1, 1536, 2, 2048, 3, 3072, 4); + checkQueueMetrics(oldMetrics, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + checkQueueMetrics(newMetrics, 1, 1, 1536, 2, 2, 2048, 3, 3, 3072, 4, 4); + checkQueueMetrics(parentMetrics, 1, 1, 1536, 2, 2, 2048, 3, 3, 3072, 4, 4); } private void checkQueueMetrics(QueueMetrics metrics, int activeApps, - int runningApps, int allocMb, int allocVcores, int reservedMb, - int reservedVcores, int pendingMb, int pendingVcores) { + int runningApps, int allocMb, int allocVcores, int allocVdisks, + int reservedMb, int reservedVcores, int reservedVdisks, + int pendingMb, int pendingVcores, int pendingVdisks) { assertEquals(activeApps, metrics.getActiveApps()); assertEquals(runningApps, metrics.getAppsRunning()); assertEquals(allocMb, metrics.getAllocatedMB()); assertEquals(allocVcores, metrics.getAllocatedVirtualCores()); + assertEquals(allocVdisks, metrics.getAllocatedVirtualDisks()); assertEquals(reservedMb, metrics.getReservedMB()); assertEquals(reservedVcores, metrics.getReservedVirtualCores()); + assertEquals(reservedVdisks, metrics.getReservedVirtualDisks()); assertEquals(pendingMb, metrics.getPendingMB()); assertEquals(pendingVcores, metrics.getPendingVirtualCores()); + assertEquals(pendingVdisks, metrics.getPendingVirtualDisks()); } private SchedulerNode createNode() { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index c3ae38c..a14b098 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -87,16 +87,16 @@ public class TestSchedulerUtils { private static final Log LOG = LogFactory.getLog(TestSchedulerUtils.class); - + @Test (timeout = 30000) public void testNormalizeRequest() { ResourceCalculator resourceCalculator = new DefaultResourceCalculator(); - + final int minMemory = 1024; final int maxMemory = 8192; - Resource minResource = Resources.createResource(minMemory, 0); - Resource maxResource = Resources.createResource(maxMemory, 0); - + Resource minResource = Resources.createResource(minMemory, 0, 0); + Resource maxResource = Resources.createResource(maxMemory, 0, 0); + ResourceRequest ask = new ResourceRequestPBImpl(); // case negative memory @@ -150,40 +150,42 @@ public void testNormalizeRequest() { maxResource); assertEquals(maxResource.getMemory(), ask.getCapability().getMemory()); } - + @Test (timeout = 30000) public void testNormalizeRequestWithDominantResourceCalculator() { ResourceCalculator resourceCalculator = new DominantResourceCalculator(); - - Resource minResource = Resources.createResource(1024, 1); - Resource maxResource = Resources.createResource(10240, 10); - Resource clusterResource = Resources.createResource(10 * 1024, 10); - + + Resource minResource = Resources.createResource(1024, 1, 0); + Resource maxResource = Resources.createResource(10240, 10, 10); + Resource clusterResource = Resources.createResource(10 * 1024, 10, 10); + ResourceRequest ask = new 
ResourceRequestPBImpl(); - // case negative memory/vcores - ask.setCapability(Resources.createResource(-1024, -1)); + // case negative memory/vcores/vdisks + ask.setCapability(Resources.createResource(-1024, -1, -1)); SchedulerUtils.normalizeRequest( ask, resourceCalculator, clusterResource, minResource, maxResource); assertEquals(minResource, ask.getCapability()); - // case zero memory/vcores - ask.setCapability(Resources.createResource(0, 0)); + // case zero memory/vcores/vdisks + ask.setCapability(Resources.createResource(0, 0, 0)); SchedulerUtils.normalizeRequest( ask, resourceCalculator, clusterResource, minResource, maxResource); assertEquals(minResource, ask.getCapability()); assertEquals(1, ask.getCapability().getVirtualCores()); assertEquals(1024, ask.getCapability().getMemory()); + assertEquals(0, ask.getCapability().getVirtualDisks()); - // case non-zero memory & zero cores - ask.setCapability(Resources.createResource(1536, 0)); + // case non-zero memory & zero cores & zero vdisks + ask.setCapability(Resources.createResource(1536, 0, 0)); SchedulerUtils.normalizeRequest( ask, resourceCalculator, clusterResource, minResource, maxResource); - assertEquals(Resources.createResource(2048, 1), ask.getCapability()); + assertEquals(Resources.createResource(2048, 1, 0), ask.getCapability()); assertEquals(1, ask.getCapability().getVirtualCores()); assertEquals(2048, ask.getCapability().getMemory()); + assertEquals(0, ask.getCapability().getVirtualDisks()); } - + @Test (timeout = 30000) public void testValidateResourceRequestWithErrorLabelsPermission() throws IOException { @@ -195,7 +197,7 @@ public void testValidateResourceRequestWithErrorLabelsPermission() when(queueInfo.getAccessibleNodeLabels()).thenReturn(queueAccessibleNodeLabels); when(scheduler.getQueueInfo(any(String.class), anyBoolean(), anyBoolean())) .thenReturn(queueInfo); - + Resource maxResource = Resources.createResource( YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); @@ -213,19 +215,19 @@ public void testValidateResourceRequestWithErrorLabelsPermission() resReq.setNodeLabelExpression("x"); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); - + resReq.setNodeLabelExpression("x && y"); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); - + resReq.setNodeLabelExpression("y"); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); - + resReq.setNodeLabelExpression(""); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); - + resReq.setNodeLabelExpression(" "); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); @@ -233,13 +235,13 @@ public void testValidateResourceRequestWithErrorLabelsPermission() e.printStackTrace(); fail("Should be valid when request labels is a subset of queue labels"); } - + // queue has labels, failed cases (when ask a label not included by queue) try { // set queue accessible node labesl to [x, y] queueAccessibleNodeLabels.clear(); queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y")); - + Resource resource = Resources.createResource( 0, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); @@ -251,12 +253,12 @@ public void testValidateResourceRequestWithErrorLabelsPermission() fail("Should fail"); } catch (InvalidResourceRequestException e) { } - + try { // set queue accessible node labesl to [x, y] queueAccessibleNodeLabels.clear(); queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y")); - + Resource resource = Resources.createResource( 0, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); @@ -268,13 +270,13 @@ public void testValidateResourceRequestWithErrorLabelsPermission() fail("Should fail"); } catch (InvalidResourceRequestException e) { } - + // queue doesn't have label, succeed (when request no label) queueAccessibleNodeLabels.clear(); try { // set queue 
accessible node labels to empty queueAccessibleNodeLabels.clear(); - + Resource resource = Resources.createResource( 0, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); @@ -282,11 +284,11 @@ public void testValidateResourceRequestWithErrorLabelsPermission() mock(Priority.class), ResourceRequest.ANY, resource, 1); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); - + resReq.setNodeLabelExpression(""); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); - + resReq.setNodeLabelExpression(" "); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); @@ -294,12 +296,12 @@ public void testValidateResourceRequestWithErrorLabelsPermission() e.printStackTrace(); fail("Should be valid when request labels is empty"); } - + // queue doesn't have label, failed (when request any label) try { // set queue accessible node labels to empty queueAccessibleNodeLabels.clear(); - + Resource resource = Resources.createResource( 0, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); @@ -311,13 +313,13 @@ public void testValidateResourceRequestWithErrorLabelsPermission() fail("Should fail"); } catch (InvalidResourceRequestException e) { } - + // queue is "*", always succeeded try { // set queue accessible node labels to empty queueAccessibleNodeLabels.clear(); queueAccessibleNodeLabels.add(RMNodeLabelsManager.ANY); - + Resource resource = Resources.createResource( 0, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); @@ -326,11 +328,11 @@ public void testValidateResourceRequestWithErrorLabelsPermission() resReq.setNodeLabelExpression("x"); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); - + resReq.setNodeLabelExpression("x && y && z"); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", scheduler); - + resReq.setNodeLabelExpression("z"); SchedulerUtils.validateResourceRequest(resReq, maxResource, "queue", 
scheduler); @@ -347,13 +349,15 @@ public void testValidateResourceRequest() { Resource maxResource = Resources.createResource( YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS); // zero memory try { Resource resource = Resources.createResource(0, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY, resource, 1); @@ -367,7 +371,8 @@ public void testValidateResourceRequest() { try { Resource resource = Resources.createResource( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY, resource, 1); @@ -377,12 +382,27 @@ public void testValidateResourceRequest() { fail("Zero vcores should be accepted"); } + // zero vdisks + try { + Resource resource = Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + 0); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); + } catch (InvalidResourceRequestException e) { + fail("Zero vdisks should be accepted"); + } + // max memory try { Resource resource = Resources.createResource( YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - 
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY, resource, 1); @@ -397,7 +417,8 @@ public void testValidateResourceRequest() { Resource resource = Resources.createResource( YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY, resource, 1); @@ -407,11 +428,26 @@ public void testValidateResourceRequest() { fail("Max vcores should not be accepted"); } + // max vdisks + try { + Resource resource = Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); + } catch (InvalidResourceRequestException e) { + fail("Max vdisks should not be accepted"); + } + // negative memory try { Resource resource = Resources.createResource(-1, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY, resource, 1); @@ -426,7 +462,8 @@ public void testValidateResourceRequest() { try { Resource resource = 
Resources.createResource( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY, resource, 1); @@ -437,12 +474,28 @@ public void testValidateResourceRequest() { // expected } + // negative vdisks + try { + Resource resource = Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + -1); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); + fail("Negative vdisks should not be accepted"); + } catch (InvalidResourceRequestException e) { + // expected + } + // more than max memory try { Resource resource = Resources.createResource( YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY, resource, 1); @@ -459,7 +512,8 @@ public void testValidateResourceRequest() { Resources .createResource( YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1); + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS); ResourceRequest resReq = BuilderUtils.newResourceRequest(mock(Priority.class), ResourceRequest.ANY, resource, 1); @@ -469,8 +523,23 @@ public void testValidateResourceRequest() { } catch 
(InvalidResourceRequestException e) { // expected } + + // more than max vdisks + try { + Resource resource = Resources.createResource( + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, + YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS + 1); + ResourceRequest resReq = BuilderUtils.newResourceRequest( + mock(Priority.class), ResourceRequest.ANY, resource, 1); + SchedulerUtils.validateResourceRequest(resReq, maxResource, null, + mockScheduler); + fail("More than max vdisks should not be accepted"); + } catch (InvalidResourceRequestException e) { + // expected + } } - + @Test public void testValidateResourceBlacklistRequest() throws Exception { @@ -496,21 +565,21 @@ public void testValidateResourceBlacklistRequest() throws Exception { final Configuration conf = rm.getConfig(); final YarnRPC rpc = YarnRPC.create(conf); - UserGroupInformation currentUser = + UserGroupInformation currentUser = UserGroupInformation.createRemoteUser(applicationAttemptId.toString()); Credentials credentials = containerManager.getContainerCredentials(); final InetSocketAddress rmBindAddress = rm.getApplicationMasterService().getBindAddress(); Token amRMToken = MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress, - credentials.getAllTokens()); + credentials.getAllTokens()); currentUser.addToken(amRMToken); ApplicationMasterProtocol client = currentUser.doAs(new PrivilegedAction() { @Override public ApplicationMasterProtocol run() { return (ApplicationMasterProtocol) rpc.getProxy( - ApplicationMasterProtocol.class, rmBindAddress, conf); + ApplicationMasterProtocol.class, rmBindAddress, conf); } }); @@ -532,7 +601,7 @@ public ApplicationMasterProtocol run() { } rm.stop(); - + Assert.assertTrue( "Didn't not catch InvalidResourceBlacklistRequestException", error); } @@ -561,7 +630,7 @@ public void testComparePriorities(){ public void testCreateAbnormalContainerStatus() { ContainerStatus cd =
SchedulerUtils.createAbnormalContainerStatus( ContainerId.newInstance(ApplicationAttemptId.newInstance( - ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x"); + ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x"); Assert.assertEquals(ContainerExitStatus.ABORTED, cd.getExitStatus()); } @@ -569,15 +638,15 @@ public void testCreateAbnormalContainerStatus() { public void testCreatePreemptedContainerStatus() { ContainerStatus cd = SchedulerUtils.createPreemptedContainerStatus( ContainerId.newInstance(ApplicationAttemptId.newInstance( - ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x"); + ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x"); Assert.assertEquals(ContainerExitStatus.PREEMPTED, cd.getExitStatus()); } public static SchedulerApplication - verifyAppAddedAndRemovedFromScheduler( - Map> applications, - EventHandler handler, String queueName) - throws Exception { + verifyAppAddedAndRemovedFromScheduler( + Map> applications, + EventHandler handler, String queueName) + throws Exception { ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1); AppAddedSchedulerEvent appAddedEvent = @@ -595,4 +664,4 @@ public void testCreatePreemptedContainerStatus() { Assert.assertNull(applications.get(appId)); return app; } -} +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java index 7b6aaf3..132574e 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java +++ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java @@ -102,8 +102,15 @@ protected ResourceRequest createResourceRequest( protected ResourceRequest createResourceRequest( int memory, int vcores, String host, int priority, int numContainers, boolean relaxLocality) { + return createResourceRequest(memory, vcores, 0, host, priority, + numContainers, relaxLocality); + } + + protected ResourceRequest createResourceRequest( + int memory, int vcores, int vdisks, String host, int priority, + int numContainers, boolean relaxLocality) { ResourceRequest request = recordFactory.newRecordInstance(ResourceRequest.class); - request.setCapability(BuilderUtils.newResource(memory, vcores)); + request.setCapability(BuilderUtils.newResource(memory, vcores, vdisks)); request.setResourceName(host); request.setNumContainers(numContainers); Priority prio = recordFactory.newRecordInstance(Priority.class); @@ -128,6 +135,11 @@ protected ApplicationAttemptId createSchedulingRequest( } protected ApplicationAttemptId createSchedulingRequest( + int memory, int vcores, int vdisks, String queueId, String userId) { + return createSchedulingRequest(memory, vcores, vdisks, queueId, userId, 1); + } + + protected ApplicationAttemptId createSchedulingRequest( int memory, String queueId, String userId, int numContainers) { return createSchedulingRequest(memory, queueId, userId, numContainers, 1); } @@ -138,6 +150,13 @@ protected ApplicationAttemptId createSchedulingRequest( } protected ApplicationAttemptId createSchedulingRequest( + int memory, int vcores, int vdisks, String queueId, String userId, + int numContainers) { + return createSchedulingRequest(memory, vcores, vdisks, queueId, userId, + numContainers, 1); + } + + protected ApplicationAttemptId createSchedulingRequest( int memory, String queueId, String userId, int numContainers, int priority) { return 
createSchedulingRequest(memory, 1, queueId, userId, numContainers, priority); @@ -146,6 +165,13 @@ protected ApplicationAttemptId createSchedulingRequest( protected ApplicationAttemptId createSchedulingRequest( int memory, int vcores, String queueId, String userId, int numContainers, int priority) { + return createSchedulingRequest(memory, vcores, 0, queueId, userId, + numContainers, priority); + } + + protected ApplicationAttemptId createSchedulingRequest( + int memory, int vcores, int vdisks, String queueId, String userId, + int numContainers, int priority) { ApplicationAttemptId id = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++); scheduler.addApplication(id.getApplicationId(), queueId, userId, false); // This conditional is for testAclSubmitApplication where app is rejected @@ -154,8 +180,8 @@ protected ApplicationAttemptId createSchedulingRequest( scheduler.addApplicationAttempt(id, false, false); } List ask = new ArrayList(); - ResourceRequest request = createResourceRequest(memory, vcores, ResourceRequest.ANY, - priority, numContainers, true); + ResourceRequest request = createResourceRequest(memory, vcores, vdisks, + ResourceRequest.ANY, priority, numContainers, true); ask.add(request); scheduler.allocate(id, ask, new ArrayList(), null, null); RMApp rmApp = mock(RMApp.class); @@ -204,6 +230,13 @@ protected void createSchedulingRequestExistingApplication( } protected void createSchedulingRequestExistingApplication( + int memory, int vcores, int vdisks, int priority, ApplicationAttemptId attId) { + ResourceRequest request = createResourceRequest(memory, vcores, vdisks, + ResourceRequest.ANY, priority, 1, true); + createSchedulingRequestExistingApplication(request, attId); + } + + protected void createSchedulingRequestExistingApplication( ResourceRequest request, ApplicationAttemptId attId) { List ask = new ArrayList(); ask.add(request); @@ -225,4 +258,4 @@ protected void createApplicationWithAMResource(ApplicationAttemptId attId, new 
AppAttemptAddedSchedulerEvent(attId, false); scheduler.handle(attempAddedEvent); } -} \ No newline at end of file +} diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java index 5a170cf..5725389 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FakeSchedulable.java @@ -59,14 +59,14 @@ public FakeSchedulable(int minShare, int maxShare, double memoryWeight) { public FakeSchedulable(int minShare, int maxShare, double weight, int fairShare, int usage, long startTime) { - this(Resources.createResource(minShare, 0), Resources.createResource(maxShare, 0), - new ResourceWeights((float)weight), Resources.createResource(fairShare, 0), - Resources.createResource(usage, 0), startTime); + this(Resources.createResource(minShare, 0, 0), Resources.createResource(maxShare, 0, 0), + new ResourceWeights((float)weight), Resources.createResource(fairShare, 0, 0), + Resources.createResource(usage, 0, 0), startTime); } public FakeSchedulable(Resource minShare, ResourceWeights weights) { - this(minShare, Resources.createResource(Integer.MAX_VALUE, Integer.MAX_VALUE), - weights, Resources.createResource(0, 0), Resources.createResource(0, 0), 0); + this(minShare, Resources.createResource(Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE), + weights, Resources.createResource(0, 0, 0), Resources.createResource(0, 0, 0), 0); } public FakeSchedulable(Resource minShare, 
Resource maxShare, diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java index 656e20d..3820e1e 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java @@ -23,10 +23,12 @@ import java.io.FileWriter; import java.io.PrintWriter; import java.util.List; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueuePlacementRule.NestedUserQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FairSharePolicy; @@ -162,11 +164,11 @@ public void testAllocationFileParsing() throws Exception { out.println(""); // Give queue A a minimum of 1024 M out.println(""); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); // Give queue B a minimum of 2048 M out.println(""); - out.println("2048mb,0vcores"); + out.println("2048mb,0vcores,0vdisks"); out.println("alice,bob admins"); out.println("fair"); out.println(""); @@ -232,9 +234,9 @@ public void 
testAllocationFileParsing() throws Exception { assertEquals(Resources.createResource(0), queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); - assertEquals(Resources.createResource(1024, 0), + assertEquals(Resources.createResource(1024, 0, 0), queueConf.getMinResources("root.queueA")); - assertEquals(Resources.createResource(2048, 0), + assertEquals(Resources.createResource(2048, 0, 0), queueConf.getMinResources("root.queueB")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueC")); @@ -355,11 +357,11 @@ public void testBackwardsCompatibleAllocationFileParsing() throws Exception { out.println(""); // Give queue A a minimum of 1024 M out.println(""); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); // Give queue B a minimum of 2048 M out.println(""); - out.println("2048mb,0vcores"); + out.println("2048mb,0vcores,0vdisks"); out.println("alice,bob admins"); out.println(""); // Give queue C no minimum @@ -405,9 +407,9 @@ public void testBackwardsCompatibleAllocationFileParsing() throws Exception { assertEquals(Resources.createResource(0), queueConf.getMinResources("root." 
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); - assertEquals(Resources.createResource(1024, 0), + assertEquals(Resources.createResource(1024, 0, 0), queueConf.getMinResources("root.queueA")); - assertEquals(Resources.createResource(2048, 0), + assertEquals(Resources.createResource(2048, 0, 0), queueConf.getMinResources("root.queueB")); assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueC")); @@ -525,6 +527,144 @@ public void testQueueAlongsideRoot() throws Exception { allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); } + + @Test + public void testDRFResourceTypeConfiguration() throws Exception { + Configuration conf = new Configuration(); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + // Queue A: cpu + out.println(""); + out.println("CPU"); + out.println("drf"); + out.println(""); + // Queue B: memory + out.println(""); + out.println("MEMORY"); + out.println(""); + // Queue C: diskIO + out.println(""); + out.println("DISKIO"); + out.println(""); + // Queue D: cpu & memory + out.println(""); + out.println("CPU,MEMORY"); + out.println(""); + // Queue E: cpu & diskIO + out.println(""); + out.println("CPU,DISKIO"); + out.println(""); + // Queue F: memory & diskIO + out.println(""); + out.println("MEMORY,DISKIO"); + out.println(""); + // Queue G: cpu & memory & diskIO + out.println(""); + out.println("CPU,MEMORY,DISKIO"); + out.println(""); + // Queue H: default + out.println(""); + out.println(""); + // Set default scheduling policy to DRF + out.println("drf"); + //Default: only enable cpu & memory + out.println("CPU,MEMORY"); + out.println(""); + out.close(); + + allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); + allocLoader.reloadAllocations(); 
+ AllocationConfiguration queueConf = confHolder.allocConf; + + // Queue A: cpu + Set enabledResources = ((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueA")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.CPU})); + // Queue B: memory + enabledResources = ((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueB")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.MEMORY})); + // Queue C: diskIO + enabledResources = ((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueC")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.DISKIO})); + // Queue D: cpu & memory + enabledResources = ((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueD")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.CPU, ResourceType.MEMORY})); + // Queue E: cpu & diskIO + enabledResources = ((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueE")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.CPU, ResourceType.DISKIO})); + // Queue F: memory & diskIO + enabledResources = ((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueF")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.MEMORY, ResourceType.DISKIO})); + // Queue G: cpu & memory & diskIO + enabledResources = ((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueG")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.CPU, ResourceType.MEMORY, + ResourceType.DISKIO})); + // Queue H: default + enabledResources = 
((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueH")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.CPU, ResourceType.MEMORY})); + } + + @Test + public void testDRFResourceTypeDefaultConfiguration() throws Exception { + Configuration conf = new Configuration(); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + AllocationFileLoaderService allocLoader = new AllocationFileLoaderService(); + + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); + out.println(""); + out.println(""); + // Queue A: default + out.println(""); + out.println(""); + // Set default scheduling policy to DRF + out.println("drf"); + out.println(""); + out.close(); + + allocLoader.init(conf); + ReloadListener confHolder = new ReloadListener(); + allocLoader.setReloadListener(confHolder); + allocLoader.reloadAllocations(); + AllocationConfiguration queueConf = confHolder.allocConf; + + // Queue A: default (cpu & memory) + Set enabledResources = ((DominantResourceFairnessPolicy) + queueConf.getSchedulingPolicy("root.queueA")).getEnabledResourceTypes(); + assertTrue(checkDRFResourceTypes(enabledResources, + new ResourceType[]{ResourceType.CPU, ResourceType.MEMORY})); + } + + private boolean checkDRFResourceTypes(Set configured, + ResourceType...
types) { + if (types.length != configured.size()) { + return false; + } + for (ResourceType type : types) { + configured.remove(type); + } + return configured.isEmpty(); + } private class ReloadListener implements AllocationFileLoaderService.Listener { public AllocationConfiguration allocConf; diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java index 9d8dd07..21bf4d5 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java @@ -177,19 +177,37 @@ public void testEmptyList() { */ @Test public void testCPU() { - scheds.add(new FakeSchedulable(Resources.createResource(0, 20), + scheds.add(new FakeSchedulable(Resources.createResource(0, 20, 0), new ResourceWeights(2.0f))); - scheds.add(new FakeSchedulable(Resources.createResource(0, 0), + scheds.add(new FakeSchedulable(Resources.createResource(0, 0, 0), new ResourceWeights(1.0f))); - scheds.add(new FakeSchedulable(Resources.createResource(0, 5), + scheds.add(new FakeSchedulable(Resources.createResource(0, 5, 0), new ResourceWeights(1.0f))); - scheds.add(new FakeSchedulable(Resources.createResource(0, 15), + scheds.add(new FakeSchedulable(Resources.createResource(0, 15, 0), new ResourceWeights(0.5f))); ComputeFairShares.computeShares(scheds, - Resources.createResource(0, 45), ResourceType.CPU); + Resources.createResource(0, 45, 0), ResourceType.CPU); 
verifyCPUShares(20, 5, 5, 15); } - + + /** + * Test that VDisks works as well as memory + */ + @Test + public void testVDisks() { + scheds.add(new FakeSchedulable(Resources.createResource(0, 0, 20), + new ResourceWeights(2.0f))); + scheds.add(new FakeSchedulable(Resources.createResource(0, 0, 0), + new ResourceWeights(1.0f))); + scheds.add(new FakeSchedulable(Resources.createResource(0, 0, 5), + new ResourceWeights(1.0f))); + scheds.add(new FakeSchedulable(Resources.createResource(0, 0, 15), + new ResourceWeights(0.5f))); + ComputeFairShares.computeShares(scheds, + Resources.createResource(0, 0, 45), ResourceType.DISKIO); + verifyVDisksShares(20, 5, 5, 15); + } + /** * Check that a given list of shares have been assigned to this.scheds. */ @@ -209,4 +227,14 @@ private void verifyCPUShares(int... shares) { Assert.assertEquals(shares[i], scheds.get(i).getFairShare().getVirtualCores()); } } + + /** + * Check that a given list of shares have been assigned to this.scheds. + */ + private void verifyVDisksShares(int... 
shares) { + Assert.assertEquals(scheds.size(), shares.length); + for (int i = 0; i < shares.length; i ++) { + Assert.assertEquals(shares[i], scheds.get(i).getFairShare().getVirtualDisks()); + } + } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 843555f..77e1672 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -153,12 +153,12 @@ public void testConfValidation() throws Exception { try { scheduler.serviceInit(conf); fail("Exception is expected because the min memory allocation is" + - " larger than the max memory allocation."); + " larger than the max memory allocation."); } catch (YarnRuntimeException e) { // Exception is expected. assertTrue("The thrown exception is not the expected one.", - e.getMessage().startsWith( - "Invalid resource scheduler memory")); + e.getMessage().startsWith( + "Invalid resource scheduler memory")); } conf = new YarnConfiguration(); @@ -167,12 +167,25 @@ public void testConfValidation() throws Exception { try { scheduler.serviceInit(conf); fail("Exception is expected because the min vcores allocation is" + - " larger than the max vcores allocation."); + " larger than the max vcores allocation."); } catch (YarnRuntimeException e) { // Exception is expected. 
assertTrue("The thrown exception is not the expected one.", - e.getMessage().startsWith( - "Invalid resource scheduler vcores")); + e.getMessage().startsWith( + "Invalid resource scheduler vcores")); + } + + conf = new YarnConfiguration(); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, 2); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS, 1); + try { + scheduler.serviceInit(conf); + fail("Exception is expected because the min vdisks allocation is" + + " larger than the max vdisks allocation."); + } catch (YarnRuntimeException e) { + // Exception is expected. + assertTrue("The thrown exception is not the expected one.", + e.getMessage().startsWith("Invalid resource scheduler vdisks")); } } @@ -186,17 +199,20 @@ public void testLoadConfigurationOnInitialize() throws IOException { conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE, .5); conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK, .7); conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, - true); + true); conf.setInt(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS, - 10); + 10); conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_RACK_MS, - 5000); + 5000); conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_NODE_MS, - 5000); + 5000); conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512); - conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, - 128); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VDISKS, 5); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, 2); + conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, + 128); + conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VDISKS, 1); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); @@ -206,52 +222,66 
@@ public void testLoadConfigurationOnInitialize() throws IOException { Assert.assertEquals(.5, scheduler.nodeLocalityThreshold, .01); Assert.assertEquals(.7, scheduler.rackLocalityThreshold, .01); Assert.assertTrue("The continuous scheduling should be enabled", - scheduler.continuousSchedulingEnabled); + scheduler.continuousSchedulingEnabled); Assert.assertEquals(10, scheduler.continuousSchedulingSleepMs); Assert.assertEquals(5000, scheduler.nodeLocalityDelayMs); Assert.assertEquals(5000, scheduler.rackLocalityDelayMs); Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemory()); Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemory()); - Assert.assertEquals(128, - scheduler.getIncrementResourceCapability().getMemory()); + Assert.assertEquals(5, scheduler.getMaximumResourceCapability().getVirtualDisks()); + Assert.assertEquals(2, scheduler.getMinimumResourceCapability().getVirtualDisks()); + Assert.assertEquals(128, + scheduler.getIncrementResourceCapability().getMemory()); + Assert.assertEquals(1, + scheduler.getIncrementResourceCapability().getVirtualDisks()); } - - @Test + + @Test public void testNonMinZeroResourcesSettings() throws IOException { scheduler = new FairScheduler(); YarnConfiguration conf = new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 256); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 1); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, 1); + conf.setInt( + FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512); conf.setInt( - FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512); + FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2); conf.setInt( - FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2); + FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VDISKS, 1); scheduler.init(conf); scheduler.reinitialize(conf, null); 
Assert.assertEquals(256, scheduler.getMinimumResourceCapability().getMemory()); Assert.assertEquals(1, scheduler.getMinimumResourceCapability().getVirtualCores()); + Assert.assertEquals(1, scheduler.getMinimumResourceCapability().getVirtualDisks()); Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemory()); Assert.assertEquals(2, scheduler.getIncrementResourceCapability().getVirtualCores()); - } - - @Test - public void testMinZeroResourcesSettings() throws IOException { - scheduler = new FairScheduler(); + Assert.assertEquals(1, scheduler.getIncrementResourceCapability().getVirtualDisks()); + } + + @Test + public void testMinZeroResourcesSettings() throws IOException { + FairScheduler fs = new FairScheduler(); YarnConfiguration conf = new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VDISKS, 0); conf.setInt( - FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512); + FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512); conf.setInt( - FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2); - scheduler.init(conf); - scheduler.reinitialize(conf, null); - Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getMemory()); - Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getVirtualCores()); - Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemory()); - Assert.assertEquals(2, scheduler.getIncrementResourceCapability().getVirtualCores()); - } - + FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2); + conf.setInt( + FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VDISKS, 1); + fs.init(conf); + fs.reinitialize(conf, null); + Assert.assertEquals(0, fs.getMinimumResourceCapability().getMemory()); + Assert.assertEquals(0, 
fs.getMinimumResourceCapability().getVirtualCores()); + Assert.assertEquals(0, fs.getMinimumResourceCapability().getVirtualDisks()); + Assert.assertEquals(512, fs.getIncrementResourceCapability().getMemory()); + Assert.assertEquals(2, fs.getIncrementResourceCapability().getVirtualCores()); + Assert.assertEquals(1, fs.getIncrementResourceCapability().getVirtualDisks()); + } + @Test public void testAggregateCapacityTracking() throws Exception { scheduler.init(conf); @@ -304,7 +334,7 @@ public void testSimpleFairShareCalculation() throws IOException { Collection queues = scheduler.getQueueManager().getLeafQueues(); assertEquals(3, queues.size()); - + // Divided three ways - between the two queues and the default queue for (FSLeafQueue p : queues) { assertEquals(3414, p.getFairShare().getMemory()); @@ -532,7 +562,7 @@ public void testSimpleHierarchicalFairShareCalculation() throws IOException { QueueManager queueManager = scheduler.getQueueManager(); Collection queues = queueManager.getLeafQueues(); assertEquals(3, queues.size()); - + FSLeafQueue queue1 = queueManager.getLeafQueue("default", true); FSLeafQueue queue2 = queueManager.getLeafQueue("parent.queue2", true); FSLeafQueue queue3 = queueManager.getLeafQueue("parent.queue3", true); @@ -565,11 +595,11 @@ public void testHierarchicalQueuesSimilarParents() throws IOException { FSLeafQueue leafQueue2 = queueManager.getLeafQueue("parent", true); Assert.assertNull(leafQueue2); Assert.assertEquals(2, queueManager.getLeafQueues().size()); - + FSLeafQueue leafQueue3 = queueManager.getLeafQueue("parent.child.grandchild", true); Assert.assertNull(leafQueue3); Assert.assertEquals(2, queueManager.getLeafQueues().size()); - + FSLeafQueue leafQueue4 = queueManager.getLeafQueue("parent.sister", true); Assert.assertNotNull(leafQueue4); Assert.assertEquals("root.parent.sister", leafQueue4.getName()); @@ -600,7 +630,7 @@ public void testSchedulerRootQueueMetrics() throws Exception { // Make sure reserved memory gets updated 
correctly assertEquals(1024, scheduler.rootMetrics.getReservedMB()); - + // Now another node checks in with capacity RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); @@ -626,17 +656,17 @@ public void testSimpleContainerAllocation() throws IOException { // Add a node RMNode node1 = MockNodes - .newNodeInfo(1, Resources.createResource(1024, 4), 1, "127.0.0.1"); + .newNodeInfo(1, Resources.createResource(1024, 4, 10), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); // Add another node RMNode node2 = - MockNodes.newNodeInfo(1, Resources.createResource(512, 2), 2, "127.0.0.2"); + MockNodes.newNodeInfo(1, Resources.createResource(512, 2, 5), 2, "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); - createSchedulingRequest(512, 2, "queue1", "user1", 2); + createSchedulingRequest(512, 2, 5, "queue1", "user1", 2); scheduler.update(); @@ -646,25 +676,30 @@ public void testSimpleContainerAllocation() throws IOException { // Asked for less than increment allocation. assertEquals(FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemory()); NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2); scheduler.handle(updateEvent2); assertEquals(1024, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemory()); assertEquals(2, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getVirtualCores()); + getResourceUsage().getVirtualCores()); + assertEquals(5, scheduler.getQueueManager().getQueue("queue1"). 
+ getResourceUsage().getVirtualDisks()); // verify metrics QueueMetrics queue1Metrics = scheduler.getQueueManager().getQueue("queue1") .getMetrics(); assertEquals(1024, queue1Metrics.getAllocatedMB()); assertEquals(2, queue1Metrics.getAllocatedVirtualCores()); + assertEquals(5, queue1Metrics.getAllocatedVirtualDisks()); assertEquals(1024, scheduler.getRootQueueMetrics().getAllocatedMB()); assertEquals(2, scheduler.getRootQueueMetrics().getAllocatedVirtualCores()); + assertEquals(5, scheduler.getRootQueueMetrics().getAllocatedVirtualDisks()); assertEquals(512, scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(4, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); + assertEquals(10, scheduler.getRootQueueMetrics().getAvailableVirtualDisks()); } @Test (timeout = 5000) @@ -684,7 +719,7 @@ public void testSimpleContainerReservation() throws Exception { createSchedulingRequest(1024, "queue1", "user1", 1); scheduler.update(); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1); - + scheduler.handle(updateEvent); // Make sure queue 1 is allocated app capacity @@ -698,7 +733,7 @@ public void testSimpleContainerReservation() throws Exception { // Make sure queue 2 is waiting with a reservation assertEquals(0, scheduler.getQueueManager().getQueue("queue2"). 
- getResourceUsage().getMemory()); + getResourceUsage().getMemory()); assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory()); // Now another node checks in with capacity @@ -737,7 +772,7 @@ public void testUserAsDefaultQueue() throws Exception { assertEquals("root.user1", resourceManager.getRMContext().getRMApps() .get(appAttemptId.getApplicationId()).getQueue()); } - + @Test public void testNotUserAsDefaultQueue() throws Exception { conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false"); @@ -784,10 +819,10 @@ public void testAssignToQueue() throws Exception { RMApp rmApp1 = new MockRMApp(0, 0, RMAppState.NEW); RMApp rmApp2 = new MockRMApp(1, 1, RMAppState.NEW); - + FSLeafQueue queue1 = scheduler.assignToQueue(rmApp1, "default", "asterix"); FSLeafQueue queue2 = scheduler.assignToQueue(rmApp2, "notdefault", "obelix"); - + // assert FSLeafQueue's name is the correct name is the one set in the RMApp assertEquals(rmApp1.getQueue(), queue1.getName()); assertEquals("root.asterix", rmApp1.getQueue()); @@ -812,7 +847,7 @@ public void testAssignToNonLeafQueueReturnsNull() throws Exception { assertNull(scheduler.assignToQueue(rmApp1, "root.child1", "tintin")); assertNotNull(scheduler.assignToQueue(rmApp2, "root.child2", "snowy")); } - + @Test public void testQueuePlacementWithPolicy() throws Exception { conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, @@ -848,7 +883,7 @@ public void testQueuePlacementWithPolicy() throws Exception { assertEquals("root.user5subgroup2", scheduler.getSchedulerApp(appId).getQueueName()); appId = createSchedulingRequest(1024, "default", "otheruser"); assertEquals("root.default", scheduler.getSchedulerApp(appId).getQueueName()); - + // test without specified as first rule rules = new ArrayList(); rules.add(new QueuePlacementRule.User().initialize(false, null)); @@ -872,10 +907,10 @@ public void testFairShareWithMinAlloc() throws Exception { out.println(""); out.println(""); 
out.println(""); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); out.println(""); - out.println("2048mb,0vcores"); + out.println("2048mb,0vcores,0vdisks"); out.println(""); out.println(""); out.close(); @@ -908,7 +943,7 @@ else if (p.getName().equals("root.queueB")) { } } } - + @Test public void testNestedUserQueue() throws IOException { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); @@ -918,7 +953,7 @@ public void testNestedUserQueue() throws IOException { out.println(""); out.println(""); out.println(""); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); out.println(""); out.println(""); @@ -950,7 +985,7 @@ public void testFairShareAndWeightsInNestedUserQueueRule() throws Exception { out.println(""); out.println(""); out.println(""); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); out.println(""); out.println(""); @@ -1135,9 +1170,9 @@ public void testQueueDemandCalculation() throws Exception { scheduler.addApplication(id22.getApplicationId(), "root.queue2", "user1", false); scheduler.addApplicationAttempt(id22, false, false); - int minReqSize = + int minReqSize = FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB; - + // First ask, queue1 requests 1 large (minReqSize * 2). 
List ask1 = new ArrayList(); ResourceRequest request1 = @@ -1178,7 +1213,7 @@ public void testAppAdditionAndRemoval() throws Exception { scheduler.reinitialize(conf, resourceManager.getRMContext()); ApplicationAttemptId attemptId =createAppAttemptId(1, 1); AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent(attemptId.getApplicationId(), "default", - "user1"); + "user1"); scheduler.handle(appAddedEvent); AppAttemptAddedSchedulerEvent attemptAddedEvent = new AppAttemptAddedSchedulerEvent(createAppAttemptId(1, 1), false); @@ -1211,15 +1246,15 @@ public void testHierarchicalQueueAllocationFileParsing() throws IOException, SAX out.println(""); out.println(""); out.println(""); - out.println("2048mb,0vcores"); + out.println("2048mb,0vcores,0vdisks"); out.println(""); out.println(""); - out.println("2048mb,0vcores"); + out.println("2048mb,0vcores,0vdisks"); out.println(""); - out.println("2048mb,0vcores"); + out.println("2048mb,0vcores,0vdisks"); out.println(""); out.println(""); - out.println("2048mb,0vcores"); + out.println("2048mb,0vcores,0vdisks"); out.println(""); out.println(""); out.println(""); @@ -1239,7 +1274,7 @@ public void testHierarchicalQueueAllocationFileParsing() throws IOException, SAX // Make sure querying for queues didn't create any new ones: Assert.assertEquals(4, leafQueues.size()); } - + @Test public void testConfigureRootQueue() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); @@ -1251,10 +1286,10 @@ public void testConfigureRootQueue() throws Exception { out.println(""); out.println(" drf"); out.println(" "); - out.println(" 1024mb,1vcores"); + out.println(" 1024mb,1vcores,1vdisks"); out.println(" "); out.println(" "); - out.println(" 1024mb,4vcores"); + out.println(" 1024mb,4vcores,4vdisks"); out.println(" "); out.println(" 100"); out.println(" 120"); @@ -1270,10 +1305,10 @@ public void testConfigureRootQueue() throws Exception { scheduler.start(); scheduler.reinitialize(conf, 
resourceManager.getRMContext()); QueueManager queueManager = scheduler.getQueueManager(); - + FSQueue root = queueManager.getRootQueue(); assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy); - + assertNotNull(queueManager.getLeafQueue("child1", false)); assertNotNull(queueManager.getLeafQueue("child2", false)); @@ -1291,10 +1326,10 @@ public void testChoiceOfPreemptedContainers() throws Exception { conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10000); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE + ".allocation.file", ALLOC_FILE); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false"); - + MockClock clock = new MockClock(); scheduler.setClock(clock); - + PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); @@ -1366,10 +1401,10 @@ public void testChoiceOfPreemptedContainers() throws Exception { assertEquals(2, scheduler.getSchedulerApp(app4).getLiveContainers().size()); // Now new requests arrive from queueC and default - createSchedulingRequest(1 * 1024, 1, "queueC", "user1", 1, 1); - createSchedulingRequest(1 * 1024, 1, "queueC", "user1", 1, 1); - createSchedulingRequest(1 * 1024, 1, "default", "user1", 1, 1); - createSchedulingRequest(1 * 1024, 1, "default", "user1", 1, 1); + createSchedulingRequest(1 * 1024, 1, 1, "queueC", "user1", 1, 1); + createSchedulingRequest(1 * 1024, 1, 1, "queueC", "user1", 1, 1); + createSchedulingRequest(1 * 1024, 1, 1, "default", "user1", 1, 1); + createSchedulingRequest(1 * 1024, 1, 1, "default", "user1", 1, 1); scheduler.update(); // We should be able to claw back one container from queueA and queueB each. 
@@ -1532,23 +1567,23 @@ public void testPreemptionDecision() throws Exception { out.println(""); out.println(""); out.println(""); - out.println("0mb,0vcores"); + out.println("0mb,0vcores,0vdisks"); out.println(""); out.println(""); out.println(".25"); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); out.println(""); out.println(".25"); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); out.println(""); out.println(".25"); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); out.println(""); out.println(".25"); - out.println("1024mb,0vcores"); + out.println("1024mb,0vcores,0vdisks"); out.println(""); out.println("5"); out.println("10"); @@ -1562,19 +1597,19 @@ public void testPreemptionDecision() throws Exception { // Create four nodes RMNode node1 = - MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2), 1, + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2, 4), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); RMNode node2 = - MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2), 2, + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2, 4), 2, "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); RMNode node3 = - MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2), 3, + MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2, 4), 3, "127.0.0.3"); NodeAddedSchedulerEvent nodeEvent3 = new NodeAddedSchedulerEvent(node3); scheduler.handle(nodeEvent3); @@ -1776,7 +1811,7 @@ public void testPreemptionDecisionWithVariousTimeout() throws Exception { scheduler.update(); clock.tick(6); assertEquals( - 1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory()); + 1024, scheduler.resToPreempt(queueB1, clock.getTime()).getMemory()); assertEquals( 0, 
scheduler.resToPreempt(queueB2, clock.getTime()).getMemory()); assertEquals( @@ -1970,10 +2005,10 @@ public void testMultipleContainersWaitingForReservation() throws IOException { ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue2", "user2", 1); ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue3", "user3", 1); - + scheduler.update(); scheduler.handle(updateEvent); - + // One container should get reservation and the other should get nothing assertEquals(1024, scheduler.getSchedulerApp(attId1).getCurrentReservation().getMemory()); @@ -2002,40 +2037,40 @@ public void testUserMaxRunningApps() throws Exception { // Add a node RMNode node1 = MockNodes - .newNodeInfo(1, Resources.createResource(8192, 8), 1, "127.0.0.1"); + .newNodeInfo(1, Resources.createResource(8192, 8, 10), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - + // Request for app 1 ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 1); - + scheduler.update(); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); - + // App 1 should be running assertEquals(1, scheduler.getSchedulerApp(attId1).getLiveContainers().size()); - + ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1", "user1", 1); - + scheduler.update(); scheduler.handle(updateEvent); - + // App 2 should not be running assertEquals(0, scheduler.getSchedulerApp(attId2).getLiveContainers().size()); - + // Request another container for app 1 createSchedulingRequestExistingApplication(1024, 1, attId1); - + scheduler.update(); scheduler.handle(updateEvent); - + // Request should be fulfilled assertEquals(2, scheduler.getSchedulerApp(attId1).getLiveContainers().size()); } - + @Test (timeout = 5000) public void testReservationWhileMultiplePriorities() throws IOException { scheduler.init(conf); @@ -2045,52 +2080,53 @@ public void 
testReservationWhileMultiplePriorities() throws IOException { // Add a node RMNode node1 = MockNodes - .newNodeInfo(1, Resources.createResource(1024, 4), 1, "127.0.0.1"); + .newNodeInfo(1, Resources.createResource(1024, 4, 2), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - ApplicationAttemptId attId = createSchedulingRequest(1024, 4, "queue1", + ApplicationAttemptId attId = createSchedulingRequest(1024, 4, 2, "queue1", "user1", 1, 2); scheduler.update(); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); - + FSAppAttempt app = scheduler.getSchedulerApp(attId); assertEquals(1, app.getLiveContainers().size()); - + ContainerId containerId = scheduler.getSchedulerApp(attId) .getLiveContainers().iterator().next().getContainerId(); // Cause reservation to be created - createSchedulingRequestExistingApplication(1024, 4, 2, attId); + createSchedulingRequestExistingApplication(1024, 4, 2, 2, attId); scheduler.update(); scheduler.handle(updateEvent); assertEquals(1, app.getLiveContainers().size()); assertEquals(0, scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); - + // Create request at higher priority - createSchedulingRequestExistingApplication(1024, 4, 1, attId); + createSchedulingRequestExistingApplication(1024, 4, 2, 1, attId); scheduler.update(); scheduler.handle(updateEvent); - + assertEquals(1, app.getLiveContainers().size()); // Reserved container should still be at lower priority for (RMContainer container : app.getReservedContainers()) { assertEquals(2, container.getReservedPriority().getPriority()); } - + // Complete container scheduler.allocate(attId, new ArrayList(), Arrays.asList(containerId), null, null); assertEquals(1024, scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(4, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); - + 
assertEquals(2, scheduler.getRootQueueMetrics().getAvailableVirtualDisks()); + // Schedule at opening scheduler.update(); scheduler.handle(updateEvent); - + // Reserved container (at lower priority) should be run Collection liveContainers = app.getLiveContainers(); assertEquals(1, liveContainers.size()); @@ -2099,8 +2135,9 @@ public void testReservationWhileMultiplePriorities() throws IOException { } assertEquals(0, scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); + assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualDisks()); } - + @Test public void testAclSubmitApplication() throws Exception { // Set acl's @@ -2134,7 +2171,7 @@ public void testAclSubmitApplication() throws Exception { FSAppAttempt app2 = scheduler.getSchedulerApp(attId2); assertNull("The application was allowed", app2); } - + @Test (timeout = 5000) public void testMultipleNodesSingleRackRequest() throws Exception { scheduler.init(conf); @@ -2154,11 +2191,11 @@ public void testMultipleNodesSingleRackRequest() throws Exception { scheduler.handle(nodeEvent1); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); - + ApplicationAttemptId appId = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++); scheduler.addApplication(appId.getApplicationId(), "queue1", "user1", false); scheduler.addApplicationAttempt(appId, false, false); - + // 1 request with 2 nodes on the same rack. 
another request with 1 node on // a different rack List asks = new ArrayList(); @@ -2170,7 +2207,7 @@ public void testMultipleNodesSingleRackRequest() throws Exception { asks.add(createResourceRequest(1024, ResourceRequest.ANY, 1, 2, true)); scheduler.allocate(appId, asks, new ArrayList(), null, null); - + // node 1 checks in scheduler.update(); NodeUpdateSchedulerEvent updateEvent1 = new NodeUpdateSchedulerEvent(node1); @@ -2185,7 +2222,7 @@ public void testMultipleNodesSingleRackRequest() throws Exception { // should assign rack local assertEquals(2, scheduler.getSchedulerApp(appId).getLiveContainers().size()); } - + @Test (timeout = 5000) public void testFifoWithinQueue() throws Exception { scheduler.init(conf); @@ -2194,10 +2231,10 @@ public void testFifoWithinQueue() throws Exception { RMNode node1 = MockNodes - .newNodeInfo(1, Resources.createResource(3072, 3), 1, "127.0.0.1"); + .newNodeInfo(1, Resources.createResource(3072, 3, 10), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - + // Even if submitted at exact same time, apps will be deterministically // ordered by name. ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", @@ -2206,26 +2243,26 @@ public void testFifoWithinQueue() throws Exception { "user1", 2); FSAppAttempt app1 = scheduler.getSchedulerApp(attId1); FSAppAttempt app2 = scheduler.getSchedulerApp(attId2); - + FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true); queue1.setPolicy(new FifoPolicy()); - + scheduler.update(); // First two containers should go to app 1, third should go to app 2. // Because tests set assignmultiple to false, each heartbeat assigns a single // container. 
- + NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); assertEquals(1, app1.getLiveContainers().size()); assertEquals(0, app2.getLiveContainers().size()); - + scheduler.handle(updateEvent); assertEquals(2, app1.getLiveContainers().size()); assertEquals(0, app2.getLiveContainers().size()); - + scheduler.handle(updateEvent); assertEquals(2, app1.getLiveContainers().size()); assertEquals(1, app2.getLiveContainers().size()); @@ -2239,7 +2276,7 @@ public void testMaxAssign() throws Exception { scheduler.reinitialize(conf, resourceManager.getRMContext()); RMNode node = - MockNodes.newNodeInfo(1, Resources.createResource(16384, 16), 0, + MockNodes.newNodeInfo(1, Resources.createResource(16384, 16, 20), 0, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); @@ -2263,25 +2300,25 @@ public void testMaxAssign() throws Exception { assertEquals("Incorrect number of containers allocated", 8, app .getLiveContainers().size()); } - + @Test(timeout = 3000) public void testMaxAssignWithZeroMemoryContainers() throws Exception { conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0); - + scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); RMNode node = - MockNodes.newNodeInfo(1, Resources.createResource(16384, 16), 0, + MockNodes.newNodeInfo(1, Resources.createResource(16384, 16, 20), 0, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); ApplicationAttemptId attId = - createSchedulingRequest(0, 1, "root.default", "user", 8); + createSchedulingRequest(0, 1, 1, "root.default", "user", 8); FSAppAttempt app = scheduler.getSchedulerApp(attId); // set maxAssign to 
2: only 2 containers should be allocated @@ -2302,14 +2339,14 @@ public void testMaxAssignWithZeroMemoryContainers() throws Exception { /** * Test to verify the behavior of * {@link FSQueue#assignContainer(FSSchedulerNode)}) - * + * * Create two queues under root (fifoQueue and fairParent), and two queues * under fairParent (fairChild1 and fairChild2). Submit two apps to the * fifoQueue and one each to the fairChild* queues, all apps requiring 4 * containers each of the total 16 container capacity - * + * * Assert the number of containers for each app after 4, 8, 12 and 16 updates. - * + * * @throws Exception */ @Test(timeout = 5000) @@ -2326,10 +2363,10 @@ public void testAssignContainer() throws Exception { RMNode node1 = MockNodes - .newNodeInfo(1, Resources.createResource(8192, 8), 1, "127.0.0.1"); + .newNodeInfo(1, Resources.createResource(8192, 8, 10), 1, "127.0.0.1"); RMNode node2 = MockNodes - .newNodeInfo(1, Resources.createResource(8192, 8), 2, "127.0.0.2"); + .newNodeInfo(1, Resources.createResource(8192, 8, 10), 2, "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); @@ -2380,7 +2417,7 @@ public void testAssignContainer() throws Exception { } } } - + @SuppressWarnings("unchecked") @Test public void testNotAllowSubmitApplication() throws Exception { @@ -2418,9 +2455,9 @@ public void testNotAllowSubmitApplication() throws Exception { submissionContext.setApplicationId(applicationId); submissionContext.setAMContainerSpec(clc); RMApp application = - new RMAppImpl(applicationId, resourceManager.getRMContext(), conf, name, user, - queue, submissionContext, scheduler, masterService, - System.currentTimeMillis(), "YARN", null, null); + new RMAppImpl(applicationId, resourceManager.getRMContext(), conf, name, user, + queue, submissionContext, scheduler, masterService, + System.currentTimeMillis(), "YARN", null, null); 
resourceManager.getRMContext().getRMApps().putIfAbsent(applicationId, application); application.handle(new RMAppEvent(applicationId, RMAppEventType.START)); @@ -2449,7 +2486,7 @@ public void testNotAllowSubmitApplication() throws Exception { } assertEquals(FinalApplicationStatus.FAILED, application.getFinalApplicationStatus()); } - + @Test public void testReservationThatDoesntFit() throws IOException { scheduler.init(conf); @@ -2461,25 +2498,25 @@ public void testReservationThatDoesntFit() throws IOException { .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - + ApplicationAttemptId attId = createSchedulingRequest(2048, "queue1", "user1", 1); scheduler.update(); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); - + FSAppAttempt app = scheduler.getSchedulerApp(attId); assertEquals(0, app.getLiveContainers().size()); assertEquals(0, app.getReservedContainers().size()); - + createSchedulingRequestExistingApplication(1024, 2, attId); scheduler.update(); scheduler.handle(updateEvent); - + assertEquals(1, app.getLiveContainers().size()); assertEquals(0, app.getReservedContainers().size()); } - + @Test public void testRemoveNodeUpdatesRootQueueMetrics() throws IOException { scheduler.init(conf); @@ -2487,28 +2524,33 @@ public void testRemoveNodeUpdatesRootQueueMetrics() throws IOException { scheduler.reinitialize(conf, resourceManager.getRMContext()); assertEquals(0, scheduler.getRootQueueMetrics().getAvailableMB()); - assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); - - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024, 4), 1, + assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); + assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualDisks()); + + RMNode node1 = MockNodes.newNodeInfo(1, 
Resources.createResource(1024, 4, 8), 1, "127.0.0.1"); NodeAddedSchedulerEvent addEvent = new NodeAddedSchedulerEvent(node1); scheduler.handle(addEvent); - + assertEquals(1024, scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(4, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); + assertEquals(8, scheduler.getRootQueueMetrics().getAvailableVirtualDisks()); scheduler.update(); // update shouldn't change things assertEquals(1024, scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(4, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); - + assertEquals(8, scheduler.getRootQueueMetrics().getAvailableVirtualDisks()); + NodeRemovedSchedulerEvent removeEvent = new NodeRemovedSchedulerEvent(node1); scheduler.handle(removeEvent); - + assertEquals(0, scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); + assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualDisks()); scheduler.update(); // update shouldn't change things assertEquals(0, scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualCores()); -} + assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualDisks()); + } @Test public void testStrictLocality() throws IOException { @@ -2526,7 +2568,7 @@ public void testStrictLocality() throws IOException { ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 0); - + ResourceRequest nodeRequest = createResourceRequest(1024, node1.getHostName(), 1, 1, true); ResourceRequest rackRequest = createResourceRequest(1024, node1.getRackName(), 1, 1, false); ResourceRequest anyRequest = createResourceRequest(1024, ResourceRequest.ANY, @@ -2551,7 +2593,7 @@ public void testStrictLocality() throws IOException { scheduler.handle(node1UpdateEvent); assertEquals(1, app.getLiveContainers().size()); } - + @Test public void testCancelStrictLocality() throws 
IOException { scheduler.init(conf); @@ -2568,7 +2610,7 @@ public void testCancelStrictLocality() throws IOException { ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 0); - + ResourceRequest nodeRequest = createResourceRequest(1024, node1.getHostName(), 1, 1, true); ResourceRequest rackRequest = createResourceRequest(1024, "rack1", 1, 1, false); ResourceRequest anyRequest = createResourceRequest(1024, ResourceRequest.ANY, @@ -2587,14 +2629,14 @@ public void testCancelStrictLocality() throws IOException { scheduler.handle(node2UpdateEvent); assertEquals(0, app.getLiveContainers().size()); } - + // relax locality List update = Arrays.asList( createResourceRequest(1024, node1.getHostName(), 1, 0, true), createResourceRequest(1024, "rack1", 1, 0, true), createResourceRequest(1024, ResourceRequest.ANY, 1, 1, true)); scheduler.allocate(attId1, update, new ArrayList(), null, null); - + // then node2 should get the container scheduler.handle(node2UpdateEvent); assertEquals(1, app.getLiveContainers().size()); @@ -2618,7 +2660,7 @@ public void testReservationsStrictLocality() throws IOException { ApplicationAttemptId attId = createSchedulingRequest(1024, "queue1", "user1", 0); FSAppAttempt app = scheduler.getSchedulerApp(attId); - + ResourceRequest nodeRequest = createResourceRequest(1024, node2.getHostName(), 1, 2, true); ResourceRequest rackRequest = createResourceRequest(1024, "rack1", 1, 2, true); ResourceRequest anyRequest = createResourceRequest(1024, ResourceRequest.ANY, @@ -2626,7 +2668,7 @@ public void testReservationsStrictLocality() throws IOException { createSchedulingRequestExistingApplication(nodeRequest, attId); createSchedulingRequestExistingApplication(rackRequest, attId); createSchedulingRequestExistingApplication(anyRequest, attId); - + scheduler.update(); NodeUpdateSchedulerEvent nodeUpdateEvent = new NodeUpdateSchedulerEvent(node1); @@ -2634,7 +2676,7 @@ public void testReservationsStrictLocality() throws IOException { 
assertEquals(1, app.getLiveContainers().size()); scheduler.handle(nodeUpdateEvent); assertEquals(1, app.getReservedContainers().size()); - + // now, make our request node-specific (on a different node) rackRequest = createResourceRequest(1024, "rack1", 1, 1, false); anyRequest = createResourceRequest(1024, ResourceRequest.ANY, @@ -2645,19 +2687,42 @@ public void testReservationsStrictLocality() throws IOException { scheduler.handle(nodeUpdateEvent); assertEquals(0, app.getReservedContainers().size()); } - + @Test public void testNoMoreCpuOnNode() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); - RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 1), + RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 1, 2), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - - ApplicationAttemptId attId = createSchedulingRequest(1024, 1, "default", + + ApplicationAttemptId attId = createSchedulingRequest(1024, 1, 1, "default", + "user1", 2); + FSAppAttempt app = scheduler.getSchedulerApp(attId); + scheduler.update(); + + NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1); + scheduler.handle(updateEvent); + assertEquals(1, app.getLiveContainers().size()); + scheduler.handle(updateEvent); + assertEquals(1, app.getLiveContainers().size()); + } + + @Test + public void testNoMoreVdisksOnNode() throws IOException { + scheduler.init(conf); + scheduler.start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 2, 1), + 1, "127.0.0.1"); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + ApplicationAttemptId attId = createSchedulingRequest(1024, 1, 1, "default", "user1", 2); FSAppAttempt app = scheduler.getSchedulerApp(attId); 
scheduler.update(); @@ -2675,25 +2740,29 @@ public void testBasicDRFAssignment() throws Exception { scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); - RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5)); + RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5, 8)); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); scheduler.handle(nodeEvent); - ApplicationAttemptId appAttId1 = createSchedulingRequest(2048, 1, "queue1", + ApplicationAttemptId appAttId1 = createSchedulingRequest(2048, 1, 3, "queue1", "user1", 2); FSAppAttempt app1 = scheduler.getSchedulerApp(appAttId1); - ApplicationAttemptId appAttId2 = createSchedulingRequest(1024, 2, "queue1", + ApplicationAttemptId appAttId2 = createSchedulingRequest(1024, 2, 1, "queue1", "user1", 2); FSAppAttempt app2 = scheduler.getSchedulerApp(appAttId2); DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy(); + Set enabledResources = new HashSet(); + enabledResources.add(ResourceType.CPU); + enabledResources.add(ResourceType.MEMORY); + drfPolicy.setEnabledResourceTypes(enabledResources); drfPolicy.initialize(scheduler.getClusterResource()); scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy); scheduler.update(); // First both apps get a container // Then the first gets another container because its dominant share of - // 2048/8192 is less than the other's of 2/5 + // 3/8 is less than the other's of 2/5 NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); scheduler.handle(updateEvent); Assert.assertEquals(1, app1.getLiveContainers().size()); @@ -2717,12 +2786,12 @@ public void testBasicDRFWithQueues() throws Exception { scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); - RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 7), + RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 7, 10), 1, "127.0.0.1"); 
NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); scheduler.handle(nodeEvent); - ApplicationAttemptId appAttId1 = createSchedulingRequest(3072, 1, "queue1", + ApplicationAttemptId appAttId1 = createSchedulingRequest(3072, 1, 2, "queue1", "user1", 2); FSAppAttempt app1 = scheduler.getSchedulerApp(appAttId1); ApplicationAttemptId appAttId2 = createSchedulingRequest(2048, 2, "queue1", @@ -2731,8 +2800,12 @@ public void testBasicDRFWithQueues() throws Exception { ApplicationAttemptId appAttId3 = createSchedulingRequest(1024, 2, "queue2", "user1", 2); FSAppAttempt app3 = scheduler.getSchedulerApp(appAttId3); - + DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy(); + Set enabledResources = new HashSet(); + enabledResources.add(ResourceType.CPU); + enabledResources.add(ResourceType.MEMORY); + drfPolicy.setEnabledResourceTypes(enabledResources); drfPolicy.initialize(scheduler.getClusterResource()); scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy); scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy); @@ -2740,6 +2813,7 @@ public void testBasicDRFWithQueues() throws Exception { NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); scheduler.handle(updateEvent); + // (5/8, 3/7, 3/10), (1/8, 2/7, 3/10) Assert.assertEquals(1, app1.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(1, app3.getLiveContainers().size()); @@ -2748,7 +2822,7 @@ public void testBasicDRFWithQueues() throws Exception { scheduler.handle(updateEvent); Assert.assertEquals(1, app2.getLiveContainers().size()); } - + @Test public void testDRFHierarchicalQueues() throws Exception { scheduler.init(conf); @@ -2776,8 +2850,12 @@ public void testDRFHierarchicalQueues() throws Exception { "user1", 2); Thread.sleep(3); // so that start times will be different FSAppAttempt app4 = scheduler.getSchedulerApp(appAttId4); - + DominantResourceFairnessPolicy drfPolicy = new 
DominantResourceFairnessPolicy(); + Set enabledResources = new HashSet(); + enabledResources.add(ResourceType.CPU); + enabledResources.add(ResourceType.MEMORY); + drfPolicy.setEnabledResourceTypes(enabledResources); drfPolicy.initialize(scheduler.getClusterResource()); scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy); scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy); @@ -2807,7 +2885,7 @@ public void testDRFHierarchicalQueues() throws Exception { // queue1.subqueue1 is behind again, so it gets one, which it gives to app2 scheduler.handle(updateEvent); Assert.assertEquals(1, app2.getLiveContainers().size()); - + // at this point, we've used all our CPU up, so nobody else should get a container scheduler.handle(updateEvent); @@ -2835,15 +2913,15 @@ public void testHostPortNodeName() throws Exception { NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); - ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", + ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 0); - ResourceRequest nodeRequest = createResourceRequest(1024, + ResourceRequest nodeRequest = createResourceRequest(1024, node1.getNodeID().getHost() + ":" + node1.getNodeID().getPort(), 1, 1, true); - ResourceRequest rackRequest = createResourceRequest(1024, + ResourceRequest rackRequest = createResourceRequest(1024, node1.getRackName(), 1, 1, false); - ResourceRequest anyRequest = createResourceRequest(1024, + ResourceRequest anyRequest = createResourceRequest(1024, ResourceRequest.ANY, 1, 1, false); createSchedulingRequestExistingApplication(nodeRequest, attId1); createSchedulingRequestExistingApplication(rackRequest, attId1); @@ -2851,19 +2929,19 @@ public void testHostPortNodeName() throws Exception { scheduler.update(); - NodeUpdateSchedulerEvent node1UpdateEvent = new + NodeUpdateSchedulerEvent node1UpdateEvent = new NodeUpdateSchedulerEvent(node1); - 
NodeUpdateSchedulerEvent node2UpdateEvent = new + NodeUpdateSchedulerEvent node2UpdateEvent = new NodeUpdateSchedulerEvent(node2); - // no matter how many heartbeats, node2 should never get a container + // no matter how many heartbeats, node2 should never get a container FSAppAttempt app = scheduler.getSchedulerApp(attId1); for (int i = 0; i < 10; i++) { scheduler.handle(node2UpdateEvent); assertEquals(0, app.getLiveContainers().size()); assertEquals(0, app.getReservedContainers().size()); } - // then node1 should get the container + // then node1 should get the container scheduler.handle(node1UpdateEvent); assertEquals(1, app.getLiveContainers().size()); } @@ -2878,16 +2956,16 @@ private void verifyAppRunnable(ApplicationAttemptId attId, boolean runnable) { assertEquals(runnable, runnableApps.contains(app)); assertEquals(!runnable, nonRunnableApps.contains(app)); } - + private void verifyQueueNumRunnable(String queueName, int numRunnableInQueue, - int numNonRunnableInQueue) { + int numNonRunnableInQueue) { FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue(queueName, false); assertEquals(numRunnableInQueue, queue.getRunnableAppSchedulables().size()); assertEquals(numNonRunnableInQueue, queue.getNonRunnableAppSchedulables().size()); } - + @Test public void testUserAndQueueMaxRunningApps() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); @@ -2924,7 +3002,7 @@ public void testUserAndQueueMaxRunningApps() throws Exception { ApplicationAttemptId attId4 = createSchedulingRequest(1024, "queue1", "user2"); verifyAppRunnable(attId4, false); verifyQueueNumRunnable("queue1", 2, 1); - + // Remove app 1 and both app 2 and app 4 should becomes runnable in its place AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(attId1, RMAppAttemptState.FINISHED, false); @@ -2933,13 +3011,13 @@ public void testUserAndQueueMaxRunningApps() throws Exception { verifyQueueNumRunnable("queue2", 1, 0); 
verifyAppRunnable(attId4, true); verifyQueueNumRunnable("queue1", 2, 0); - + // A new app to queue1 should not be runnable ApplicationAttemptId attId5 = createSchedulingRequest(1024, "queue1", "user2"); verifyAppRunnable(attId5, false); verifyQueueNumRunnable("queue1", 2, 1); } - + @Test public void testQueueMaxAMShare() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); @@ -2958,7 +3036,7 @@ public void testQueueMaxAMShare() throws Exception { scheduler.reinitialize(conf, resourceManager.getRMContext()); RMNode node = - MockNodes.newNodeInfo(1, Resources.createResource(20480, 20), + MockNodes.newNodeInfo(1, Resources.createResource(20480, 20, 20), 0, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); @@ -2973,9 +3051,9 @@ public void testQueueMaxAMShare() throws Exception { scheduler.update(); scheduler.handle(updateEvent); - Resource amResource1 = Resource.newInstance(1024, 1); - Resource amResource2 = Resource.newInstance(2048, 2); - Resource amResource3 = Resource.newInstance(1860, 2); + Resource amResource1 = Resource.newInstance(1024, 1, 1); + Resource amResource2 = Resource.newInstance(2048, 2, 2); + Resource amResource3 = Resource.newInstance(1860, 2, 2); int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority(); // Exceeds no limits ApplicationAttemptId attId1 = createAppAttemptId(1, 1); @@ -3101,7 +3179,7 @@ public void testQueueMaxAMShare() throws Exception { // Check amResource normalization ApplicationAttemptId attId6 = createAppAttemptId(6, 1); createApplicationWithAMResource(attId6, "queue1", "user1", amResource3); - createSchedulingRequestExistingApplication(1860, 2, amPriority, attId6); + createSchedulingRequestExistingApplication(1860, 2, 2, amPriority, attId6); FSAppAttempt app6 = scheduler.getSchedulerApp(attId6); scheduler.update(); scheduler.handle(updateEvent); @@ -3150,7 +3228,7 @@ public 
void testQueueMaxAMShareDefault() throws Exception { scheduler.reinitialize(conf, resourceManager.getRMContext()); RMNode node = - MockNodes.newNodeInfo(1, Resources.createResource(8192, 20), + MockNodes.newNodeInfo(1, Resources.createResource(8192, 20, 20), 0, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); @@ -3186,7 +3264,7 @@ public void testQueueMaxAMShareDefault() throws Exception { scheduler.handle(updateEvent); } - Resource amResource1 = Resource.newInstance(1024, 1); + Resource amResource1 = Resource.newInstance(1024, 1, 1); int amPriority = RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority(); // The fair share is 2048 MB, and the default maxAMShare is 0.5f, @@ -3269,7 +3347,7 @@ public void testMaxRunningAppsHierarchicalQueues() throws Exception { verifyAppRunnable(attId5, false); verifyQueueNumRunnable("queue1.sub3", 1, 1); clock.tick(10); - + // Even though the app was removed from sub3, the app from sub2 gets to go // because it came in first AppAttemptRemovedSchedulerEvent appRemovedEvent1 = @@ -3302,7 +3380,7 @@ public void testContinuousScheduling() throws Exception { scheduler = new FairScheduler(); Configuration conf = createConfiguration(); conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, - true); + true); scheduler.setRMContext(resourceManager.getRMContext()); scheduler.init(conf); scheduler.start(); @@ -3312,28 +3390,29 @@ public void testContinuousScheduling() throws Exception { // Add two nodes RMNode node1 = - MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1, - "127.0.0.1"); + MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8, 10), 1, + "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); RMNode node2 = - MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 2, - "127.0.0.2"); + MockNodes.newNodeInfo(1, 
Resources.createResource(8 * 1024, 8, 10), 2, + "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); // available resource Assert.assertEquals(scheduler.getClusterResource().getMemory(), 16 * 1024); Assert.assertEquals(scheduler.getClusterResource().getVirtualCores(), 16); + Assert.assertEquals(scheduler.getClusterResource().getVirtualDisks(), 20); // send application request ApplicationAttemptId appAttemptId = - createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++); + createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++); scheduler.addApplication(appAttemptId.getApplicationId(), "queue11", "user11", false); scheduler.addApplicationAttempt(appAttemptId, false, false); List ask = new ArrayList(); ResourceRequest request = - createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true); + createResourceRequest(1024, 1, 1, ResourceRequest.ANY, 1, 1, true); ask.add(request); scheduler.allocate(appAttemptId, ask, new ArrayList(), null, null); @@ -3348,20 +3427,22 @@ public void testContinuousScheduling() throws Exception { // check consumption Assert.assertEquals(1024, app.getCurrentConsumption().getMemory()); Assert.assertEquals(1, app.getCurrentConsumption().getVirtualCores()); + Assert.assertEquals(1, app.getCurrentConsumption().getVirtualDisks()); // another request request = - createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true); + createResourceRequest(1024, 1, 1, ResourceRequest.ANY, 2, 1, true); ask.clear(); ask.add(request); scheduler.allocate(appAttemptId, ask, new ArrayList(), null, null); // Wait until app gets resources while (app.getCurrentConsumption() - .equals(Resources.createResource(1024, 1))) { } + .equals(Resources.createResource(1024, 1, 1))) { } Assert.assertEquals(2048, app.getCurrentConsumption().getMemory()); Assert.assertEquals(2, app.getCurrentConsumption().getVirtualCores()); + Assert.assertEquals(2, app.getCurrentConsumption().getVirtualDisks()); // 2 containers 
should be assigned to 2 nodes Set nodes = new HashSet(); @@ -3405,7 +3486,7 @@ public void testContinuousSchedulingWithNodeRemoved() throws Exception { scheduler.continuousSchedulingAttempt(); } catch (Exception e) { fail("Exception happened when doing continuous scheduling. " + - e.toString()); + e.toString()); } } @@ -3426,10 +3507,10 @@ public void testDontAllowUndeclaredPools() throws Exception{ scheduler.start(); scheduler.reinitialize(conf, resourceManager.getRMContext()); QueueManager queueManager = scheduler.getQueueManager(); - + FSLeafQueue jerryQueue = queueManager.getLeafQueue("jerry", false); FSLeafQueue defaultQueue = queueManager.getLeafQueue("default", false); - + // Should get put into jerry createSchedulingRequest(1024, "jerry", "someuser"); assertEquals(1, jerryQueue.getRunnableAppSchedulables().size()); @@ -3438,13 +3519,13 @@ public void testDontAllowUndeclaredPools() throws Exception{ createSchedulingRequest(1024, "newqueue", "someuser"); assertEquals(1, jerryQueue.getRunnableAppSchedulables().size()); assertEquals(1, defaultQueue.getRunnableAppSchedulables().size()); - + // Would get put into someuser because of user-as-default-queue, but should // be forced into default createSchedulingRequest(1024, "default", "someuser"); assertEquals(1, jerryQueue.getRunnableAppSchedulables().size()); assertEquals(2, defaultQueue.getRunnableAppSchedulables().size()); - + // Should get put into jerry because of user-as-default-queue createSchedulingRequest(1024, "default", "jerry"); assertEquals(2, jerryQueue.getRunnableAppSchedulables().size()); @@ -3481,11 +3562,11 @@ public void testDefaultRuleInitializesProperlyWhenPolicyNotConfigured() } } } - + @Test(timeout=5000) public void testRecoverRequestAfterPreemption() throws Exception { conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10); - + MockClock clock = new MockClock(); scheduler.setClock(clock); scheduler.init(conf); @@ -3535,10 +3616,10 @@ public void 
testRecoverRequestAfterPreemption() throws Exception { // Create a preempt event and register for preemption scheduler.warnOrKillContainer(rmContainer); - + // Wait for few clock ticks clock.tick(5); - + // preempt now scheduler.warnOrKillContainer(rmContainer); @@ -3562,7 +3643,7 @@ public void testRecoverRequestAfterPreemption() throws Exception { // Now with updated ResourceRequest, a container is allocated for AM. Assert.assertTrue(containers.size() == 1); } - + @SuppressWarnings("resource") @Test public void testBlacklistNodes() throws Exception { @@ -3573,7 +3654,7 @@ public void testBlacklistNodes() throws Exception { final int GB = 1024; String host = "127.0.0.1"; RMNode node = - MockNodes.newNodeInfo(1, Resources.createResource(16 * GB, 16), + MockNodes.newNodeInfo(1, Resources.createResource(16 * GB, 16, 20), 0, host); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); @@ -3617,7 +3698,7 @@ public void testBlacklistNodes() throws Exception { assertEquals("Incorrect number of containers allocated", 1, app .getLiveContainers().size()); } - + @Test public void testGetAppsInQueue() throws Exception { scheduler.init(conf); @@ -3625,12 +3706,12 @@ public void testGetAppsInQueue() throws Exception { scheduler.reinitialize(conf, resourceManager.getRMContext()); ApplicationAttemptId appAttId1 = - createSchedulingRequest(1024, 1, "queue1.subqueue1", "user1"); + createSchedulingRequest(1024, 1, 1, "queue1.subqueue1", "user1"); ApplicationAttemptId appAttId2 = - createSchedulingRequest(1024, 1, "queue1.subqueue2", "user1"); + createSchedulingRequest(1024, 1, 1, "queue1.subqueue2", "user1"); ApplicationAttemptId appAttId3 = - createSchedulingRequest(1024, 1, "default", "user1"); - + createSchedulingRequest(1024, 1, 1, "default", "user1"); + List apps = scheduler.getAppsInQueue("queue1.subqueue1"); assertEquals(1, apps.size()); @@ -3639,7 +3720,7 @@ public void 
testGetAppsInQueue() throws Exception { apps = scheduler.getAppsInQueue("root.queue1.subqueue1"); assertEquals(1, apps.size()); assertEquals(appAttId1, apps.get(0)); - + apps = scheduler.getAppsInQueue("user1"); assertEquals(1, apps.size()); assertEquals(appAttId3, apps.get(0)); @@ -3660,9 +3741,9 @@ public void testGetAppsInQueue() throws Exception { public void testAddAndRemoveAppFromFairScheduler() throws Exception { AbstractYarnScheduler scheduler = (AbstractYarnScheduler) resourceManager - .getResourceScheduler(); + .getResourceScheduler(); TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler( - scheduler.getSchedulerApplications(), scheduler, "default"); + scheduler.getSchedulerApplications(), scheduler, "default"); } @Test @@ -3676,34 +3757,34 @@ public void testMoveRunnableApp() throws Exception { FSLeafQueue targetQueue = queueMgr.getLeafQueue("queue2", true); ApplicationAttemptId appAttId = - createSchedulingRequest(1024, 1, "queue1", "user1", 3); + createSchedulingRequest(1024, 1, 1, "queue1", "user1", 3); ApplicationId appId = appAttId.getApplicationId(); - RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(1024, 1, 1)); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); scheduler.handle(updateEvent); - - assertEquals(Resource.newInstance(1024, 1), oldQueue.getResourceUsage()); + + assertEquals(Resource.newInstance(1024, 1, 1), oldQueue.getResourceUsage()); scheduler.update(); - assertEquals(Resource.newInstance(3072, 3), oldQueue.getDemand()); - + assertEquals(Resource.newInstance(3072, 3, 3), oldQueue.getDemand()); + scheduler.moveApplication(appId, "queue2"); FSAppAttempt app = scheduler.getSchedulerApp(appAttId); assertSame(targetQueue, app.getQueue()); assertFalse(oldQueue.getRunnableAppSchedulables().contains(app)); 
assertTrue(targetQueue.getRunnableAppSchedulables().contains(app)); - assertEquals(Resource.newInstance(0, 0), oldQueue.getResourceUsage()); - assertEquals(Resource.newInstance(1024, 1), targetQueue.getResourceUsage()); + assertEquals(Resource.newInstance(0, 0, 0), oldQueue.getResourceUsage()); + assertEquals(Resource.newInstance(1024, 1, 1), targetQueue.getResourceUsage()); assertEquals(0, oldQueue.getNumRunnableApps()); assertEquals(1, targetQueue.getNumRunnableApps()); assertEquals(1, queueMgr.getRootQueue().getNumRunnableApps()); - + scheduler.update(); - assertEquals(Resource.newInstance(0, 0), oldQueue.getDemand()); - assertEquals(Resource.newInstance(3072, 3), targetQueue.getDemand()); + assertEquals(Resource.newInstance(0, 0, 0), oldQueue.getDemand()); + assertEquals(Resource.newInstance(3072, 3, 3), targetQueue.getDemand()); } - + @Test public void testMoveNonRunnableApp() throws Exception { scheduler.init(conf); @@ -3715,17 +3796,17 @@ public void testMoveNonRunnableApp() throws Exception { FSLeafQueue targetQueue = queueMgr.getLeafQueue("queue2", true); scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1", 0); scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue2", 0); - + ApplicationAttemptId appAttId = - createSchedulingRequest(1024, 1, "queue1", "user1", 3); - + createSchedulingRequest(1024, 1, 1, "queue1", "user1", 3); + assertEquals(0, oldQueue.getNumRunnableApps()); scheduler.moveApplication(appAttId.getApplicationId(), "queue2"); assertEquals(0, oldQueue.getNumRunnableApps()); assertEquals(0, targetQueue.getNumRunnableApps()); assertEquals(0, queueMgr.getRootQueue().getNumRunnableApps()); } - + @Test public void testMoveMakesAppRunnable() throws Exception { scheduler.init(conf); @@ -3736,13 +3817,13 @@ public void testMoveMakesAppRunnable() throws Exception { FSLeafQueue oldQueue = queueMgr.getLeafQueue("queue1", true); FSLeafQueue targetQueue = queueMgr.getLeafQueue("queue2", true); 
scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1", 0); - + ApplicationAttemptId appAttId = - createSchedulingRequest(1024, 1, "queue1", "user1", 3); - + createSchedulingRequest(1024, 1, 1, "queue1", "user1", 3); + FSAppAttempt app = scheduler.getSchedulerApp(appAttId); assertTrue(oldQueue.getNonRunnableAppSchedulables().contains(app)); - + scheduler.moveApplication(appAttId.getApplicationId(), "queue2"); assertFalse(oldQueue.getNonRunnableAppSchedulables().contains(app)); assertFalse(targetQueue.getNonRunnableAppSchedulables().contains(app)); @@ -3750,7 +3831,7 @@ public void testMoveMakesAppRunnable() throws Exception { assertEquals(1, targetQueue.getNumRunnableApps()); assertEquals(1, queueMgr.getRootQueue().getNumRunnableApps()); } - + @Test (expected = YarnException.class) public void testMoveWouldViolateMaxAppsConstraints() throws Exception { scheduler.init(conf); @@ -3760,13 +3841,13 @@ public void testMoveWouldViolateMaxAppsConstraints() throws Exception { QueueManager queueMgr = scheduler.getQueueManager(); queueMgr.getLeafQueue("queue2", true); scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue2", 0); - + ApplicationAttemptId appAttId = - createSchedulingRequest(1024, 1, "queue1", "user1", 3); - + createSchedulingRequest(1024, 1, 1, "queue1", "user1", 3); + scheduler.moveApplication(appAttId.getApplicationId(), "queue2"); } - + @Test (expected = YarnException.class) public void testMoveWouldViolateMaxResourcesConstraints() throws Exception { scheduler.init(conf); @@ -3777,21 +3858,21 @@ public void testMoveWouldViolateMaxResourcesConstraints() throws Exception { FSLeafQueue oldQueue = queueMgr.getLeafQueue("queue1", true); queueMgr.getLeafQueue("queue2", true); scheduler.getAllocationConfiguration().maxQueueResources.put("root.queue2", - Resource.newInstance(1024, 1)); + Resource.newInstance(1024, 1, 1)); ApplicationAttemptId appAttId = - createSchedulingRequest(1024, 1, "queue1", "user1", 3); - RMNode node = 
MockNodes.newNodeInfo(1, Resources.createResource(2048, 2)); + createSchedulingRequest(1024, 1, 1, "queue1", "user1", 3); + RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(2048, 2, 2)); NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); scheduler.handle(updateEvent); scheduler.handle(updateEvent); - - assertEquals(Resource.newInstance(2048, 2), oldQueue.getResourceUsage()); + + assertEquals(Resource.newInstance(2048, 2, 2), oldQueue.getResourceUsage()); scheduler.moveApplication(appAttId.getApplicationId(), "queue2"); } - + @Test (expected = YarnException.class) public void testMoveToNonexistentQueue() throws Exception { scheduler.init(conf); @@ -3799,9 +3880,9 @@ public void testMoveToNonexistentQueue() throws Exception { scheduler.reinitialize(conf, resourceManager.getRMContext()); scheduler.getQueueManager().getLeafQueue("queue1", true); - + ApplicationAttemptId appAttId = - createSchedulingRequest(1024, 1, "queue1", "user1", 3); + createSchedulingRequest(1024, 1, 1, "queue1", "user1", 3); scheduler.moveApplication(appAttId.getApplicationId(), "queue2"); } @@ -3905,4 +3986,4 @@ public void testPerfMetricsInited() { assertEquals("Incorrect number of perf metrics", 1, collector.getRecords().size()); } -} +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java index 82b50a6..baf198b 100644 --- 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java @@ -31,14 +31,18 @@ public class TestFairSchedulerConfiguration { @Test public void testParseResourceConfigValue() throws Exception { - assertEquals(BuilderUtils.newResource(1024, 2), - parseResourceConfigValue("2 vcores, 1024 mb")); - assertEquals(BuilderUtils.newResource(1024, 2), - parseResourceConfigValue("1024 mb, 2 vcores")); - assertEquals(BuilderUtils.newResource(1024, 2), - parseResourceConfigValue("2vcores,1024mb")); - assertEquals(BuilderUtils.newResource(1024, 2), - parseResourceConfigValue("1024mb,2vcores")); + assertEquals(BuilderUtils.newResource(1024, 2, 4), + parseResourceConfigValue("2 vcores, 1024 mb, 4 vdisks")); + assertEquals(BuilderUtils.newResource(1024, 2, 4), + parseResourceConfigValue("1024 mb, 2 vcores, 4 vdisks")); + assertEquals(BuilderUtils.newResource(1024, 2, 4), + parseResourceConfigValue("4 vdisks, 1024 mb, 2 vcores")); + assertEquals(BuilderUtils.newResource(1024, 2, 4), + parseResourceConfigValue("2vcores,1024mb, 4vdisks")); + assertEquals(BuilderUtils.newResource(1024, 2, 4), + parseResourceConfigValue("1024mb,2vcores, 4vdisks")); + assertEquals(BuilderUtils.newResource(1024, 2, 4), + parseResourceConfigValue("4vdisks,1024mb, 2vcores")); assertEquals(BuilderUtils.newResource(1024, 2), parseResourceConfigValue("1024 mb, 2 vcores")); assertEquals(BuilderUtils.newResource(1024, 2), @@ -61,6 +65,11 @@ public void testOnlyMemory() throws Exception { public void testOnlyCPU() throws Exception { parseResourceConfigValue("1024vcores"); } + + @Test(expected = AllocationConfigurationException.class) + public void testOnlyVdisks() throws Exception 
{ + parseResourceConfigValue("4vdisks"); + } @Test(expected = AllocationConfigurationException.class) public void testGibberish() throws Exception { diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java index a5c20c1..41d4b5b 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java @@ -21,6 +21,8 @@ import static org.junit.Assert.assertTrue; import java.util.Comparator; +import java.util.HashSet; +import java.util.Set; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType; @@ -38,32 +40,50 @@ public class TestDominantResourceFairnessPolicy { private Comparator createComparator(int clusterMem, - int clusterCpu) { + int clusterCpu, int clusterVdisk) { + return createComparator( + BuilderUtils.newResource(clusterMem, clusterCpu, clusterVdisk)); + } + + private Comparator createComparator(Resource capacity) { + Set enabledResourceTypes = new HashSet(); + for (ResourceType type : ResourceType.values()) { + enabledResourceTypes.add(type); + } + return createComparator(capacity, enabledResourceTypes); + } + + private Comparator createComparator(Resource capacity, + Set enabledResourceTypes) { DominantResourceFairnessPolicy policy = new 
DominantResourceFairnessPolicy(); - policy.initialize(BuilderUtils.newResource(clusterMem, clusterCpu)); + policy.setEnabledResourceTypes(enabledResourceTypes); + policy.initialize(capacity); return policy.getComparator(); } - private Schedulable createSchedulable(int memUsage, int cpuUsage) { - return createSchedulable(memUsage, cpuUsage, ResourceWeights.NEUTRAL, 0, 0); + private Schedulable createSchedulable(int memUsage, int cpuUsage, + int vdiskUsage) { + return createSchedulable(memUsage, cpuUsage, vdiskUsage, + ResourceWeights.NEUTRAL, 0, 0, 0); } private Schedulable createSchedulable(int memUsage, int cpuUsage, - int minMemShare, int minCpuShare) { - return createSchedulable(memUsage, cpuUsage, ResourceWeights.NEUTRAL, - minMemShare, minCpuShare); + int vdiskUsage, int minMemShare, int minCpuShare, int minVdiskShare) { + return createSchedulable(memUsage, cpuUsage, vdiskUsage, + ResourceWeights.NEUTRAL, minMemShare, minCpuShare, minVdiskShare); } private Schedulable createSchedulable(int memUsage, int cpuUsage, - ResourceWeights weights) { - return createSchedulable(memUsage, cpuUsage, weights, 0, 0); + int vdiskUsage, ResourceWeights weights) { + return createSchedulable(memUsage, cpuUsage, vdiskUsage, weights, 0, 0, 0); } - private Schedulable createSchedulable(int memUsage, int cpuUsage, - ResourceWeights weights, int minMemShare, int minCpuShare) { - Resource usage = BuilderUtils.newResource(memUsage, cpuUsage); - Resource minShare = BuilderUtils.newResource(minMemShare, minCpuShare); + int vdiskUsage, ResourceWeights weights, int minMemShare, + int minCpuShare, int minVdiskShare) { + Resource usage = BuilderUtils.newResource(memUsage, cpuUsage, vdiskUsage); + Resource minShare = BuilderUtils + .newResource(minMemShare, minCpuShare, minVdiskShare); return new FakeSchedulable(minShare, Resources.createResource(Integer.MAX_VALUE, Integer.MAX_VALUE), weights, Resources.none(), usage, 0l); @@ -71,93 +91,142 @@ private Schedulable createSchedulable(int 
memUsage, int cpuUsage, @Test public void testSameDominantResource() { - assertTrue(createComparator(8000, 4).compare( - createSchedulable(1000, 1), - createSchedulable(2000, 1)) < 0); + assertTrue(createComparator(8000, 4, 4).compare( + createSchedulable(1000, 1, 1), + createSchedulable(2000, 1, 1)) < 0); } @Test public void testDifferentDominantResource() { - assertTrue(createComparator(8000, 8).compare( - createSchedulable(4000, 3), - createSchedulable(2000, 5)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(4000, 3, 2), + createSchedulable(2000, 5, 3)) < 0); } - + + @Test + public void testOnlyOneResource() { + Resource capacity = BuilderUtils.newResource(8000, 8, 20); + // only CPU + Set enabledResourceTypes = new HashSet(); + enabledResourceTypes.add(ResourceType.CPU); + assertTrue(createComparator(capacity, enabledResourceTypes).compare( + createSchedulable(4000, 3, 15), + createSchedulable(2000, 5, 3)) < 0); + // only MEMORY + enabledResourceTypes.clear(); + enabledResourceTypes.add(ResourceType.MEMORY); + assertTrue(createComparator(capacity, enabledResourceTypes).compare( + createSchedulable(5000, 3, 15), + createSchedulable(2000, 4, 3)) > 0); + // only DISKIO + enabledResourceTypes.clear(); + enabledResourceTypes.add(ResourceType.DISKIO); + assertTrue(createComparator(capacity, enabledResourceTypes).compare( + createSchedulable(4000, 5, 2), + createSchedulable(2000, 2, 3)) < 0); + } + + @Test + public void testOnlyTwoResources() { + Resource capacity = BuilderUtils.newResource(8000, 8, 20); + // CPU & MEMORY + Set enabledResourceTypes = new HashSet(); + enabledResourceTypes.add(ResourceType.CPU); + enabledResourceTypes.add(ResourceType.MEMORY); + assertTrue(createComparator(capacity, enabledResourceTypes).compare( + createSchedulable(3000, 1, 2), + createSchedulable(2000, 2, 7)) > 0); + + // CPU & DISKIO + enabledResourceTypes.remove(ResourceType.MEMORY); + enabledResourceTypes.add(ResourceType.DISKIO); + 
assertTrue(createComparator(capacity, enabledResourceTypes).compare( + createSchedulable(3000, 1, 2), + createSchedulable(2000, 2, 7)) < 0); + + // MEMORY & DISKIO + enabledResourceTypes.remove(ResourceType.CPU); + enabledResourceTypes.add(ResourceType.MEMORY); + assertTrue(createComparator(capacity, enabledResourceTypes).compare( + createSchedulable(5000, 3, 2), + createSchedulable(2000, 5, 7)) > 0); + } + @Test public void testOneIsNeedy() { - assertTrue(createComparator(8000, 8).compare( - createSchedulable(2000, 5, 0, 6), - createSchedulable(4000, 3, 0, 0)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(2000, 5, 1, 0, 6, 0), + createSchedulable(4000, 3, 1, 0, 0, 0)) < 0); } - + @Test public void testBothAreNeedy() { - assertTrue(createComparator(8000, 100).compare( + assertTrue(createComparator(8000, 100, 200).compare( // dominant share is 2000/8000 - createSchedulable(2000, 5), + createSchedulable(2000, 5, 10), // dominant share is 4000/8000 - createSchedulable(4000, 3)) < 0); - assertTrue(createComparator(8000, 100).compare( + createSchedulable(4000, 3, 6)) < 0); + assertTrue(createComparator(8000, 100, 200).compare( // dominant min share is 2/3 - createSchedulable(2000, 5, 3000, 6), + createSchedulable(2000, 5, 5, 3000, 6, 6), // dominant min share is 4/5 - createSchedulable(4000, 3, 5000, 4)) < 0); + createSchedulable(4000, 3, 3, 5000, 4, 4)) < 0); } - + @Test public void testEvenWeightsSameDominantResource() { - assertTrue(createComparator(8000, 8).compare( - createSchedulable(3000, 1, new ResourceWeights(2.0f)), - createSchedulable(2000, 1)) < 0); - assertTrue(createComparator(8000, 8).compare( - createSchedulable(1000, 3, new ResourceWeights(2.0f)), - createSchedulable(1000, 2)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(3000, 1, 2, new ResourceWeights(2.0f)), + createSchedulable(2000, 1, 2)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(1000, 3, 6, new 
ResourceWeights(2.0f)), + createSchedulable(1000, 2, 4)) < 0); } - + @Test public void testEvenWeightsDifferentDominantResource() { - assertTrue(createComparator(8000, 8).compare( - createSchedulable(1000, 3, new ResourceWeights(2.0f)), - createSchedulable(2000, 1)) < 0); - assertTrue(createComparator(8000, 8).compare( - createSchedulable(3000, 1, new ResourceWeights(2.0f)), - createSchedulable(1000, 2)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(1000, 3, 6, new ResourceWeights(2.0f)), + createSchedulable(2000, 1, 2)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(3000, 1, 2, new ResourceWeights(2.0f)), + createSchedulable(1000, 2, 4)) < 0); } @Test public void testUnevenWeightsSameDominantResource() { - assertTrue(createComparator(8000, 8).compare( - createSchedulable(3000, 1, new ResourceWeights(2.0f, 1.0f)), - createSchedulable(2000, 1)) < 0); - assertTrue(createComparator(8000, 8).compare( - createSchedulable(1000, 3, new ResourceWeights(1.0f, 2.0f)), - createSchedulable(1000, 2)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(3000, 1, 2, new ResourceWeights(3.0f, 2.0f, 1.0f)), + createSchedulable(2000, 1, 2)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(1000, 3, 6, new ResourceWeights(1.0f, 2.0f, 3.0f)), + createSchedulable(1000, 2, 4)) < 0); } - + @Test public void testUnevenWeightsDifferentDominantResource() { - assertTrue(createComparator(8000, 8).compare( - createSchedulable(1000, 3, new ResourceWeights(1.0f, 2.0f)), - createSchedulable(2000, 1)) < 0); - assertTrue(createComparator(8000, 8).compare( - createSchedulable(3000, 1, new ResourceWeights(2.0f, 1.0f)), - createSchedulable(1000, 2)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + createSchedulable(1000, 3, 6, new ResourceWeights(1.0f, 2.0f, 3.0f)), + createSchedulable(2000, 1, 2)) < 0); + assertTrue(createComparator(8000, 8, 20).compare( + 
createSchedulable(3000, 1, 2, new ResourceWeights(3.0f, 2.0f, 1.0f)), + createSchedulable(1000, 2, 4)) < 0); } - + @Test public void testCalculateShares() { - Resource used = Resources.createResource(10, 5); - Resource capacity = Resources.createResource(100, 10); - ResourceType[] resourceOrder = new ResourceType[2]; + Resource used = Resources.createResource(10, 5, 8); + Resource capacity = Resources.createResource(100, 10, 20); + ResourceType[] resourceOrder = new ResourceType[3]; ResourceWeights shares = new ResourceWeights(); - DominantResourceFairnessPolicy.DominantResourceFairnessComparator comparator = - new DominantResourceFairnessPolicy.DominantResourceFairnessComparator(); - comparator.calculateShares(used, capacity, shares, resourceOrder, - ResourceWeights.NEUTRAL); - + ((DominantResourceFairnessPolicy.DominantResourceFairnessComparator) + createComparator(capacity)).calculateShares( + used, capacity, shares, resourceOrder, ResourceWeights.NEUTRAL); assertEquals(.1, shares.getWeight(ResourceType.MEMORY), .00001); assertEquals(.5, shares.getWeight(ResourceType.CPU), .00001); + assertEquals(.4, shares.getWeight(ResourceType.DISKIO), .00001); assertEquals(ResourceType.CPU, resourceOrder[0]); - assertEquals(ResourceType.MEMORY, resourceOrder[1]); + assertEquals(ResourceType.DISKIO, resourceOrder[1]); + assertEquals(ResourceType.MEMORY, resourceOrder[2]); } } diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index b4c4c10..79b1d9b 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -1,20 +1,20 @@ /** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo; @@ -86,15 +86,15 @@ private final int GB = 1024; private ResourceManager resourceManager = null; - - private static final RecordFactory recordFactory = + + private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); - + @Before public void setUp() throws Exception { resourceManager = new ResourceManager(); Configuration conf = new Configuration(); - conf.setClass(YarnConfiguration.RM_SCHEDULER, + conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class); resourceManager.init(conf); } @@ -103,16 +103,16 @@ public void setUp() throws Exception { public void tearDown() throws Exception { resourceManager.stop(); } - + private org.apache.hadoop.yarn.server.resourcemanager.NodeManager - registerNode(String hostName, int containerManagerPort, int nmHttpPort, - String rackName, Resource capability) throws IOException, - YarnException { + registerNode(String hostName, int containerManagerPort, int nmHttpPort, + String rackName, Resource capability) throws IOException, + YarnException { return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager( hostName, containerManagerPort, nmHttpPort, rackName, capability, resourceManager); } - + private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { ApplicationId appIdImpl = ApplicationId.newInstance(0, appId); ApplicationAttemptId attId = @@ -121,7 +121,7 @@ private ApplicationAttemptId createAppAttemptId(int appId, int attemptId) { } private ResourceRequest createResourceRequest(int memory, String host, - int priority, int numContainers) { + int priority, int numContainers) { ResourceRequest request = recordFactory .newRecordInstance(ResourceRequest.class); request.setCapability(Resources.createResource(memory)); @@ -139,7 +139,7 @@ public void testFifoSchedulerCapacityWhenNoNMs() { QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false); 
Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f); } - + @Test(timeout=5000) public void testAppAttemptMetrics() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); @@ -211,7 +211,7 @@ public void testNodeLocalAssignment() throws Exception { _appAttemptId); AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1", - "user1"); + "user1"); scheduler.handle(appEvent); AppAttemptAddedSchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false); @@ -248,7 +248,7 @@ public void testNodeLocalAssignment() throws Exception { Assert.assertEquals(3, info.getLiveContainers().size()); scheduler.stop(); } - + @Test(timeout=2000) public void testUpdateResourceOnNode() throws Exception { AsyncDispatcher dispatcher = new InlineDispatcher(); @@ -279,20 +279,20 @@ public void testUpdateResourceOnNode() throws Exception { Resources.createResource(2048, 4), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0); scheduler.handle(nodeEvent1); - + Method method = scheduler.getClass().getDeclaredMethod("getNodes"); @SuppressWarnings("unchecked") - Map schedulerNodes = + Map schedulerNodes = (Map) method.invoke(scheduler); assertEquals(schedulerNodes.values().size(), 1); - + Resource newResource = Resources.createResource(1024, 4); - - NodeResourceUpdateSchedulerEvent node0ResourceUpdate = new + + NodeResourceUpdateSchedulerEvent node0ResourceUpdate = new NodeResourceUpdateSchedulerEvent(node0, ResourceOption.newInstance( - newResource, RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)); + newResource, RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)); scheduler.handle(node0ResourceUpdate); - + // SchedulerNode's total resource and available resource are changed. 
assertEquals(schedulerNodes.get(node0.getNodeID()).getTotalResource() .getMemory(), 1024); @@ -300,14 +300,14 @@ public void testUpdateResourceOnNode() throws Exception { getAvailableResource().getMemory(), 1024); QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false); Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f); - + int _appId = 1; int _appAttemptId = 1; ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId); AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1", - "user1"); + "user1"); scheduler.handle(appEvent); AppAttemptAddedSchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false); @@ -344,118 +344,118 @@ public void testUpdateResourceOnNode() throws Exception { queueInfo = scheduler.getQueueInfo(null, false, false); Assert.assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f); } - -// @Test + + // @Test public void testFifoScheduler() throws Exception { LOG.info("--- START: testFifoScheduler ---"); - + final int GB = 1024; - + // Register node1 String host_0 = "host_0"; - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = - registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(4 * GB, 1)); + org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = + registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(4 * GB, 1)); nm_0.heartbeat(); - + // Register node2 String host_1 = "host_1"; - org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = - registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, - Resources.createResource(2 * GB, 1)); + org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = + registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(2 * GB, 1)); nm_1.heartbeat(); // ResourceRequest priorities - Priority priority_0 = - 
org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(0); - Priority priority_1 = - org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(1); - + Priority priority_0 = + org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(0); + Priority priority_1 = + org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(1); + // Submit an application Application application_0 = new Application("user_0", resourceManager); application_0.submit(); - + application_0.addNodeManager(host_0, 1234, nm_0); application_0.addNodeManager(host_1, 1234, nm_1); Resource capability_0_0 = Resources.createResource(GB); application_0.addResourceRequestSpec(priority_1, capability_0_0); - + Resource capability_0_1 = Resources.createResource(2 * GB); application_0.addResourceRequestSpec(priority_0, capability_0_1); - Task task_0_0 = new Task(application_0, priority_1, + Task task_0_0 = new Task(application_0, priority_1, new String[] {host_0, host_1}); application_0.addTask(task_0_0); - + // Submit another application Application application_1 = new Application("user_1", resourceManager); application_1.submit(); - + application_1.addNodeManager(host_0, 1234, nm_0); application_1.addNodeManager(host_1, 1234, nm_1); - + Resource capability_1_0 = Resources.createResource(3 * GB); application_1.addResourceRequestSpec(priority_1, capability_1_0); - + Resource capability_1_1 = Resources.createResource(4 * GB); application_1.addResourceRequestSpec(priority_0, capability_1_1); - Task task_1_0 = new Task(application_1, priority_1, + Task task_1_0 = new Task(application_1, priority_1, new String[] {host_0, host_1}); application_1.addTask(task_1_0); - + // Send resource requests to the scheduler LOG.info("Send resource requests to the scheduler"); application_0.schedule(); application_1.schedule(); - + // Send a heartbeat to kick the tires on the Scheduler LOG.info("Send a heartbeat to kick the tires on the Scheduler... 
" + - "nm0 -> task_0_0 and task_1_0 allocated, used=4G " + - "nm1 -> nothing allocated"); + "nm0 -> task_0_0 and task_1_0 allocated, used=4G " + + "nm1 -> nothing allocated"); nm_0.heartbeat(); // task_0_0 and task_1_0 allocated, used=4G nm_1.heartbeat(); // nothing allocated - + // Get allocations from the scheduler application_0.schedule(); // task_0_0 checkApplicationResourceUsage(GB, application_0); application_1.schedule(); // task_1_0 checkApplicationResourceUsage(3 * GB, application_1); - + nm_0.heartbeat(); nm_1.heartbeat(); - + checkNodeResourceUsage(4*GB, nm_0); // task_0_0 (1G) and task_1_0 (3G) checkNodeResourceUsage(0*GB, nm_1); // no tasks, 2G available - + LOG.info("Adding new tasks..."); - - Task task_1_1 = new Task(application_1, priority_1, + + Task task_1_1 = new Task(application_1, priority_1, new String[] {ResourceRequest.ANY}); application_1.addTask(task_1_1); - Task task_1_2 = new Task(application_1, priority_1, + Task task_1_2 = new Task(application_1, priority_1, new String[] {ResourceRequest.ANY}); application_1.addTask(task_1_2); - Task task_1_3 = new Task(application_1, priority_0, + Task task_1_3 = new Task(application_1, priority_0, new String[] {ResourceRequest.ANY}); application_1.addTask(task_1_3); - + application_1.schedule(); - - Task task_0_1 = new Task(application_0, priority_1, + + Task task_0_1 = new Task(application_0, priority_1, new String[] {host_0, host_1}); application_0.addTask(task_0_1); - Task task_0_2 = new Task(application_0, priority_1, + Task task_0_2 = new Task(application_0, priority_1, new String[] {host_0, host_1}); application_0.addTask(task_0_2); - - Task task_0_3 = new Task(application_0, priority_0, + + Task task_0_3 = new Task(application_0, priority_0, new String[] {ResourceRequest.ANY}); application_0.addTask(task_0_3); @@ -464,10 +464,10 @@ public void testFifoScheduler() throws Exception { // Send a heartbeat to kick the tires on the Scheduler LOG.info("Sending hb from " + nm_0.getHostName()); 
nm_0.heartbeat(); // nothing new, used=4G - + LOG.info("Sending hb from " + nm_1.getHostName()); nm_1.heartbeat(); // task_0_3, used=2G - + // Get allocations from the scheduler LOG.info("Trying to allocate..."); application_0.schedule(); @@ -478,7 +478,7 @@ public void testFifoScheduler() throws Exception { nm_1.heartbeat(); checkNodeResourceUsage(4*GB, nm_0); checkNodeResourceUsage(2*GB, nm_1); - + // Complete tasks LOG.info("Finishing up task_0_0"); application_0.finishTask(task_0_0); // Now task_0_1 @@ -512,7 +512,7 @@ public void testFifoScheduler() throws Exception { checkApplicationResourceUsage(0 * GB, application_1); //checkNodeResourceUsage(2*GB, nm_0); // final over-commit, rm.node->1G, test.node->2G checkNodeResourceUsage(0*GB, nm_1); - + LOG.info("Finishing up task_0_1"); application_0.finishTask(task_0_1); application_0.schedule(); @@ -521,7 +521,7 @@ public void testFifoScheduler() throws Exception { nm_1.heartbeat(); checkApplicationResourceUsage(1 * GB, application_0); checkApplicationResourceUsage(0 * GB, application_1); - + LOG.info("Finishing up task_0_2"); application_0.finishTask(task_0_2); // now task_1_3 can go! 
application_0.schedule(); @@ -530,7 +530,7 @@ public void testFifoScheduler() throws Exception { nm_1.heartbeat(); checkApplicationResourceUsage(0 * GB, application_0); checkApplicationResourceUsage(4 * GB, application_1); - + LOG.info("Finishing up task_1_3"); application_1.finishTask(task_1_3); // now task_1_1 application_0.schedule(); @@ -539,7 +539,7 @@ public void testFifoScheduler() throws Exception { nm_1.heartbeat(); checkApplicationResourceUsage(0 * GB, application_0); checkApplicationResourceUsage(3 * GB, application_1); - + LOG.info("Finishing up task_1_1"); application_1.finishTask(task_1_1); application_0.schedule(); @@ -548,7 +548,7 @@ public void testFifoScheduler() throws Exception { nm_1.heartbeat(); checkApplicationResourceUsage(0 * GB, application_0); checkApplicationResourceUsage(3 * GB, application_1); - + LOG.info("--- END: testFifoScheduler ---"); } @@ -571,7 +571,7 @@ public void testBlackListNodes() throws Exception { appId, 1); SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "default", - "user"); + "user"); fs.handle(appEvent); SchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false); @@ -588,22 +588,22 @@ public void testBlackListNodes() throws Exception { Assert.assertFalse(fs.getApplicationAttempt(appAttemptId).isBlacklisted(host)); rm.stop(); } - + @Test public void testGetAppsInQueue() throws Exception { Application application_0 = new Application("user_0", resourceManager); application_0.submit(); - + Application application_1 = new Application("user_0", resourceManager); application_1.submit(); - + ResourceScheduler scheduler = resourceManager.getResourceScheduler(); - + List appsInDefault = scheduler.getAppsInQueue("default"); assertTrue(appsInDefault.contains(application_0.getApplicationAttemptId())); assertTrue(appsInDefault.contains(application_1.getApplicationAttemptId())); assertEquals(2, appsInDefault.size()); - + Assert.assertNull(scheduler.getAppsInQueue("someotherqueue")); } @@ 
-611,23 +611,23 @@ public void testGetAppsInQueue() throws Exception { public void testAddAndRemoveAppFromFiFoScheduler() throws Exception { Configuration conf = new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, - ResourceScheduler.class); + ResourceScheduler.class); MockRM rm = new MockRM(conf); @SuppressWarnings("unchecked") AbstractYarnScheduler fs = (AbstractYarnScheduler) rm - .getResourceScheduler(); + .getResourceScheduler(); TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler( - fs.getSchedulerApplications(), fs, "queue"); + fs.getSchedulerApplications(), fs, "queue"); } - private void checkApplicationResourceUsage(int expected, - Application application) { + private void checkApplicationResourceUsage(int expected, + Application application) { Assert.assertEquals(expected, application.getUsedResources().getMemory()); } - + private void checkNodeResourceUsage(int expected, - org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) { + org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) { Assert.assertEquals(expected, node.getUsed().getMemory()); node.checkResourceUsage(); } @@ -638,4 +638,4 @@ public static void main(String[] arg) throws Exception { t.testFifoScheduler(); t.tearDown(); } -} +} \ No newline at end of file diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java index de8d302..98629ba 100644 --- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java +++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java @@ -224,7 +224,7 @@ 
private void testNMTokens(Configuration conf) throws Exception { */ YarnRPC rpc = YarnRPC.create(conf); String user = "test"; - Resource r = Resource.newInstance(1024, 1); + Resource r = Resource.newInstance(1024, 1, 1); ApplicationId appId = ApplicationId.newInstance(1, 1); ApplicationAttemptId validAppAttemptId = @@ -657,7 +657,7 @@ private void testContainerToken(Configuration conf) throws IOException, yarnCluster.getResourceManager().getRMContext(). getContainerTokenSecretManager(); - Resource r = Resource.newInstance(1230, 2); + Resource r = Resource.newInstance(1230, 2, 2); Token containerToken = containerTokenSecretManager.createContainerToken(