diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 88b57f1..bd13597 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.api.records;
import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
@@ -53,7 +54,7 @@
@Public
@Stable
- public static Resource newInstance(int memory, int vCores) {
+ public static Resource newInstance(long memory, long vCores) {
Resource resource = Records.newRecord(Resource.class);
resource.setMemory(memory);
resource.setVirtualCores(vCores);
@@ -67,6 +68,14 @@ public static Resource newInstance(int memory, int vCores) {
@Public
@Stable
public abstract int getMemory();
+
+ /**
+ * Get memory of the resource.
+ * @return memory of the resource
+ */
+ @Public
+ @InterfaceStability.Evolving
+ public abstract long getMemoryLong();
/**
* Set memory of the resource.
@@ -74,7 +83,7 @@ public static Resource newInstance(int memory, int vCores) {
*/
@Public
@Stable
- public abstract void setMemory(int memory);
+ public abstract void setMemory(long memory);
/**
@@ -90,6 +99,10 @@ public static Resource newInstance(int memory, int vCores) {
@Public
@Evolving
public abstract int getVirtualCores();
+
+ @Public
+ @Evolving
+ public abstract long getVirtualCoresLong();
/**
* Set number of virtual cpu cores of the resource.
@@ -103,13 +116,13 @@ public static Resource newInstance(int memory, int vCores) {
*/
@Public
@Evolving
- public abstract void setVirtualCores(int vCores);
+ public abstract void setVirtualCores(long vCores);
@Override
public int hashCode() {
final int prime = 263167;
- int result = 3571;
- result = 939769357 + getMemory(); // prime * result = 939769357 initially
+
+ int result = (int) (939769357 + getMemoryLong()); // prime * result = 939769357 initially
result = prime * result + getVirtualCores();
return result;
}
@@ -123,7 +136,7 @@ public boolean equals(Object obj) {
if (!(obj instanceof Resource))
return false;
Resource other = (Resource) obj;
- if (getMemory() != other.getMemory() ||
+ if (getMemoryLong() != other.getMemoryLong() ||
getVirtualCores() != other.getVirtualCores()) {
return false;
}
@@ -132,6 +145,6 @@ public boolean equals(Object obj) {
@Override
public String toString() {
- return "<memory:" + getMemory() + ", vCores:" + getVirtualCores() + ">";
+ return "<memory:" + getMemoryLong() + ", vCores:" + getVirtualCoresLong() + ">";
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 2fe4eda..7090deb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -54,8 +54,8 @@ message ContainerIdProto {
}
message ResourceProto {
- optional int32 memory = 1;
- optional int32 virtual_cores = 2;
+ optional int64 memory = 1;
+ optional int64 virtual_cores = 2;
}
message ResourceUtilizationProto {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 2b85ba8..ba47887 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -221,7 +221,7 @@
@VisibleForTesting
protected int numTotalContainers = 1;
// Memory to request for the container on which the shell command will run
- private int containerMemory = 10;
+ private long containerMemory = 10;
// VirtualCores to request for the container on which the shell command will run
private int containerVirtualCores = 1;
// Priority of the request
@@ -594,7 +594,7 @@ public void run() throws YarnException, IOException, InterruptedException {
appMasterTrackingUrl);
// Dump out information about cluster capability as seen by the
// resource manager
- int maxMem = response.getMaximumResourceCapability().getMemory();
+ long maxMem = response.getMaximumResourceCapability().getMemoryLong();
LOG.info("Max mem capability of resources in this cluster " + maxMem);
int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
@@ -824,7 +824,7 @@ public void onContainersAllocated(List allocatedContainers) {
+ ":" + allocatedContainer.getNodeId().getPort()
+ ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress()
+ ", containerResourceMemory"
- + allocatedContainer.getResource().getMemory()
+ + allocatedContainer.getResource().getMemoryLong()
+ ", containerResourceVirtualCores"
+ allocatedContainer.getResource().getVirtualCores());
// + ", containerToken"
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index e864ad2..55a198b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -127,7 +127,7 @@
// Queue for App master
private String amQueue = "";
// Amt. of memory resource to request for to run the App Master
- private int amMemory = 10;
+ private long amMemory = 10;
// Amt. of virtual core resource to request for to run the App Master
private int amVCores = 1;
@@ -488,7 +488,7 @@ public boolean run() throws IOException, YarnException {
// the required resources from the RM for the app master
// Memory ask has to be a multiple of min and less than max.
// Dump out information about cluster capability as seen by the resource manager
- int maxMem = appResponse.getMaximumResourceCapability().getMemory();
+ long maxMem = appResponse.getMaximumResourceCapability().getMemoryLong();
LOG.info("Max mem capability of resources in this cluster " + maxMem);
// A resource ask cannot exceed the max.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
index 37a1fb6..d0711d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
@@ -122,10 +122,10 @@
class ResourceReverseMemoryThenCpuComparator implements Comparator {
@Override
public int compare(Resource arg0, Resource arg1) {
- int mem0 = arg0.getMemory();
- int mem1 = arg1.getMemory();
- int cpu0 = arg0.getVirtualCores();
- int cpu1 = arg1.getVirtualCores();
+ long mem0 = arg0.getMemoryLong();
+ long mem1 = arg1.getMemoryLong();
+ long cpu0 = arg0.getVirtualCores();
+ long cpu1 = arg1.getVirtualCores();
if(mem0 == mem1) {
if(cpu0 == cpu1) {
return 0;
@@ -143,10 +143,10 @@ public int compare(Resource arg0, Resource arg1) {
}
static boolean canFit(Resource arg0, Resource arg1) {
- int mem0 = arg0.getMemory();
- int mem1 = arg1.getMemory();
- int cpu0 = arg0.getVirtualCores();
- int cpu1 = arg1.getVirtualCores();
+ long mem0 = arg0.getMemoryLong();
+ long mem1 = arg1.getMemoryLong();
+ long cpu0 = arg0.getVirtualCores();
+ long cpu1 = arg1.getVirtualCores();
return (mem0 <= mem1 && cpu0 <= cpu1);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
index c112857..c94d2d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
@@ -275,9 +275,9 @@ private void printNodeStatus(String nodeIdStr) throws YarnException,
nodeReportStr.println(nodeReport.getNumContainers());
nodeReportStr.print("\tMemory-Used : ");
nodeReportStr.println((nodeReport.getUsed() == null) ? "0MB"
- : (nodeReport.getUsed().getMemory() + "MB"));
+ : (nodeReport.getUsed().getMemoryLong() + "MB"));
nodeReportStr.print("\tMemory-Capacity : ");
- nodeReportStr.println(nodeReport.getCapability().getMemory() + "MB");
+ nodeReportStr.println(nodeReport.getCapability().getMemoryLong() + "MB");
nodeReportStr.print("\tCPU-Used : ");
nodeReportStr.println((nodeReport.getUsed() == null) ? "0 vcores"
: (nodeReport.getUsed().getVirtualCores() + " vcores"));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
index 211f5e8..3db3c6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
@@ -158,7 +158,7 @@ public ColumnInformation(String header, String format, boolean display,
displayStringsMap.put(Columns.VCORES, String.valueOf(usedVirtualCores));
usedMemory =
appReport.getApplicationResourceUsageReport().getUsedResources()
- .getMemory() / 1024;
+ .getMemoryLong() / 1024;
displayStringsMap.put(Columns.MEM, String.valueOf(usedMemory) + "G");
reservedVirtualCores =
appReport.getApplicationResourceUsageReport().getReservedResources()
@@ -167,7 +167,7 @@ public ColumnInformation(String header, String format, boolean display,
String.valueOf(reservedVirtualCores));
reservedMemory =
appReport.getApplicationResourceUsageReport().getReservedResources()
- .getMemory() / 1024;
+ .getMemoryLong() / 1024;
displayStringsMap.put(Columns.RMEM, String.valueOf(reservedMemory) + "G");
attempts = appReport.getCurrentApplicationAttemptId().getAttemptId();
nodes = 0;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 2d11d8a..a0af729 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -1199,7 +1199,7 @@ public void testReservationAPIs() {
for(attempts = 10; attempts > 0; attempts--) {
if (cluster.getResourceManager().getRMContext().getReservationSystem()
.getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity()
- .getMemory() > 0) {
+ .getMemoryLong() > 0) {
break;
}
try {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index a28c6ed..f4692e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -57,35 +57,45 @@ private void maybeInitBuilder() {
@Override
public int getMemory() {
+ return (int) getMemoryLong();
+ }
+
+ @Override
+ public long getMemoryLong() {
ResourceProtoOrBuilder p = viaProto ? proto : builder;
- return (p.getMemory());
+ return p.getMemory();
}
@Override
- public void setMemory(int memory) {
+ public void setMemory(long memory) {
maybeInitBuilder();
- builder.setMemory((memory));
+ builder.setMemory(memory);
}
@Override
public int getVirtualCores() {
+ return (int) getVirtualCoresLong();
+ }
+
+ @Override
+ public long getVirtualCoresLong() {
ResourceProtoOrBuilder p = viaProto ? proto : builder;
- return (p.getVirtualCores());
+ return p.getVirtualCores();
}
@Override
- public void setVirtualCores(int vCores) {
+ public void setVirtualCores(long vCores) {
maybeInitBuilder();
- builder.setVirtualCores((vCores));
+ builder.setVirtualCores(vCores);
}
@Override
public int compareTo(Resource other) {
- int diff = this.getMemory() - other.getMemory();
+ long diff = Long.compare(this.getMemoryLong(), other.getMemoryLong());
if (diff == 0) {
diff = this.getVirtualCores() - other.getVirtualCores();
}
- return diff;
+ return diff == 0 ? 0 : (diff > 0 ? 1 : -1);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 2fdf214..34490d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -28,13 +28,13 @@
@Override
public int compare(Resource unused, Resource lhs, Resource rhs) {
// Only consider memory
- return lhs.getMemory() - rhs.getMemory();
+ return Long.compare(lhs.getMemoryLong(), rhs.getMemoryLong());
}
@Override
- public int computeAvailableContainers(Resource available, Resource required) {
+ public long computeAvailableContainers(Resource available, Resource required) {
// Only consider memory
- return available.getMemory() / required.getMemory();
+ return available.getMemoryLong() / required.getMemoryLong();
}
@Override
@@ -44,7 +44,7 @@ public float divide(Resource unused,
}
public boolean isInvalidDivisor(Resource r) {
- if (r.getMemory() == 0.0f) {
+ if (r.getMemoryLong() == 0) {
return true;
}
return false;
@@ -52,23 +52,23 @@ public boolean isInvalidDivisor(Resource r) {
@Override
public float ratio(Resource a, Resource b) {
- return (float)a.getMemory() / b.getMemory();
+ return (float)a.getMemoryLong() / b.getMemoryLong();
}
@Override
- public Resource divideAndCeil(Resource numerator, int denominator) {
+ public Resource divideAndCeil(Resource numerator, long denominator) {
return Resources.createResource(
- divideAndCeil(numerator.getMemory(), denominator));
+ divideAndCeil(numerator.getMemoryLong(), denominator));
}
@Override
public Resource normalize(Resource r, Resource minimumResource,
Resource maximumResource, Resource stepFactor) {
- int normalizedMemory = Math.min(
+ long normalizedMemory = Math.min(
roundUp(
- Math.max(r.getMemory(), minimumResource.getMemory()),
- stepFactor.getMemory()),
- maximumResource.getMemory());
+ Math.max(r.getMemoryLong(), minimumResource.getMemoryLong()),
+ stepFactor.getMemoryLong()),
+ maximumResource.getMemoryLong());
return Resources.createResource(normalizedMemory);
}
@@ -81,21 +81,21 @@ public Resource normalize(Resource r, Resource minimumResource,
@Override
public Resource roundUp(Resource r, Resource stepFactor) {
return Resources.createResource(
- roundUp(r.getMemory(), stepFactor.getMemory())
+ roundUp(r.getMemoryLong(), stepFactor.getMemoryLong())
);
}
@Override
public Resource roundDown(Resource r, Resource stepFactor) {
return Resources.createResource(
- roundDown(r.getMemory(), stepFactor.getMemory()));
+ roundDown(r.getMemoryLong(), stepFactor.getMemoryLong()));
}
@Override
public Resource multiplyAndNormalizeUp(Resource r, double by,
Resource stepFactor) {
return Resources.createResource(
- roundUp((int)(r.getMemory() * by + 0.5), stepFactor.getMemory())
+ roundUp((long)(r.getMemoryLong() * by + 0.5), stepFactor.getMemoryLong())
);
}
@@ -104,8 +104,8 @@ public Resource multiplyAndNormalizeDown(Resource r, double by,
Resource stepFactor) {
return Resources.createResource(
roundDown(
- (int)(r.getMemory() * by),
- stepFactor.getMemory()
+ (long)(r.getMemoryLong() * by),
+ stepFactor.getMemoryLong()
)
);
}
@@ -113,6 +113,6 @@ public Resource multiplyAndNormalizeDown(Resource r, double by,
@Override
public boolean fitsIn(Resource cluster,
Resource smaller, Resource bigger) {
- return smaller.getMemory() <= bigger.getMemory();
+ return smaller.getMemoryLong() <= bigger.getMemoryLong();
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index b5c9967..83eb5fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -54,15 +54,15 @@ public int compare(Resource clusterResource, Resource lhs, Resource rhs) {
}
if (isInvalidDivisor(clusterResource)) {
- if ((lhs.getMemory() < rhs.getMemory() && lhs.getVirtualCores() > rhs
+ if ((lhs.getMemoryLong() < rhs.getMemoryLong() && lhs.getVirtualCores() > rhs
.getVirtualCores())
- || (lhs.getMemory() > rhs.getMemory() && lhs.getVirtualCores() < rhs
+ || (lhs.getMemoryLong() > rhs.getMemoryLong() && lhs.getVirtualCores() < rhs
.getVirtualCores())) {
return 0;
- } else if (lhs.getMemory() > rhs.getMemory()
+ } else if (lhs.getMemoryLong() > rhs.getMemoryLong()
|| lhs.getVirtualCores() > rhs.getVirtualCores()) {
return 1;
- } else if (lhs.getMemory() < rhs.getMemory()
+ } else if (lhs.getMemoryLong() < rhs.getMemoryLong()
|| lhs.getVirtualCores() < rhs.getVirtualCores()) {
return -1;
}
@@ -100,20 +100,20 @@ protected float getResourceAsValue(
// Just use 'dominant' resource
return (dominant) ?
Math.max(
- (float)resource.getMemory() / clusterResource.getMemory(),
+ (float)resource.getMemoryLong() / clusterResource.getMemoryLong(),
(float)resource.getVirtualCores() / clusterResource.getVirtualCores()
)
:
Math.min(
- (float)resource.getMemory() / clusterResource.getMemory(),
+ (float)resource.getMemoryLong() / clusterResource.getMemoryLong(),
(float)resource.getVirtualCores() / clusterResource.getVirtualCores()
);
}
@Override
- public int computeAvailableContainers(Resource available, Resource required) {
+ public long computeAvailableContainers(Resource available, Resource required) {
return Math.min(
- available.getMemory() / required.getMemory(),
+ available.getMemoryLong() / required.getMemoryLong(),
available.getVirtualCores() / required.getVirtualCores());
}
@@ -127,7 +127,7 @@ public float divide(Resource clusterResource,
@Override
public boolean isInvalidDivisor(Resource r) {
- if (r.getMemory() == 0.0f || r.getVirtualCores() == 0.0f) {
+ if (r.getMemoryLong() == 0 || r.getVirtualCores() == 0) {
return true;
}
return false;
@@ -136,15 +136,15 @@ public boolean isInvalidDivisor(Resource r) {
@Override
public float ratio(Resource a, Resource b) {
return Math.max(
- (float)a.getMemory()/b.getMemory(),
+ (float)a.getMemoryLong()/b.getMemoryLong(),
(float)a.getVirtualCores()/b.getVirtualCores()
);
}
@Override
- public Resource divideAndCeil(Resource numerator, int denominator) {
+ public Resource divideAndCeil(Resource numerator, long denominator) {
return Resources.createResource(
- divideAndCeil(numerator.getMemory(), denominator),
+ divideAndCeil(numerator.getMemoryLong(), denominator),
divideAndCeil(numerator.getVirtualCores(), denominator)
);
}
@@ -152,12 +152,12 @@ public Resource divideAndCeil(Resource numerator, int denominator) {
@Override
public Resource normalize(Resource r, Resource minimumResource,
Resource maximumResource, Resource stepFactor) {
- int normalizedMemory = Math.min(
+ long normalizedMemory = Math.min(
roundUp(
- Math.max(r.getMemory(), minimumResource.getMemory()),
- stepFactor.getMemory()),
- maximumResource.getMemory());
- int normalizedCores = Math.min(
+ Math.max(r.getMemoryLong(), minimumResource.getMemoryLong()),
+ stepFactor.getMemoryLong()),
+ maximumResource.getMemoryLong());
+ long normalizedCores = Math.min(
roundUp(
Math.max(r.getVirtualCores(), minimumResource.getVirtualCores()),
stepFactor.getVirtualCores()),
@@ -169,7 +169,7 @@ public Resource normalize(Resource r, Resource minimumResource,
@Override
public Resource roundUp(Resource r, Resource stepFactor) {
return Resources.createResource(
- roundUp(r.getMemory(), stepFactor.getMemory()),
+ roundUp(r.getMemoryLong(), stepFactor.getMemoryLong()),
roundUp(r.getVirtualCores(), stepFactor.getVirtualCores())
);
}
@@ -177,7 +177,7 @@ public Resource roundUp(Resource r, Resource stepFactor) {
@Override
public Resource roundDown(Resource r, Resource stepFactor) {
return Resources.createResource(
- roundDown(r.getMemory(), stepFactor.getMemory()),
+ roundDown(r.getMemoryLong(), stepFactor.getMemoryLong()),
roundDown(r.getVirtualCores(), stepFactor.getVirtualCores())
);
}
@@ -187,7 +187,7 @@ public Resource multiplyAndNormalizeUp(Resource r, double by,
Resource stepFactor) {
return Resources.createResource(
roundUp(
- (int)Math.ceil(r.getMemory() * by), stepFactor.getMemory()),
+ (long)Math.ceil(r.getMemoryLong() * by), stepFactor.getMemoryLong()),
roundUp(
(int)Math.ceil(r.getVirtualCores() * by),
stepFactor.getVirtualCores())
@@ -199,8 +199,8 @@ public Resource multiplyAndNormalizeDown(Resource r, double by,
Resource stepFactor) {
return Resources.createResource(
roundDown(
- (int)(r.getMemory() * by),
- stepFactor.getMemory()
+ (long)(r.getMemoryLong() * by),
+ stepFactor.getMemoryLong()
),
roundDown(
(int)(r.getVirtualCores() * by),
@@ -212,7 +212,7 @@ public Resource multiplyAndNormalizeDown(Resource r, double by,
@Override
public boolean fitsIn(Resource cluster,
Resource smaller, Resource bigger) {
- return smaller.getMemory() <= bigger.getMemory()
+ return smaller.getMemoryLong() <= bigger.getMemoryLong()
&& smaller.getVirtualCores() <= bigger.getVirtualCores();
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 3a31225..3fe8ade 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -31,18 +31,18 @@
public abstract int
compare(Resource clusterResource, Resource lhs, Resource rhs);
- public static int divideAndCeil(int a, int b) {
+ public static long divideAndCeil(long a, long b) {
if (b == 0) {
return 0;
}
return (a + (b - 1)) / b;
}
- public static int roundUp(int a, int b) {
+ public static long roundUp(long a, long b) {
return divideAndCeil(a, b) * b;
}
- public static int roundDown(int a, int b) {
+ public static long roundDown(long a, long b) {
return (a / b) * b;
}
@@ -54,7 +54,7 @@ public static int roundDown(int a, int b) {
* @param required required resources
* @return number of containers which can be allocated
*/
- public abstract int computeAvailableContainers(
+ public abstract long computeAvailableContainers(
Resource available, Resource required);
/**
@@ -169,7 +169,7 @@ public abstract float divide(
* @param denominator denominator
* @return resultant resource
*/
- public abstract Resource divideAndCeil(Resource numerator, int denominator);
+ public abstract Resource divideAndCeil(Resource numerator, long denominator);
/**
* Check if a smaller resource can be contained by bigger resource.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 558f96c..6060a9a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -36,7 +36,12 @@ public int getMemory() {
}
@Override
- public void setMemory(int memory) {
+ public long getMemoryLong() {
+ return 0;
+ }
+
+ @Override
+ public void setMemory(long memory) {
throw new RuntimeException("NONE cannot be modified!");
}
@@ -46,17 +51,22 @@ public int getVirtualCores() {
}
@Override
- public void setVirtualCores(int cores) {
+ public long getVirtualCoresLong() {
+ return 0;
+ }
+
+ @Override
+ public void setVirtualCores(long cores) {
throw new RuntimeException("NONE cannot be modified!");
}
@Override
public int compareTo(Resource o) {
- int diff = 0 - o.getMemory();
+ long diff = 0 - o.getMemoryLong();
if (diff == 0) {
diff = 0 - o.getVirtualCores();
}
- return diff;
+ return Long.signum(diff);
}
};
@@ -65,11 +75,16 @@ public int compareTo(Resource o) {
@Override
public int getMemory() {
return Integer.MAX_VALUE;
}
+
+ @Override
+ public long getMemoryLong() {
+ return Long.MAX_VALUE;
+ }
@Override
- public void setMemory(int memory) {
+ public void setMemory(long memory) {
throw new RuntimeException("UNBOUNDED cannot be modified!");
}
@@ -79,26 +94,31 @@ public int getVirtualCores() {
}
@Override
- public void setVirtualCores(int cores) {
+ public long getVirtualCoresLong() {
+ return Long.MAX_VALUE;
+ }
+
+ @Override
+ public void setVirtualCores(long cores) {
throw new RuntimeException("UNBOUNDED cannot be modified!");
}
@Override
public int compareTo(Resource o) {
- int diff = Integer.MAX_VALUE - o.getMemory();
+ long diff = Long.MAX_VALUE - o.getMemoryLong();
if (diff == 0) {
diff = Integer.MAX_VALUE - o.getVirtualCores();
}
- return diff;
+ return Long.signum(diff);
}
};
- public static Resource createResource(int memory) {
+ public static Resource createResource(long memory) {
return createResource(memory, (memory > 0) ? 1 : 0);
}
- public static Resource createResource(int memory, int cores) {
+ public static Resource createResource(long memory, long cores) {
Resource resource = Records.newRecord(Resource.class);
resource.setMemory(memory);
resource.setVirtualCores(cores);
@@ -114,11 +134,11 @@ public static Resource unbounded() {
}
public static Resource clone(Resource res) {
- return createResource(res.getMemory(), res.getVirtualCores());
+ return createResource(res.getMemoryLong(), res.getVirtualCores());
}
public static Resource addTo(Resource lhs, Resource rhs) {
- lhs.setMemory(lhs.getMemory() + rhs.getMemory());
+ lhs.setMemory(lhs.getMemoryLong() + rhs.getMemoryLong());
lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores());
return lhs;
}
@@ -128,7 +148,7 @@ public static Resource add(Resource lhs, Resource rhs) {
}
public static Resource subtractFrom(Resource lhs, Resource rhs) {
- lhs.setMemory(lhs.getMemory() - rhs.getMemory());
+ lhs.setMemory(lhs.getMemoryLong() - rhs.getMemoryLong());
lhs.setVirtualCores(lhs.getVirtualCores() - rhs.getVirtualCores());
return lhs;
}
@@ -142,7 +162,7 @@ public static Resource negate(Resource resource) {
}
public static Resource multiplyTo(Resource lhs, double by) {
- lhs.setMemory((int)(lhs.getMemory() * by));
+ lhs.setMemory((long)(lhs.getMemoryLong() * by));
lhs.setVirtualCores((int)(lhs.getVirtualCores() * by));
return lhs;
}
@@ -157,7 +177,7 @@ public static Resource multiply(Resource lhs, double by) {
*/
public static Resource multiplyAndAddTo(
Resource lhs, Resource rhs, double by) {
- lhs.setMemory(lhs.getMemory() + (int)(rhs.getMemory() * by));
+ lhs.setMemory(lhs.getMemoryLong() + (long)(rhs.getMemoryLong() * by));
lhs.setVirtualCores(lhs.getVirtualCores()
+ (int)(rhs.getVirtualCores() * by));
return lhs;
@@ -175,7 +195,7 @@ public static Resource multiplyAndNormalizeDown(
public static Resource multiplyAndRoundDown(Resource lhs, double by) {
Resource out = clone(lhs);
- out.setMemory((int)(lhs.getMemory() * by));
+ out.setMemory((long)(lhs.getMemoryLong() * by));
out.setVirtualCores((int)(lhs.getVirtualCores() * by));
return out;
}
@@ -264,7 +284,7 @@ public static Resource max(
}
public static boolean fitsIn(Resource smaller, Resource bigger) {
- return smaller.getMemory() <= bigger.getMemory() &&
+ return smaller.getMemoryLong() <= bigger.getMemoryLong() &&
smaller.getVirtualCores() <= bigger.getVirtualCores();
}
@@ -274,12 +294,12 @@ public static boolean fitsIn(ResourceCalculator rc, Resource cluster,
}
public static Resource componentwiseMin(Resource lhs, Resource rhs) {
- return createResource(Math.min(lhs.getMemory(), rhs.getMemory()),
+ return createResource(Math.min(lhs.getMemoryLong(), rhs.getMemoryLong()),
Math.min(lhs.getVirtualCores(), rhs.getVirtualCores()));
}
public static Resource componentwiseMax(Resource lhs, Resource rhs) {
- return createResource(Math.max(lhs.getMemory(), rhs.getMemory()),
+ return createResource(Math.max(lhs.getMemoryLong(), rhs.getMemoryLong()),
Math.max(lhs.getVirtualCores(), rhs.getVirtualCores()));
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
index a70d143..5bd0c3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
@@ -434,7 +434,7 @@ public static ApplicationResourceUsageReport newApplicationResourceUsageReport(
return report;
}
- public static Resource newResource(int memory, int vCores) {
+ public static Resource newResource(long memory, long vCores) {
Resource resource = recordFactory.newRecordInstance(Resource.class);
resource.setMemory(memory);
resource.setVirtualCores(vCores);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index cad3b2e..eaa19fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -59,8 +59,8 @@
protected long elapsedTime;
protected String applicationTags;
protected int priority;
- private int allocatedCpuVcores;
- private int allocatedMemoryMB;
+ private long allocatedCpuVcores;
+ private long allocatedMemoryMB;
protected boolean unmanagedApplication;
private String appNodeLabelExpression;
private String amNodeLabelExpression;
@@ -100,7 +100,7 @@ public AppInfo(ApplicationReport app) {
allocatedCpuVcores = app.getApplicationResourceUsageReport()
.getUsedResources().getVirtualCores();
allocatedMemoryMB = app.getApplicationResourceUsageReport()
- .getUsedResources().getMemory();
+ .getUsedResources().getMemoryLong();
}
}
progress = app.getProgress() * 100; // in percent
@@ -152,11 +152,11 @@ public int getRunningContainers() {
return runningContainers;
}
- public int getAllocatedCpuVcores() {
+ public long getAllocatedCpuVcores() {
return allocatedCpuVcores;
}
- public int getAllocatedMemoryMB() {
+ public long getAllocatedMemoryMB() {
return allocatedMemoryMB;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
index d0d4df6..9b32da0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java
@@ -36,8 +36,8 @@
public class ContainerInfo {
protected String containerId;
- protected int allocatedMB;
- protected int allocatedVCores;
+ protected long allocatedMB;
+ protected long allocatedVCores;
protected String assignedNodeId;
protected int priority;
protected long startedTime;
@@ -56,7 +56,7 @@ public ContainerInfo() {
public ContainerInfo(ContainerReport container) {
containerId = container.getContainerId().toString();
if (container.getAllocatedResource() != null) {
- allocatedMB = container.getAllocatedResource().getMemory();
+ allocatedMB = container.getAllocatedResource().getMemoryLong();
allocatedVCores = container.getAllocatedResource().getVirtualCores();
}
if (container.getAssignedNode() != null) {
@@ -77,11 +77,11 @@ public String getContainerId() {
return containerId;
}
- public int getAllocatedMB() {
+ public long getAllocatedMB() {
return allocatedMB;
}
- public int getAllocatedVCores() {
+ public long getAllocatedVCores() {
return allocatedVCores;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
index c9427dd..939f08d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
@@ -180,7 +180,7 @@ public void testNodeHeartbeatResponsePBImplWithDecreasedContainers() {
assertEquals(1, copy.getContainersToDecrease().get(0)
.getId().getContainerId());
assertEquals(1024, copy.getContainersToDecrease().get(1)
- .getResource().getMemory());
+ .getResource().getMemoryLong());
}
/**
@@ -201,7 +201,7 @@ public void testRegisterNodeManagerRequestPBImpl() {
assertEquals(8080, copy.getHttpPort());
assertEquals(9090, copy.getNodeId().getPort());
- assertEquals(10000, copy.getResource().getMemory());
+ assertEquals(10000, copy.getResource().getMemoryLong());
assertEquals(2, copy.getResource().getVirtualCores());
}
@@ -273,7 +273,7 @@ public void testNodeStatusPBImpl() {
assertEquals(1, copy.getIncreasedContainers().get(0)
.getId().getContainerId());
assertEquals(4096, copy.getIncreasedContainers().get(1)
- .getResource().getMemory());
+ .getResource().getMemoryLong());
}
@Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 8c74bf5..c4ce904 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -404,7 +404,7 @@ protected Path getPidFilePath(ContainerId containerId) {
.getBoolean(
YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED,
YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) {
- memory = resource.getMemory();
+ memory = (int) resource.getMemoryLong();
}
if (conf.getBoolean(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 6767214..ca4e2ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -534,7 +534,7 @@ private void sendContainerMonitorStartEvent() {
long launchDuration = clock.getTime() - containerLaunchStartTime;
metrics.addContainerLaunchDuration(launchDuration);
- long pmemBytes = getResource().getMemory() * 1024 * 1024L;
+ long pmemBytes = getResource().getMemoryLong() * 1024 * 1024L;
float pmemRatio = daemonConf.getFloat(
YarnConfiguration.NM_VMEM_PMEM_RATIO,
YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
index 78113e5..d9e7e7b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
@@ -115,8 +115,8 @@ int getSwappiness() {
String cgroupId = container.getContainerId().toString();
//memory is in MB
long containerSoftLimit =
- (long) (container.getResource().getMemory() * this.softLimit);
- long containerHardLimit = container.getResource().getMemory();
+ (long) (container.getResource().getMemoryLong() * this.softLimit);
+ long containerHardLimit = container.getResource().getMemoryLong();
cGroupsHandler.createCGroup(MEMORY, cgroupId);
try {
cGroupsHandler.updateCGroupParam(MEMORY, cgroupId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 0feac3b..6076591 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -646,7 +646,7 @@ private void updateContainerMetrics(ContainersMonitorEvent monitoringEvent) {
ChangeMonitoringContainerResourceEvent changeEvent =
(ChangeMonitoringContainerResourceEvent) monitoringEvent;
Resource resource = changeEvent.getResource();
- pmemLimitMBs = resource.getMemory();
+ pmemLimitMBs = (int) resource.getMemoryLong();
vmemLimitMBs = (int) (pmemLimitMBs * vmemRatio);
cpuVcores = resource.getVirtualCores();
usageMetrics.recordResourceLimit(
@@ -820,7 +820,7 @@ protected void onChangeMonitoringContainerResource(
}
LOG.info("Changing resource-monitoring for " + containerId);
updateContainerMetrics(monitoringEvent);
- long pmemLimit = changeEvent.getResource().getMemory() * 1024L * 1024L;
+ long pmemLimit = changeEvent.getResource().getMemoryLong() * 1024L * 1024L;
long vmemLimit = (long) (pmemLimit * vmemRatio);
int cpuVcores = changeEvent.getResource().getVirtualCores();
processTreeInfo.setResourceLimit(pmemLimit, vmemLimit, cpuVcores);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
index ef4e571..3905c56 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java
@@ -527,7 +527,7 @@ protected ProcessTreeInfo getPti() {
private ProcessTreeInfo createProcessTreeInfo(ContainerId containerId,
Resource resource, Configuration conf) {
- long pmemBytes = resource.getMemory() * 1024 * 1024L;
+ long pmemBytes = resource.getMemoryLong() * 1024 * 1024L;
float pmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
long vmemBytes = (long) (pmemRatio * pmemBytes);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java
index a38d0b7..b9577ff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java
@@ -115,9 +115,9 @@ public void endRunningContainer() {
public void allocateContainer(Resource res) {
allocatedContainers.incr();
- allocatedMB = allocatedMB + res.getMemory();
+ allocatedMB = allocatedMB + res.getMemoryLong();
allocatedGB.set((int)Math.ceil(allocatedMB/1024d));
- availableMB = availableMB - res.getMemory();
+ availableMB = availableMB - res.getMemoryLong();
availableGB.set((int)Math.floor(availableMB/1024d));
allocatedVCores.incr(res.getVirtualCores());
availableVCores.decr(res.getVirtualCores());
@@ -125,16 +125,16 @@ public void allocateContainer(Resource res) {
public void releaseContainer(Resource res) {
allocatedContainers.decr();
- allocatedMB = allocatedMB - res.getMemory();
+ allocatedMB = allocatedMB - res.getMemoryLong();
allocatedGB.set((int)Math.ceil(allocatedMB/1024d));
- availableMB = availableMB + res.getMemory();
+ availableMB = availableMB + res.getMemoryLong();
availableGB.set((int)Math.floor(availableMB/1024d));
allocatedVCores.decr(res.getVirtualCores());
availableVCores.incr(res.getVirtualCores());
}
public void changeContainer(Resource before, Resource now) {
- int deltaMB = now.getMemory() - before.getMemory();
+ long deltaMB = now.getMemoryLong() - before.getMemoryLong();
int deltaVCores = now.getVirtualCores() - before.getVirtualCores();
allocatedMB = allocatedMB + deltaMB;
allocatedGB.set((int)Math.ceil(allocatedMB/1024d));
@@ -145,7 +145,7 @@ public void changeContainer(Resource before, Resource now) {
}
public void addResource(Resource res) {
- availableMB = availableMB + res.getMemory();
+ availableMB = availableMB + res.getMemoryLong();
availableGB.incr((int)Math.floor(availableMB/1024d));
availableVCores.incr(res.getVirtualCores());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java
index cf022b9..6f302cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java
@@ -81,7 +81,7 @@ public ContainerInfo(final Context nmContext, final Container container,
this.user = container.getUser();
Resource res = container.getResource();
if (res != null) {
- this.totalMemoryNeededMB = res.getMemory();
+ this.totalMemoryNeededMB = res.getMemoryLong();
this.totalVCoresNeeded = res.getVirtualCores();
}
this.containerLogsShortLink = ujoin("containerlogs", this.id,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
index 2fcce1d..694c1b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java
@@ -189,7 +189,7 @@ public RegisterNodeManagerResponse registerNodeManager(
InetSocketAddress expected = NetUtils.getConnectAddress(
conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1));
Assert.assertEquals(NetUtils.getHostPortString(expected), nodeId.toString());
- Assert.assertEquals(5 * 1024, resource.getMemory());
+ Assert.assertEquals(5 * 1024, resource.getMemoryLong());
registeredNodes.add(nodeId);
RegisterNodeManagerResponse response = recordFactory
@@ -917,7 +917,7 @@ public RegisterNodeManagerResponse registerNodeManager(
conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1));
Assert.assertEquals(NetUtils.getHostPortString(expected),
nodeId.toString());
- Assert.assertEquals(5 * 1024, resource.getMemory());
+ Assert.assertEquals(5 * 1024, resource.getMemoryLong());
registeredNodes.add(nodeId);
RegisterNodeManagerResponse response = recordFactory
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
index ef4a0d4..c97fd64 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
@@ -94,9 +94,9 @@ public String getLiveNodeManagers() {
ni.getNodeManagerVersion());
if(report != null) {
info.put("NumContainers", report.getNumContainers());
- info.put("UsedMemoryMB", report.getUsedResource().getMemory());
+ info.put("UsedMemoryMB", report.getUsedResource().getMemoryLong());
info.put("AvailableMemoryMB",
- report.getAvailableResource().getMemory());
+ report.getAvailableResource().getMemoryLong());
}
nodesInfo.add(info);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index e19d55e..fbb4280 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -142,7 +142,7 @@ public static void checkSchedContainerChangeRequest(
// example, you cannot request target resource of a <10G, 10> container to
// <20G, 8>
if (increase) {
- if (originalResource.getMemory() > targetResource.getMemory()
+ if (originalResource.getMemoryLong() > targetResource.getMemoryLong()
|| originalResource.getVirtualCores() > targetResource
.getVirtualCores()) {
String msg =
@@ -153,7 +153,7 @@ public static void checkSchedContainerChangeRequest(
throw new InvalidResourceRequestException(msg);
}
} else {
- if (originalResource.getMemory() < targetResource.getMemory()
+ if (originalResource.getMemoryLong() < targetResource.getMemoryLong()
|| originalResource.getVirtualCores() < targetResource
.getVirtualCores()) {
String msg =
@@ -243,15 +243,15 @@ private static void validateIncreaseDecreaseRequest(RMContext rmContext,
return;
}
for (ContainerResourceChangeRequest request : requests) {
- if (request.getCapability().getMemory() < 0
- || request.getCapability().getMemory() > maximumAllocation
- .getMemory()) {
+ if (request.getCapability().getMemoryLong() < 0
+ || request.getCapability().getMemoryLong() > maximumAllocation
+ .getMemoryLong()) {
throw new InvalidResourceRequestException("Invalid "
+ (increase ? "increase" : "decrease") + " request"
+ ", requested memory < 0"
+ ", or requested memory > max configured" + ", requestedMemory="
- + request.getCapability().getMemory() + ", maxMemory="
- + maximumAllocation.getMemory());
+ + request.getCapability().getMemoryLong() + ", maxMemory="
+ + maximumAllocation.getMemoryLong());
}
if (request.getCapability().getVirtualCores() < 0
|| request.getCapability().getVirtualCores() > maximumAllocation
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index b0bc565..ca6d4f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -344,7 +344,7 @@ public RegisterNodeManagerResponse registerNodeManager(
}
// Check if this node has minimum allocations
- if (capability.getMemory() < minAllocMb
+ if (capability.getMemoryLong() < minAllocMb
|| capability.getVirtualCores() < minAllocVcores) {
String message =
"NodeManager from " + host
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
index 84a3b19..92791a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
@@ -447,7 +447,7 @@ private void publishContainerCreatedEvent(ContainerCreatedEvent event) {
TimelineEntity entity = createContainerEntity(event.getContainerId());
Map entityInfo = new HashMap();
entityInfo.put(ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO,
- event.getAllocatedResource().getMemory());
+ event.getAllocatedResource().getMemoryLong());
entityInfo.put(ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO,
event.getAllocatedResource().getVirtualCores());
entityInfo.put(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
index 8b01a73..c97029e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
@@ -142,17 +142,17 @@ public void assignPreemption(float scalingFactor, ResourceCalculator rc,
void appendLogString(StringBuilder sb) {
sb.append(queueName).append(", ")
- .append(current.getMemory()).append(", ")
+ .append(current.getMemoryLong()).append(", ")
.append(current.getVirtualCores()).append(", ")
- .append(pending.getMemory()).append(", ")
+ .append(pending.getMemoryLong()).append(", ")
.append(pending.getVirtualCores()).append(", ")
- .append(guaranteed.getMemory()).append(", ")
+ .append(guaranteed.getMemoryLong()).append(", ")
.append(guaranteed.getVirtualCores()).append(", ")
- .append(idealAssigned.getMemory()).append(", ")
+ .append(idealAssigned.getMemoryLong()).append(", ")
.append(idealAssigned.getVirtualCores()).append(", ")
- .append(toBePreempted.getMemory()).append(", ")
+ .append(toBePreempted.getMemoryLong()).append(", ")
.append(toBePreempted.getVirtualCores() ).append(", ")
- .append(actuallyToBePreempted.getMemory()).append(", ")
+ .append(actuallyToBePreempted.getMemoryLong()).append(", ")
.append(actuallyToBePreempted.getVirtualCores());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
index eaf2902..5a808ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java
@@ -160,7 +160,7 @@ public synchronized void synchronizePlan(Plan plan, boolean shouldReplan) {
}
Resource capToAssign = res.getResourcesAtTime(now);
float targetCapacity = 0f;
- if (planResources.getMemory() > 0
+ if (planResources.getMemoryLong() > 0
&& planResources.getVirtualCores() > 0) {
if (shouldResize) {
capToAssign =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
index f8b68e3..a77332c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java
@@ -246,7 +246,7 @@ public long getValidWindow() {
long vcores;
public IntegralResource(Resource resource) {
- this.memory = resource.getMemory();
+ this.memory = resource.getMemoryLong();
this.vcores = resource.getVirtualCores();
}
@@ -256,12 +256,12 @@ public IntegralResource(long mem, long vcores) {
}
public void add(Resource r) {
- memory += r.getMemory();
+ memory += r.getMemoryLong();
vcores += r.getVirtualCores();
}
public void subtract(Resource r) {
- memory -= r.getMemory();
+ memory -= r.getMemoryLong();
vcores -= r.getVirtualCores();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java
index aba4822..f656c59 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java
@@ -106,7 +106,7 @@ private static ReservationDefinitionProto convertToProtoFormat(
public static ResourceProto convertToProtoFormat(Resource e) {
return YarnProtos.ResourceProto.newBuilder()
- .setMemory(e.getMemory())
+ .setMemory(e.getMemoryLong())
.setVirtualCores(e.getVirtualCores())
.build();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java
index 5a46a4e..d6de509 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java
@@ -88,7 +88,7 @@ public long setEarliestStartTime(Plan plan,
// Weight = total memory consumption of stage
protected double calcWeight(ReservationRequest stage) {
- return (stage.getDuration() * stage.getCapability().getMemory())
+ return (stage.getDuration() * stage.getCapability().getMemoryLong())
* (stage.getNumContainers());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index 16ab55d..3d111a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -708,7 +708,7 @@ private static void updateAttemptMetrics(RMContainerImpl container) {
}
long usedMillis = container.finishTime - container.creationTime;
- long memorySeconds = resource.getMemory()
+ long memorySeconds = resource.getMemoryLong()
* usedMillis / DateUtils.MILLIS_PER_SECOND;
long vcoreSeconds = resource.getVirtualCores()
* usedMillis / DateUtils.MILLIS_PER_SECOND;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index 34b4267..7a2735a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -56,8 +56,8 @@
private Resource staleClusterCapacity = null;
// Max allocation
- private int maxNodeMemory = -1;
- private int maxNodeVCores = -1;
+ private long maxNodeMemory = -1;
+ private long maxNodeVCores = -1;
private Resource configuredMaxAllocation;
private boolean forceConfiguredMaxAllocation = true;
private long configuredMaxAllocationWaitTime;
@@ -211,7 +211,7 @@ public Resource getMaxAllowedAllocation() {
}
return Resources.createResource(
- Math.min(configuredMaxAllocation.getMemory(), maxNodeMemory),
+ Math.min(configuredMaxAllocation.getMemoryLong(), maxNodeMemory),
Math.min(configuredMaxAllocation.getVirtualCores(), maxNodeVCores)
);
} finally {
@@ -224,7 +224,7 @@ private void updateMaxResources(SchedulerNode node, boolean add) {
writeLock.lock();
try {
if (add) { // added node
- int nodeMemory = totalResource.getMemory();
+ long nodeMemory = totalResource.getMemoryLong();
if (nodeMemory > maxNodeMemory) {
maxNodeMemory = nodeMemory;
}
@@ -233,7 +233,7 @@ private void updateMaxResources(SchedulerNode node, boolean add) {
maxNodeVCores = nodeVCores;
}
} else { // removed node
- if (maxNodeMemory == totalResource.getMemory()) {
+ if (maxNodeMemory == totalResource.getMemoryLong()) {
maxNodeMemory = -1;
}
if (maxNodeVCores == totalResource.getVirtualCores()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index d9c7283..7a92305 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Resource;
@@ -59,8 +60,8 @@
@Metric("# of apps killed") MutableCounterInt appsKilled;
@Metric("# of apps failed") MutableCounterInt appsFailed;
- @Metric("Allocated memory in MB") MutableGaugeInt allocatedMB;
- @Metric("Allocated CPU in virtual cores") MutableGaugeInt allocatedVCores;
+ @Metric("Allocated memory in MB") MutableGaugeLong allocatedMB;
+ @Metric("Allocated CPU in virtual cores") MutableGaugeLong allocatedVCores;
@Metric("# of allocated containers") MutableGaugeInt allocatedContainers;
@Metric("Aggregate # of allocated containers") MutableCounterLong aggregateContainersAllocated;
@Metric("Aggregate # of allocated node-local containers")
@@ -70,13 +71,13 @@
@Metric("Aggregate # of allocated off-switch containers")
MutableCounterLong aggregateOffSwitchContainersAllocated;
@Metric("Aggregate # of released containers") MutableCounterLong aggregateContainersReleased;
- @Metric("Available memory in MB") MutableGaugeInt availableMB;
- @Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores;
- @Metric("Pending memory allocation in MB") MutableGaugeInt pendingMB;
- @Metric("Pending CPU allocation in virtual cores") MutableGaugeInt pendingVCores;
+ @Metric("Available memory in MB") MutableGaugeLong availableMB;
+ @Metric("Available CPU in virtual cores") MutableGaugeLong availableVCores;
+ @Metric("Pending memory allocation in MB") MutableGaugeLong pendingMB;
+ @Metric("Pending CPU allocation in virtual cores") MutableGaugeLong pendingVCores;
@Metric("# of pending containers") MutableGaugeInt pendingContainers;
- @Metric("# of reserved memory in MB") MutableGaugeInt reservedMB;
- @Metric("Reserved CPU in virtual cores") MutableGaugeInt reservedVCores;
+ @Metric("# of reserved memory in MB") MutableGaugeLong reservedMB;
+ @Metric("Reserved CPU in virtual cores") MutableGaugeLong reservedVCores;
@Metric("# of reserved containers") MutableGaugeInt reservedContainers;
@Metric("# of active users") MutableGaugeInt activeUsers;
@Metric("# of active applications") MutableGaugeInt activeApplications;
@@ -325,7 +326,7 @@ public void moveAppTo(AppSchedulingInfo app) {
* @param limit resource limit
*/
public void setAvailableResourcesToQueue(Resource limit) {
- availableMB.set(limit.getMemory());
+ availableMB.set(limit.getMemoryLong());
availableVCores.set(limit.getVirtualCores());
}
@@ -362,7 +363,7 @@ public void incrPendingResources(String user, int containers, Resource res) {
private void _incrPendingResources(int containers, Resource res) {
pendingContainers.incr(containers);
- pendingMB.incr(res.getMemory() * containers);
+ pendingMB.incr(res.getMemoryLong() * containers);
pendingVCores.incr(res.getVirtualCores() * containers);
}
@@ -379,7 +380,7 @@ public void decrPendingResources(String user, int containers, Resource res) {
private void _decrPendingResources(int containers, Resource res) {
pendingContainers.decr(containers);
- pendingMB.decr(res.getMemory() * containers);
+ pendingMB.decr(res.getMemoryLong() * containers);
pendingVCores.decr(res.getVirtualCores() * containers);
}
@@ -407,7 +408,7 @@ public void allocateResources(String user, int containers, Resource res,
allocatedContainers.incr(containers);
aggregateContainersAllocated.incr(containers);
- allocatedMB.incr(res.getMemory() * containers);
+ allocatedMB.incr(res.getMemoryLong() * containers);
allocatedVCores.incr(res.getVirtualCores() * containers);
if (decrPending) {
_decrPendingResources(containers, res);
@@ -428,10 +429,10 @@ public void allocateResources(String user, int containers, Resource res,
* @param res
*/
public void allocateResources(String user, Resource res) {
- allocatedMB.incr(res.getMemory());
+ allocatedMB.incr(res.getMemoryLong());
allocatedVCores.incr(res.getVirtualCores());
- pendingMB.decr(res.getMemory());
+ pendingMB.decr(res.getMemoryLong());
pendingVCores.decr(res.getVirtualCores());
QueueMetrics userMetrics = getUserMetrics(user);
@@ -446,7 +447,7 @@ public void allocateResources(String user, Resource res) {
public void releaseResources(String user, int containers, Resource res) {
allocatedContainers.decr(containers);
aggregateContainersReleased.incr(containers);
- allocatedMB.decr(res.getMemory() * containers);
+ allocatedMB.decr(res.getMemoryLong() * containers);
allocatedVCores.decr(res.getVirtualCores() * containers);
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
@@ -464,7 +465,7 @@ public void releaseResources(String user, int containers, Resource res) {
* @param res
*/
public void releaseResources(String user, Resource res) {
- allocatedMB.decr(res.getMemory());
+ allocatedMB.decr(res.getMemoryLong());
allocatedVCores.decr(res.getVirtualCores());
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
@@ -477,7 +478,7 @@ public void releaseResources(String user, Resource res) {
public void reserveResource(String user, Resource res) {
reservedContainers.incr();
- reservedMB.incr(res.getMemory());
+ reservedMB.incr(res.getMemoryLong());
reservedVCores.incr(res.getVirtualCores());
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
@@ -490,7 +491,7 @@ public void reserveResource(String user, Resource res) {
public void unreserveResource(String user, Resource res) {
reservedContainers.decr();
- reservedMB.decr(res.getMemory());
+ reservedMB.decr(res.getMemoryLong());
reservedVCores.decr(res.getVirtualCores());
QueueMetrics userMetrics = getUserMetrics(user);
if (userMetrics != null) {
@@ -563,11 +564,11 @@ public Resource getAllocatedResources() {
return BuilderUtils.newResource(allocatedMB.value(), allocatedVCores.value());
}
- public int getAllocatedMB() {
+ public long getAllocatedMB() {
return allocatedMB.value();
}
- public int getAllocatedVirtualCores() {
+ public long getAllocatedVirtualCores() {
return allocatedVCores.value();
}
@@ -575,19 +576,19 @@ public int getAllocatedContainers() {
return allocatedContainers.value();
}
- public int getAvailableMB() {
+ public long getAvailableMB() {
return availableMB.value();
}
- public int getAvailableVirtualCores() {
+ public long getAvailableVirtualCores() {
return availableVCores.value();
}
- public int getPendingMB() {
+ public long getPendingMB() {
return pendingMB.value();
}
- public int getPendingVirtualCores() {
+ public long getPendingVirtualCores() {
return pendingVCores.value();
}
@@ -595,11 +596,11 @@ public int getPendingContainers() {
return pendingContainers.value();
}
- public int getReservedMB() {
+ public long getReservedMB() {
return reservedMB.value();
}
- public int getReservedVirtualCores() {
+ public long getReservedVirtualCores() {
return reservedVCores.value();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 2542009..b5b5e91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -443,7 +443,7 @@ public synchronized void setHeadroom(Resource globalLimit) {
*/
public synchronized Resource getHeadroom() {
// Corner case to deal with applications being slightly over-limit
- if (resourceLimit.getMemory() < 0) {
+ if (resourceLimit.getMemoryLong() < 0) {
resourceLimit.setMemory(0);
}
@@ -479,7 +479,7 @@ public synchronized void showRequests() {
if (requests != null) {
LOG.debug("showRequests:" + " application=" + getApplicationId()
+ " headRoom=" + getHeadroom() + " currentConsumption="
- + attemptResourceUsage.getUsed().getMemory());
+ + attemptResourceUsage.getUsed().getMemoryLong());
for (ResourceRequest request : requests.values()) {
LOG.debug("showRequests:" + " application=" + getApplicationId()
+ " request=" + request);
@@ -681,7 +681,7 @@ synchronized AggregateAppResourceUsage getRunningAggregateAppResourceUsage() {
for (RMContainer rmContainer : this.liveContainers.values()) {
long usedMillis = currentTimeMillis - rmContainer.getCreationTime();
Resource resource = rmContainer.getContainer().getResource();
- memorySeconds += resource.getMemory() * usedMillis /
+ memorySeconds += resource.getMemoryLong() * usedMillis /
DateUtils.MILLIS_PER_SECOND;
vcoreSeconds += resource.getVirtualCores() * usedMillis
/ DateUtils.MILLIS_PER_SECOND;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index b460964..c6a5505 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
@@ -274,13 +274,13 @@ public static void normalizeAndvalidateRequest(ResourceRequest resReq,
private static void validateResourceRequest(ResourceRequest resReq,
Resource maximumResource, QueueInfo queueInfo, RMContext rmContext)
throws InvalidResourceRequestException {
- if (resReq.getCapability().getMemory() < 0 ||
- resReq.getCapability().getMemory() > maximumResource.getMemory()) {
+ if (resReq.getCapability().getMemoryLong() < 0 ||
+ resReq.getCapability().getMemoryLong() > maximumResource.getMemoryLong()) {
throw new InvalidResourceRequestException("Invalid resource request"
+ ", requested memory < 0"
+ ", or requested memory > max configured"
- + ", requestedMemory=" + resReq.getCapability().getMemory()
- + ", maxMemory=" + maximumResource.getMemory());
+ + ", requestedMemory=" + resReq.getCapability().getMemoryLong()
+ + ", maxMemory=" + maximumResource.getMemoryLong());
}
if (resReq.getCapability().getVirtualCores() < 0 ||
resReq.getCapability().getVirtualCores() >
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
index 51d39ff..67e19fa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@@ -32,37 +33,37 @@
public class CSQueueMetrics extends QueueMetrics {
@Metric("AM memory limit in MB")
- MutableGaugeInt AMResourceLimitMB;
+ MutableGaugeLong AMResourceLimitMB;
@Metric("AM CPU limit in virtual cores")
- MutableGaugeInt AMResourceLimitVCores;
+ MutableGaugeLong AMResourceLimitVCores;
@Metric("Used AM memory limit in MB")
- MutableGaugeInt usedAMResourceMB;
+ MutableGaugeLong usedAMResourceMB;
@Metric("Used AM CPU limit in virtual cores")
- MutableGaugeInt usedAMResourceVCores;
+ MutableGaugeLong usedAMResourceVCores;
CSQueueMetrics(MetricsSystem ms, String queueName, Queue parent,
boolean enableUserMetrics, Configuration conf) {
super(ms, queueName, parent, enableUserMetrics, conf);
}
- public int getAMResourceLimitMB() {
+ public long getAMResourceLimitMB() {
return AMResourceLimitMB.value();
}
- public int getAMResourceLimitVCores() {
+ public long getAMResourceLimitVCores() {
return AMResourceLimitVCores.value();
}
- public int getUsedAMResourceMB() {
+ public long getUsedAMResourceMB() {
return usedAMResourceMB.value();
}
- public int getUsedAMResourceVCores() {
+ public long getUsedAMResourceVCores() {
return usedAMResourceVCores.value();
}
public void setAMResouceLimit(Resource res) {
- AMResourceLimitMB.set(res.getMemory());
+ AMResourceLimitMB.set(res.getMemoryLong());
AMResourceLimitVCores.set(res.getVirtualCores());
}
@@ -74,7 +75,7 @@ public void setAMResouceLimitForUser(String user, Resource res) {
}
public void incAMUsed(String user, Resource res) {
- usedAMResourceMB.incr(res.getMemory());
+ usedAMResourceMB.incr(res.getMemoryLong());
usedAMResourceVCores.incr(res.getVirtualCores());
CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
if (userMetrics != null) {
@@ -83,7 +84,7 @@ public void incAMUsed(String user, Resource res) {
}
public void decAMUsed(String user, Resource res) {
- usedAMResourceMB.decr(res.getMemory());
+ usedAMResourceMB.decr(res.getMemoryLong());
usedAMResourceVCores.decr(res.getVirtualCores());
CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user);
if (userMetrics != null) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java
index 95a12dc..ecb8017 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java
@@ -65,7 +65,7 @@ public Resource getHeadroom() {
}
}
// Corner case to deal with applications being slightly over-limit
- if (headroom.getMemory() < 0) {
+ if (headroom.getMemoryLong() < 0) {
headroom.setMemory(0);
}
return headroom;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 88e39de..c5c2621 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -677,7 +677,7 @@ public Resource getMaximumAllocation() {
*/
public Resource getMaximumAllocationPerQueue(String queue) {
String queuePrefix = getQueuePrefix(queue);
- int maxAllocationMbPerQueue = getInt(queuePrefix + MAXIMUM_ALLOCATION_MB,
+ long maxAllocationMbPerQueue = getLong(queuePrefix + MAXIMUM_ALLOCATION_MB,
(int)UNDEFINED);
int maxAllocationVcoresPerQueue = getInt(
queuePrefix + MAXIMUM_ALLOCATION_VCORES, (int)UNDEFINED);
@@ -690,7 +690,7 @@ public Resource getMaximumAllocationPerQueue(String queue) {
Resource clusterMax = getMaximumAllocation();
if (maxAllocationMbPerQueue == (int)UNDEFINED) {
LOG.info("max alloc mb per queue for " + queue + " is undefined");
- maxAllocationMbPerQueue = clusterMax.getMemory();
+ maxAllocationMbPerQueue = clusterMax.getMemoryLong();
}
if (maxAllocationVcoresPerQueue == (int)UNDEFINED) {
LOG.info("max alloc vcore per queue for " + queue + " is undefined");
@@ -698,7 +698,7 @@ public Resource getMaximumAllocationPerQueue(String queue) {
}
Resource result = Resources.createResource(maxAllocationMbPerQueue,
maxAllocationVcoresPerQueue);
- if (maxAllocationMbPerQueue > clusterMax.getMemory()
+ if (maxAllocationMbPerQueue > clusterMax.getMemoryLong()
|| maxAllocationVcoresPerQueue > clusterMax.getVirtualCores()) {
throw new IllegalArgumentException(
"Queue maximum allocation cannot be larger than the cluster setting"
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index fbcb91c..bdc9907 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -449,7 +449,7 @@ public synchronized void reinitialize(
// since we have already told running AM's the size
Resource oldMax = getMaximumAllocation();
Resource newMax = newlyParsedLeafQueue.getMaximumAllocation();
- if (newMax.getMemory() < oldMax.getMemory()
+ if (newMax.getMemoryLong() < oldMax.getMemoryLong()
|| newMax.getVirtualCores() < oldMax.getVirtualCores()) {
throw new IOException(
"Trying to reinitialize "
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index a5ca2d8..b2d4bbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -442,7 +442,7 @@ private ContainerAllocation assignContainer(Resource clusterResource,
priority, capability);
// Can we allocate a container on this node?
- int availableContainers =
+ long availableContainers =
rc.computeAvailableContainers(available, capability);
// How much need to unreserve equals to:
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index e426da6..a3965d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -191,7 +191,7 @@ private void subtractResourcesOnBlacklistedNodes(
Resources.subtractFrom(availableResources,
node.getUnallocatedResource());
}
- if (availableResources.getMemory() < 0) {
+ if (availableResources.getMemoryLong() < 0) {
availableResources.setMemory(0);
}
if (availableResources.getVirtualCores() < 0) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index d9fac90..f4a4514 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -128,7 +128,7 @@ public void updatePreemptionVariables() {
public Resource getDemand() {
readLock.lock();
try {
- return Resource.newInstance(demand.getMemory(), demand.getVirtualCores());
+ return Resource.newInstance(demand.getMemoryLong(), demand.getVirtualCores());
} finally {
readLock.unlock();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index a33084f..7d02cb6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -131,18 +131,18 @@ public QueueInfo getQueueInfo(boolean includeChildQueues, boolean recursive) {
QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class);
queueInfo.setQueueName(getQueueName());
- if (scheduler.getClusterResource().getMemory() == 0) {
+ if (scheduler.getClusterResource().getMemoryLong() == 0) {
queueInfo.setCapacity(0.0f);
} else {
- queueInfo.setCapacity((float) getFairShare().getMemory() /
- scheduler.getClusterResource().getMemory());
+ queueInfo.setCapacity((float) getFairShare().getMemoryLong() /
+ scheduler.getClusterResource().getMemoryLong());
}
- if (getFairShare().getMemory() == 0) {
+ if (getFairShare().getMemoryLong() == 0) {
queueInfo.setCurrentCapacity(0.0f);
} else {
- queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() /
- getFairShare().getMemory());
+ queueInfo.setCurrentCapacity((float) getResourceUsage().getMemoryLong() /
+ getFairShare().getMemoryLong());
}
ArrayList childQueueInfos = new ArrayList();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
index 82c422b..8e6d8fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
@@ -31,14 +32,14 @@
@Metrics(context="yarn")
public class FSQueueMetrics extends QueueMetrics {
- @Metric("Fair share of memory in MB") MutableGaugeInt fairShareMB;
- @Metric("Fair share of CPU in vcores") MutableGaugeInt fairShareVCores;
- @Metric("Steady fair share of memory in MB") MutableGaugeInt steadyFairShareMB;
- @Metric("Steady fair share of CPU in vcores") MutableGaugeInt steadyFairShareVCores;
- @Metric("Minimum share of memory in MB") MutableGaugeInt minShareMB;
- @Metric("Minimum share of CPU in vcores") MutableGaugeInt minShareVCores;
- @Metric("Maximum share of memory in MB") MutableGaugeInt maxShareMB;
- @Metric("Maximum share of CPU in vcores") MutableGaugeInt maxShareVCores;
+ @Metric("Fair share of memory in MB") MutableGaugeLong fairShareMB;
+ @Metric("Fair share of CPU in vcores") MutableGaugeLong fairShareVCores;
+ @Metric("Steady fair share of memory in MB") MutableGaugeLong steadyFairShareMB;
+ @Metric("Steady fair share of CPU in vcores") MutableGaugeLong steadyFairShareVCores;
+ @Metric("Minimum share of memory in MB") MutableGaugeLong minShareMB;
+ @Metric("Minimum share of CPU in vcores") MutableGaugeLong minShareVCores;
+ @Metric("Maximum share of memory in MB") MutableGaugeLong maxShareMB;
+ @Metric("Maximum share of CPU in vcores") MutableGaugeLong maxShareVCores;
FSQueueMetrics(MetricsSystem ms, String queueName, Queue parent,
boolean enableUserMetrics, Configuration conf) {
@@ -46,54 +47,54 @@
}
public void setFairShare(Resource resource) {
- fairShareMB.set(resource.getMemory());
+ fairShareMB.set(resource.getMemoryLong());
fairShareVCores.set(resource.getVirtualCores());
}
- public int getFairShareMB() {
+ public long getFairShareMB() {
return fairShareMB.value();
}
- public int getFairShareVirtualCores() {
+ public long getFairShareVirtualCores() {
return fairShareVCores.value();
}
public void setSteadyFairShare(Resource resource) {
- steadyFairShareMB.set(resource.getMemory());
+ steadyFairShareMB.set(resource.getMemoryLong());
steadyFairShareVCores.set(resource.getVirtualCores());
}
- public int getSteadyFairShareMB() {
+ public long getSteadyFairShareMB() {
return steadyFairShareMB.value();
}
- public int getSteadyFairShareVCores() {
+ public long getSteadyFairShareVCores() {
return steadyFairShareVCores.value();
}
public void setMinShare(Resource resource) {
- minShareMB.set(resource.getMemory());
+ minShareMB.set(resource.getMemoryLong());
minShareVCores.set(resource.getVirtualCores());
}
- public int getMinShareMB() {
+ public long getMinShareMB() {
return minShareMB.value();
}
- public int getMinShareVirtualCores() {
+ public long getMinShareVirtualCores() {
return minShareVCores.value();
}
public void setMaxShare(Resource resource) {
- maxShareMB.set(resource.getMemory());
+ maxShareMB.set(resource.getMemoryLong());
maxShareVCores.set(resource.getVirtualCores());
}
- public int getMaxShareMB() {
+ public long getMaxShareMB() {
return maxShareMB.value();
}
- public int getMaxShareVirtualCores() {
+ public long getMaxShareVirtualCores() {
return maxShareVCores.value();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 3df0600..fd530e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -470,7 +470,7 @@ protected void preemptResources(Resource toPreempt) {
}
private boolean isResourceGreaterThanNone(Resource toPreempt) {
- return (toPreempt.getMemory() > 0) || (toPreempt.getVirtualCores() > 0);
+ return (toPreempt.getMemoryLong() > 0) || (toPreempt.getVirtualCores() > 0);
}
protected void warnOrKillContainer(RMContainer container) {
@@ -555,7 +555,7 @@ public synchronized ResourceWeights getAppWeight(FSAppAttempt app) {
double weight = 1.0;
if (sizeBasedWeight) {
// Set weight based on current memory demand
- weight = Math.log1p(app.getDemand().getMemory()) / Math.log(2);
+ weight = Math.log1p(app.getDemand().getMemoryLong()) / Math.log(2);
}
weight *= app.getPriority().getPriority();
if (weightAdjuster != null) {
@@ -1185,7 +1185,7 @@ private boolean shouldAttemptPreemption() {
if (preemptionEnabled) {
Resource clusterResource = getClusterResource();
return (preemptionUtilizationThreshold < Math.max(
- (float) rootMetrics.getAllocatedMB() / clusterResource.getMemory(),
+ (float) rootMetrics.getAllocatedMB() / clusterResource.getMemoryLong(),
(float) rootMetrics.getAllocatedVirtualCores() /
clusterResource.getVirtualCores()));
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
index f4fad32..e902ad0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
@@ -124,15 +124,15 @@ private static void computeSharesInternal(
// have met all Schedulables' max shares.
int totalMaxShare = 0;
for (Schedulable sched : schedulables) {
- int maxShare = getResourceValue(sched.getMaxShare(), type);
- totalMaxShare = (int) Math.min((long)maxShare + (long)totalMaxShare,
+ long maxShare = getResourceValue(sched.getMaxShare(), type);
+ totalMaxShare = (int) Math.min(maxShare + (long)totalMaxShare,
Integer.MAX_VALUE);
if (totalMaxShare == Integer.MAX_VALUE) {
break;
}
}
- int totalResource = Math.max((getResourceValue(totalResources, type) -
+ long totalResource = Math.max((getResourceValue(totalResources, type) -
takenResources), 0);
totalResource = Math.min(totalMaxShare, totalResource);
@@ -207,7 +207,7 @@ private static int handleFixedFairShares(
int totalResource = 0;
for (Schedulable sched : schedulables) {
- int fixedShare = getFairShareIfFixed(sched, isSteadyShare, type);
+ long fixedShare = getFairShareIfFixed(sched, isSteadyShare, type);
if (fixedShare < 0) {
nonFixedSchedulables.add(sched);
} else {
@@ -229,7 +229,7 @@ private static int handleFixedFairShares(
* The fairshare is fixed if either the maxShare is 0, weight is 0,
* or the Schedulable is not active for instantaneous fairshare.
*/
- private static int getFairShareIfFixed(Schedulable sched,
+ private static long getFairShareIfFixed(Schedulable sched,
boolean isSteadyShare, ResourceType type) {
// Check if maxShare is 0
@@ -245,17 +245,17 @@ private static int getFairShareIfFixed(Schedulable sched,
// Check if weight is 0
if (sched.getWeights().getWeight(type) <= 0) {
- int minShare = getResourceValue(sched.getMinShare(), type);
+ long minShare = getResourceValue(sched.getMinShare(), type);
return (minShare <= 0) ? 0 : minShare;
}
return -1;
}
- private static int getResourceValue(Resource resource, ResourceType type) {
+ private static long getResourceValue(Resource resource, ResourceType type) {
switch (type) {
case MEMORY:
- return resource.getMemory();
+ return resource.getMemoryLong();
case CPU:
return resource.getVirtualCores();
default:
@@ -263,7 +263,7 @@ private static int getResourceValue(Resource resource, ResourceType type) {
}
}
- private static void setResourceValue(int val, Resource resource, ResourceType type) {
+ private static void setResourceValue(long val, Resource resource, ResourceType type) {
switch (type) {
case MEMORY:
resource.setMemory(val);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index 45fbf98..567f4b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
@@ -101,13 +101,13 @@ public boolean checkIfAMResourceUsageOverLimit(Resource usage, Resource maxAMRes
@Override
public Resource getHeadroom(Resource queueFairShare, Resource queueUsage,
Resource maxAvailable) {
- int queueAvailableMemory =
- Math.max(queueFairShare.getMemory() - queueUsage.getMemory(), 0);
+ long queueAvailableMemory =
+ Math.max(queueFairShare.getMemoryLong() - queueUsage.getMemoryLong(), 0);
int queueAvailableCPU =
Math.max(queueFairShare.getVirtualCores() - queueUsage
.getVirtualCores(), 0);
Resource headroom = Resources.createResource(
- Math.min(maxAvailable.getMemory(), queueAvailableMemory),
+ Math.min(maxAvailable.getMemoryLong(), queueAvailableMemory),
Math.min(maxAvailable.getVirtualCores(),
queueAvailableCPU));
return headroom;
@@ -180,8 +180,8 @@ public int compare(Schedulable s1, Schedulable s2) {
*/
void calculateShares(Resource resource, Resource pool,
ResourceWeights shares, ResourceType[] resourceOrder, ResourceWeights weights) {
- shares.setWeight(MEMORY, (float)resource.getMemory() /
- (pool.getMemory() * weights.getWeight(MEMORY)));
+ shares.setWeight(MEMORY, (float)resource.getMemoryLong() /
+ (pool.getMemoryLong() * weights.getWeight(MEMORY)));
shares.setWeight(CPU, (float)resource.getVirtualCores() /
(pool.getVirtualCores() * weights.getWeight(CPU)));
// sort order vector by resource share
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index 3b9f07f..78f1ddd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -82,13 +82,13 @@ public int compare(Schedulable s1, Schedulable s2) {
s1.getResourceUsage(), minShare1);
boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
s2.getResourceUsage(), minShare2);
- minShareRatio1 = (double) s1.getResourceUsage().getMemory()
- / Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE).getMemory();
- minShareRatio2 = (double) s2.getResourceUsage().getMemory()
- / Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE).getMemory();
- useToWeightRatio1 = s1.getResourceUsage().getMemory() /
+ minShareRatio1 = (double) s1.getResourceUsage().getMemoryLong()
+ / Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE).getMemoryLong();
+ minShareRatio2 = (double) s2.getResourceUsage().getMemoryLong()
+ / Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE).getMemoryLong();
+ useToWeightRatio1 = s1.getResourceUsage().getMemoryLong() /
s1.getWeights().getWeight(ResourceType.MEMORY);
- useToWeightRatio2 = s2.getResourceUsage().getMemory() /
+ useToWeightRatio2 = s2.getResourceUsage().getMemoryLong() /
s2.getWeights().getWeight(ResourceType.MEMORY);
int res = 0;
if (s1Needy && !s2Needy)
@@ -124,10 +124,10 @@ public ResourceCalculator getResourceCalculator() {
@Override
public Resource getHeadroom(Resource queueFairShare,
Resource queueUsage, Resource maxAvailable) {
- int queueAvailableMemory = Math.max(
- queueFairShare.getMemory() - queueUsage.getMemory(), 0);
+ long queueAvailableMemory = Math.max(
+ queueFairShare.getMemoryLong() - queueUsage.getMemoryLong(), 0);
Resource headroom = Resources.createResource(
- Math.min(maxAvailable.getMemory(), queueAvailableMemory),
+ Math.min(maxAvailable.getMemoryLong(), queueAvailableMemory),
maxAvailable.getVirtualCores());
return headroom;
}
@@ -152,7 +152,7 @@ public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {
@Override
public boolean checkIfAMResourceUsageOverLimit(Resource usage, Resource maxAMResource) {
- return usage.getMemory() > maxAMResource.getMemory();
+ return usage.getMemoryLong() > maxAMResource.getMemoryLong();
}
@Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java
index a644e58..3ef714e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java
@@ -115,16 +115,16 @@ public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) {
@Override
public boolean checkIfAMResourceUsageOverLimit(Resource usage, Resource maxAMResource) {
- return usage.getMemory() > maxAMResource.getMemory();
+ return usage.getMemoryLong() > maxAMResource.getMemoryLong();
}
@Override
public Resource getHeadroom(Resource queueFairShare,
Resource queueUsage, Resource maxAvailable) {
- int queueAvailableMemory = Math.max(
- queueFairShare.getMemory() - queueUsage.getMemory(), 0);
+ long queueAvailableMemory = Math.max(
+ queueFairShare.getMemoryLong() - queueUsage.getMemoryLong(), 0);
Resource headroom = Resources.createResource(
- Math.min(maxAvailable.getMemory(), queueAvailableMemory),
+ Math.min(maxAvailable.getMemoryLong(), queueAvailableMemory),
maxAvailable.getVirtualCores());
return headroom;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index fba4c13..7344e28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -143,11 +143,11 @@ public QueueInfo getQueueInfo(
queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName());
queueInfo.setCapacity(1.0f);
Resource clusterResource = getClusterResource();
- if (clusterResource.getMemory() == 0) {
+ if (clusterResource.getMemoryLong() == 0) {
queueInfo.setCurrentCapacity(0.0f);
} else {
- queueInfo.setCurrentCapacity((float) usedResource.getMemory()
- / clusterResource.getMemory());
+ queueInfo.setCurrentCapacity((float) usedResource.getMemoryLong()
+ / clusterResource.getMemoryLong());
}
queueInfo.setMaximumCapacity(1.0f);
queueInfo.setChildQueues(new ArrayList());
@@ -685,7 +685,7 @@ private int assignOffSwitchContainers(FiCaSchedulerNode node,
return assignedContainers;
}
- private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application,
+ private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application,
Priority priority, int assignableContainers,
ResourceRequest request, NodeType type) {
LOG.debug("assignContainers:" +
@@ -697,9 +697,10 @@ private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application
Resource capability = request.getCapability();
// TODO: A buggy application with this zero would crash the scheduler.
- int availableContainers = node.getUnallocatedResource().getMemory() /
- capability.getMemory();
- int assignedContainers =
+ int availableContainers =
+ (int) (node.getUnallocatedResource().getMemoryLong() /
+ capability.getMemoryLong());
+ int assignedContainers =
Math.min(assignableContainers, availableContainers);
if (assignedContainers > 0) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
index 04cd53a..56e91be 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java
@@ -67,10 +67,10 @@ public FairOrderingPolicy() {
private double getMagnitude(SchedulableEntity r) {
double mag = r.getSchedulingResourceUsage().getCachedUsed(
- CommonNodeLabelsManager.ANY).getMemory();
+ CommonNodeLabelsManager.ANY).getMemoryLong();
if (sizeBasedWeight) {
double weight = Math.log1p(r.getSchedulingResourceUsage().getCachedDemand(
- CommonNodeLabelsManager.ANY).getMemory()) / Math.log(2);
+ CommonNodeLabelsManager.ANY).getMemoryLong()) / Math.log(2);
mag = mag / weight;
}
return mag;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
index 1099baf..d442064 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java
@@ -54,8 +54,8 @@
@Override public void render(Block html) {
info("\'" + sinfo.getQueueName() + "\' Queue Status").
_("Queue State:" , sinfo.getState()).
- _("Minimum Queue Memory Capacity:" , Integer.toString(sinfo.getMinQueueMemoryCapacity())).
- _("Maximum Queue Memory Capacity:" , Integer.toString(sinfo.getMaxQueueMemoryCapacity())).
+ _("Minimum Queue Memory Capacity:" , Long.toString(sinfo.getMinQueueMemoryCapacity())).
+ _("Maximum Queue Memory Capacity:" , Long.toString(sinfo.getMaxQueueMemoryCapacity())).
_("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())).
_("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())).
_("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())).
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
index f257656..41b5fe7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java
@@ -115,7 +115,7 @@ public FairSchedulerAppsBlock(ResourceManager rm, ViewContext ctx,
AppInfo appInfo = new AppInfo(rm, app, true, WebAppUtils.getHttpSchemePrefix(conf));
String percent = StringUtils.format("%.1f", appInfo.getProgress());
ApplicationAttemptId attemptId = app.getCurrentAppAttempt().getAppAttemptId();
- int fairShare = fsinfo.getAppFairShare(attemptId);
+ long fairShare = fsinfo.getAppFairShare(attemptId);
if (fairShare == FairSchedulerInfo.INVALID_FAIR_SHARE) {
// FairScheduler#applications don't have the entry. Skip it.
continue;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 8036af4..faaf622 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -1504,14 +1504,14 @@ protected Resource createAppSubmissionContextResource(
String msg = "Requested more cores than configured max";
throw new BadRequestException(msg);
}
- if (newApp.getResource().getMemory() > rm.getConfig().getInt(
+ if (newApp.getResource().getMemoryLong() > rm.getConfig().getInt(
YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) {
String msg = "Requested more memory than configured max";
throw new BadRequestException(msg);
}
Resource r =
- Resource.newInstance(newApp.getResource().getMemory(), newApp
+ Resource.newInstance(newApp.getResource().getMemoryLong(), newApp
.getResource().getvCores());
return r;
}
@@ -1951,7 +1951,7 @@ private ReservationSubmissionRequest createReservationSubmissionRequest(
.getReservationRequest()) {
ResourceInfo rInfo = resReqInfo.getCapability();
Resource capability =
- Resource.newInstance(rInfo.getMemory(), rInfo.getvCores());
+ Resource.newInstance(rInfo.getMemoryLong(), rInfo.getvCores());
int numContainers = resReqInfo.getNumContainers();
int minConcurrency = resReqInfo.getMinConcurrency();
long duration = resReqInfo.getDuration();
@@ -2060,7 +2060,7 @@ private ReservationUpdateRequest createReservationUpdateRequest(
.getReservationRequest()) {
ResourceInfo rInfo = resReqInfo.getCapability();
Resource capability =
- Resource.newInstance(rInfo.getMemory(), rInfo.getvCores());
+ Resource.newInstance(rInfo.getMemoryLong(), rInfo.getvCores());
int numContainers = resReqInfo.getNumContainers();
int minConcurrency = resReqInfo.getMinConcurrency();
long duration = resReqInfo.getDuration();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index f51197b..1bfcf37 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -82,8 +82,8 @@
protected long elapsedTime;
protected String amContainerLogs;
protected String amHostHttpAddress;
- protected int allocatedMB;
- protected int allocatedVCores;
+ protected long allocatedMB;
+ protected long allocatedVCores;
protected int runningContainers;
protected long memorySeconds;
protected long vcoreSeconds;
@@ -91,8 +91,8 @@
protected float clusterUsagePercentage;
// preemption info fields
- protected int preemptedResourceMB;
- protected int preemptedResourceVCores;
+ protected long preemptedResourceMB;
+ protected long preemptedResourceVCores;
protected int numNonAMContainerPreempted;
protected int numAMContainerPreempted;
@@ -174,7 +174,7 @@ public AppInfo(ResourceManager rm, RMApp app, Boolean hasAccess,
.getApplicationResourceUsageReport();
if (resourceReport != null) {
Resource usedResources = resourceReport.getUsedResources();
- allocatedMB = usedResources.getMemory();
+ allocatedMB = usedResources.getMemoryLong();
allocatedVCores = usedResources.getVirtualCores();
runningContainers = resourceReport.getNumUsedContainers();
queueUsagePercentage = resourceReport.getQueueUsagePercentage();
@@ -190,7 +190,7 @@ public AppInfo(ResourceManager rm, RMApp app, Boolean hasAccess,
numAMContainerPreempted =
appMetrics.getNumAMContainersPreempted();
preemptedResourceMB =
- appMetrics.getResourcePreempted().getMemory();
+ appMetrics.getResourcePreempted().getMemoryLong();
numNonAMContainerPreempted =
appMetrics.getNumNonAMContainersPreempted();
preemptedResourceVCores =
@@ -302,19 +302,19 @@ public int getRunningContainers() {
return this.runningContainers;
}
- public int getAllocatedMB() {
+ public long getAllocatedMB() {
return this.allocatedMB;
}
- public int getAllocatedVCores() {
+ public long getAllocatedVCores() {
return this.allocatedVCores;
}
- public int getPreemptedMB() {
+ public long getPreemptedMB() {
return preemptedResourceMB;
}
- public int getPreemptedVCores() {
+ public long getPreemptedVCores() {
return preemptedResourceVCores;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java
index f97ff8a..9d4e569 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java
@@ -54,10 +54,10 @@ public FairSchedulerInfo(FairScheduler fs) {
* FairSchedulerInfo#INVALID_FAIR_SHARE if the scheduler does
* not know about this application attempt.
*/
- public int getAppFairShare(ApplicationAttemptId appAttemptId) {
+ public long getAppFairShare(ApplicationAttemptId appAttemptId) {
FSAppAttempt fsAppAttempt = scheduler.getSchedulerApp(appAttemptId);
return fsAppAttempt == null ?
- INVALID_FAIR_SHARE : fsAppAttempt.getFairShare().getMemory();
+ INVALID_FAIR_SHARE : fsAppAttempt.getFairShare().getMemoryLong();
}
public FairSchedulerQueueInfo getRootQueueInfo() {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index e02df65..d12daa5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -81,8 +81,8 @@ public FairSchedulerQueueInfo(FSQueue queue, FairScheduler scheduler) {
clusterResources = new ResourceInfo(scheduler.getClusterResource());
usedResources = new ResourceInfo(queue.getResourceUsage());
- fractionMemUsed = (float)usedResources.getMemory() /
- clusterResources.getMemory();
+ fractionMemUsed = (float)usedResources.getMemoryLong() /
+ clusterResources.getMemoryLong();
steadyFairResources = new ResourceInfo(queue.getSteadyFairShare());
fairResources = new ResourceInfo(queue.getFairShare());
@@ -93,11 +93,11 @@ public FairSchedulerQueueInfo(FSQueue queue, FairScheduler scheduler) {
scheduler.getClusterResource()));
fractionMemSteadyFairShare =
- (float)steadyFairResources.getMemory() / clusterResources.getMemory();
- fractionMemFairShare = (float) fairResources.getMemory()
- / clusterResources.getMemory();
- fractionMemMinShare = (float)minResources.getMemory() / clusterResources.getMemory();
- fractionMemMaxShare = (float)maxResources.getMemory() / clusterResources.getMemory();
+ (float)steadyFairResources.getMemoryLong() / clusterResources.getMemoryLong();
+ fractionMemFairShare = (float) fairResources.getMemoryLong()
+ / clusterResources.getMemoryLong();
+ fractionMemMinShare = (float)minResources.getMemoryLong() / clusterResources.getMemoryLong();
+ fractionMemMaxShare = (float)maxResources.getMemoryLong() / clusterResources.getMemoryLong();
maxApps = allocConf.getQueueMaxApps(queueName);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java
index bd940d1..9cd14fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java
@@ -40,8 +40,8 @@
protected float capacity;
protected float usedCapacity;
protected QueueState qstate;
- protected int minQueueMemoryCapacity;
- protected int maxQueueMemoryCapacity;
+ protected long minQueueMemoryCapacity;
+ protected long maxQueueMemoryCapacity;
protected int numNodes;
protected int usedNodeCapacity;
protected int availNodeCapacity;
@@ -67,8 +67,8 @@ public FifoSchedulerInfo(final ResourceManager rm) {
this.usedCapacity = qInfo.getCurrentCapacity();
this.capacity = qInfo.getCapacity();
- this.minQueueMemoryCapacity = fs.getMinimumResourceCapability().getMemory();
- this.maxQueueMemoryCapacity = fs.getMaximumResourceCapability().getMemory();
+ this.minQueueMemoryCapacity = fs.getMinimumResourceCapability().getMemoryLong();
+ this.maxQueueMemoryCapacity = fs.getMaximumResourceCapability().getMemoryLong();
this.qstate = qInfo.getQueueState();
this.numNodes = rmContext.getRMNodes().size();
@@ -79,9 +79,9 @@ public FifoSchedulerInfo(final ResourceManager rm) {
for (RMNode ni : rmContext.getRMNodes().values()) {
SchedulerNodeReport report = fs.getNodeReport(ni.getNodeID());
- this.usedNodeCapacity += report.getUsedResource().getMemory();
- this.availNodeCapacity += report.getAvailableResource().getMemory();
- this.totalNodeCapacity += ni.getTotalCapability().getMemory();
+ this.usedNodeCapacity += report.getUsedResource().getMemoryLong();
+ this.availNodeCapacity += report.getAvailableResource().getMemoryLong();
+ this.totalNodeCapacity += ni.getTotalCapability().getMemoryLong();
this.numContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers();
}
}
@@ -114,11 +114,11 @@ public String getQueueName() {
return this.qName;
}
- public int getMinQueueMemoryCapacity() {
+ public long getMinQueueMemoryCapacity() {
return this.minQueueMemoryCapacity;
}
- public int getMaxQueueMemoryCapacity() {
+ public long getMaxQueueMemoryCapacity() {
return this.maxQueueMemoryCapacity;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
index 0f877f8..ad7793d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java
@@ -63,8 +63,8 @@ public NodeInfo(RMNode ni, ResourceScheduler sched) {
this.availMemoryMB = 0;
if (report != null) {
this.numContainers = report.getNumContainers();
- this.usedMemoryMB = report.getUsedResource().getMemory();
- this.availMemoryMB = report.getAvailableResource().getMemory();
+ this.usedMemoryMB = report.getUsedResource().getMemoryLong();
+ this.availMemoryMB = report.getAvailableResource().getMemoryLong();
this.usedVirtualCores = report.getUsedResource().getVirtualCores();
this.availableVirtualCores = report.getAvailableResource().getVirtualCores();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
index 9510f5f..935a241 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
@@ -27,22 +27,22 @@
@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class ResourceInfo {
- int memory;
- int vCores;
+ long memory;
+ long vCores;
public ResourceInfo() {
}
public ResourceInfo(Resource res) {
- memory = res.getMemory();
+ memory = res.getMemoryLong();
vCores = res.getVirtualCores();
}
- public int getMemory() {
+ public long getMemoryLong() {
return memory;
}
- public int getvCores() {
+ public long getvCores() {
return vCores;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
index 17cf885..4e6e2bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
@@ -194,7 +194,7 @@ public synchronized void addResourceRequestSpec(
Resource currentSpec = requestSpec.put(priority, capability);
if (currentSpec != null) {
throw new IllegalStateException("Resource spec already exists for " +
- "priority " + priority.getPriority() + " - " + currentSpec.getMemory());
+ "priority " + priority.getPriority() + " - " + currentSpec.getMemoryLong());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
index 4407fe9..249d2b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
@@ -215,7 +215,7 @@ public NodeHeartbeatResponse nodeHeartbeat(Map plans) {
for (Plan plan : plans) {
- if (plan.getTotalCapacity().getMemory() > 0) {
+ if (plan.getTotalCapacity().getMemoryLong() > 0) {
return true;
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
index e4befa6..11fd913 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java
@@ -388,11 +388,11 @@ private void verifyEnemyAppReport(ApplicationReport appReport) {
Assert.assertEquals("Enemy should not see app reserved containers",
-1, usageReport.getNumReservedContainers());
Assert.assertEquals("Enemy should not see app used resources",
- -1, usageReport.getUsedResources().getMemory());
+ -1, usageReport.getUsedResources().getMemoryLong());
Assert.assertEquals("Enemy should not see app reserved resources",
- -1, usageReport.getReservedResources().getMemory());
+ -1, usageReport.getReservedResources().getMemoryLong());
Assert.assertEquals("Enemy should not see app needed resources",
- -1, usageReport.getNeededResources().getMemory());
+ -1, usageReport.getNeededResources().getMemoryLong());
}
private void verifyInvalidQueueWithAcl() throws Exception {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
index 3fa377e..6703202 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java
@@ -531,7 +531,7 @@ public void testProcessingNMContainerStatusesOnNMRestart() throws Exception {
// 4. Verify Memory Usage by cluster, it should be 3072. AM memory +
// requested memory. 1024 + 2048=3072
ResourceScheduler rs = rm1.getRMContext().getScheduler();
- int allocatedMB = rs.getRootQueueMetrics().getAllocatedMB();
+ long allocatedMB = rs.getRootQueueMetrics().getAllocatedMB();
Assert.assertEquals(amMemory + containerMemory, allocatedMB);
// 5. Re-register NM by sending completed container status
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
index fcb48a0..4f84c87 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
@@ -404,7 +404,7 @@ private AggregateAppResourceUsage calculateContainerResourceMetrics(
Resource resource = rmContainer.getContainer().getResource();
long usedMillis =
rmContainer.getFinishTime() - rmContainer.getCreationTime();
- long memorySeconds = resource.getMemory()
+ long memorySeconds = resource.getMemoryLong()
* usedMillis / DateUtils.MILLIS_PER_SECOND;
long vcoreSeconds = resource.getVirtualCores()
* usedMillis / DateUtils.MILLIS_PER_SECOND;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
index b04b173..12f1e1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java
@@ -112,7 +112,7 @@ public void testGetNewAppId() throws Exception {
GetNewApplicationResponse resp = rm.getNewAppId();
assert (resp.getApplicationId().getId() != 0);
- assert (resp.getMaximumResourceCapability().getMemory() > 0);
+ assert (resp.getMaximumResourceCapability().getMemoryLong() > 0);
rm.stop();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index abd59b2..905a42c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -711,7 +711,7 @@ public void innerTestHAWithRMHostName(boolean includeBindHost) {
}
private void verifyClusterMetrics(int activeNodes, int appsSubmitted,
- int appsPending, int containersPending, int availableMB,
+ int appsPending, int containersPending, long availableMB,
int activeApplications) throws Exception {
int timeoutSecs = 0;
QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
@@ -742,7 +742,7 @@ private void verifyClusterMetrics(int activeNodes, int appsSubmitted,
assertTrue(message, isAllMetricAssertionDone);
}
- private void assertMetric(String metricName, int expected, int actual) {
+ private void assertMetric(String metricName, long expected, long actual) {
assertEquals("Incorrect value for metric " + metricName, expected, actual);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index 7c03574..5f556b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -869,13 +869,13 @@ public void testReconnectWithNewPortOnDecommissioningNode() {
public void testResourceUpdateOnRunningNode() {
RMNodeImpl node = getRunningNode();
Resource oldCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
+ assertEquals("Memory resource is not match.", oldCapacity.getMemoryLong(), 4096);
assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
ResourceOption.newInstance(Resource.newInstance(2048, 2),
ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
Resource newCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
+ assertEquals("Memory resource is not match.", newCapacity.getMemoryLong(), 2048);
assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
Assert.assertEquals(NodeState.RUNNING, node.getState());
@@ -893,13 +893,13 @@ public void testDecommissioningOnRunningNode(){
public void testResourceUpdateOnNewNode() {
RMNodeImpl node = getNewNode(Resource.newInstance(4096, 4));
Resource oldCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
+ assertEquals("Memory resource is not match.", oldCapacity.getMemoryLong(), 4096);
assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
ResourceOption.newInstance(Resource.newInstance(2048, 2),
ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
Resource newCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
+ assertEquals("Memory resource is not match.", newCapacity.getMemoryLong(), 2048);
assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
Assert.assertEquals(NodeState.NEW, node.getState());
@@ -913,13 +913,13 @@ public void testResourceUpdateOnRebootedNode() {
int initialUnHealthy = cm.getUnhealthyNMs();
int initialDecommissioning = cm.getNumDecommissioningNMs();
Resource oldCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
+ assertEquals("Memory resource is not match.", oldCapacity.getMemoryLong(), 4096);
assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(), ResourceOption
.newInstance(Resource.newInstance(2048, 2),
ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
Resource newCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
+ assertEquals("Memory resource is not match.", newCapacity.getMemoryLong(), 2048);
assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
Assert.assertEquals(NodeState.REBOOTED, node.getState());
@@ -994,16 +994,16 @@ public void testContainerExpire() throws Exception {
public void testResourceUpdateOnDecommissioningNode() {
RMNodeImpl node = getDecommissioningNode();
Resource oldCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
+ assertEquals("Memory resource is not match.", oldCapacity.getMemoryLong(), 4096);
assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
ResourceOption.newInstance(Resource.newInstance(2048, 2),
ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
Resource originalCapacity = node.getOriginalTotalCapability();
- assertEquals("Memory resource is not match.", originalCapacity.getMemory(), oldCapacity.getMemory());
+ assertEquals("Memory resource is not match.", originalCapacity.getMemoryLong(), oldCapacity.getMemoryLong());
assertEquals("CPU resource is not match.", originalCapacity.getVirtualCores(), oldCapacity.getVirtualCores());
Resource newCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
+ assertEquals("Memory resource is not match.", newCapacity.getMemoryLong(), 2048);
assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
Assert.assertEquals(NodeState.DECOMMISSIONING, node.getState());
@@ -1016,7 +1016,7 @@ public void testResourceUpdateOnDecommissioningNode() {
public void testResourceUpdateOnRecommissioningNode() {
RMNodeImpl node = getDecommissioningNode();
Resource oldCapacity = node.getTotalCapability();
- assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096);
+ assertEquals("Memory resource is not match.", oldCapacity.getMemoryLong(), 4096);
assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
node.handle(new RMNodeEvent(node.getNodeID(),
RMNodeEventType.RECOMMISSION));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestReservationSystemWithRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestReservationSystemWithRMHA.java
index 9a0f2c9..63388beb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestReservationSystemWithRMHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestReservationSystemWithRMHA.java
@@ -183,7 +183,7 @@ private void addNodeCapacityToPlan(MockRM rm, int memory, int vCores) {
.synchronizePlan(ReservationSystemTestUtil.reservationQ, false);
if (rm.getRMContext().getReservationSystem()
.getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity()
- .getMemory() > 0) {
+ .getMemoryLong() > 0) {
break;
}
LOG.info("Waiting for node capacity to be added to plan");
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 4259e6b..0ba72fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -1054,7 +1054,7 @@ public void handle(SchedulerEvent event) {
rm.drainEvents();
RMNode rmNode = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
Assert.assertEquals(3, rmNode.getHttpPort());
- Assert.assertEquals(5120, rmNode.getTotalCapability().getMemory());
+ Assert.assertEquals(5120, rmNode.getTotalCapability().getMemoryLong());
Assert.assertEquals(5120 + 15360, metrics.getAvailableMB());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index 370cbfe..fd5b5d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -214,7 +214,7 @@ public void testSchedulerRecovery() throws Exception {
// 2 running containers.
Resource usedResources = Resources.multiply(containerResource, 2);
Resource nmResource =
- Resource.newInstance(nm1.getMemory(), nm1.getvCores());
+ Resource.newInstance(nm1.getMemoryLong(), nm1.getvCores());
assertTrue(schedulerNode1.isValidContainer(amContainer.getContainerId()));
assertTrue(schedulerNode1.isValidContainer(runningContainer
@@ -359,7 +359,7 @@ public void testDynamicQueueRecovery() throws Exception {
// 2 running containers.
Resource usedResources = Resources.multiply(containerResource, 2);
Resource nmResource =
- Resource.newInstance(nm1.getMemory(), nm1.getvCores());
+ Resource.newInstance(nm1.getMemoryLong(), nm1.getvCores());
assertTrue(schedulerNode1.isValidContainer(amContainer.getContainerId()));
assertTrue(
@@ -418,15 +418,15 @@ private void checkCSQueue(MockRM rm,
// ************* check Queue metrics ************
QueueMetrics queueMetrics = queue.getMetrics();
- assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(),
- availableResources.getVirtualCores(), usedResource.getMemory(),
+ assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemoryLong(),
+ availableResources.getVirtualCores(), usedResource.getMemoryLong(),
usedResource.getVirtualCores());
// ************ check user metrics ***********
QueueMetrics userMetrics =
queueMetrics.getUserMetrics(app.getUser());
- assertMetrics(userMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(),
- availableResources.getVirtualCores(), usedResource.getMemory(),
+ assertMetrics(userMetrics, 1, 0, 1, 0, 2, availableResources.getMemoryLong(),
+ availableResources.getVirtualCores(), usedResource.getMemoryLong(),
usedResource.getVirtualCores());
}
@@ -486,8 +486,8 @@ private void checkFSQueue(ResourceManager rm,
// ************ check queue metrics ****************
QueueMetrics queueMetrics = scheduler.getRootQueueMetrics();
- assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(),
- availableResources.getVirtualCores(), usedResources.getMemory(),
+ assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemoryLong(),
+ availableResources.getVirtualCores(), usedResources.getMemoryLong(),
usedResources.getVirtualCores());
}
@@ -636,7 +636,7 @@ public void testCapacitySchedulerRecovery() throws Exception {
// Calculate each queue's resource usage.
Resource containerResource = Resource.newInstance(1024, 1);
Resource nmResource =
- Resource.newInstance(nm1.getMemory(), nm1.getvCores());
+ Resource.newInstance(nm1.getMemoryLong(), nm1.getvCores());
Resource clusterResource = Resources.multiply(nmResource, 2);
Resource q1Resource = Resources.multiply(clusterResource, 0.5);
Resource q2Resource = Resources.multiply(clusterResource, 0.5);
@@ -661,8 +661,8 @@ public void testCapacitySchedulerRecovery() throws Exception {
q1UsedResource, 4);
QueueMetrics queue1Metrics = schedulerApp1_1.getQueue().getMetrics();
assertMetrics(queue1Metrics, 2, 0, 2, 0, 4,
- q1availableResources.getMemory(),
- q1availableResources.getVirtualCores(), q1UsedResource.getMemory(),
+ q1availableResources.getMemoryLong(),
+ q1availableResources.getVirtualCores(), q1UsedResource.getMemoryLong(),
q1UsedResource.getVirtualCores());
// assert queue B state.
@@ -672,8 +672,8 @@ public void testCapacitySchedulerRecovery() throws Exception {
q2UsedResource, 2);
QueueMetrics queue2Metrics = schedulerApp2.getQueue().getMetrics();
assertMetrics(queue2Metrics, 1, 0, 1, 0, 2,
- q2availableResources.getMemory(),
- q2availableResources.getVirtualCores(), q2UsedResource.getMemory(),
+ q2availableResources.getMemoryLong(),
+ q2availableResources.getVirtualCores(), q2UsedResource.getMemoryLong(),
q2UsedResource.getVirtualCores());
// assert parent queue state.
@@ -682,8 +682,8 @@ public void testCapacitySchedulerRecovery() throws Exception {
checkParentQueue(parentQueue, 6, totalUsedResource, (float) 6 / 16,
(float) 6 / 16);
assertMetrics(parentQueue.getMetrics(), 3, 0, 3, 0, 6,
- totalAvailableResource.getMemory(),
- totalAvailableResource.getVirtualCores(), totalUsedResource.getMemory(),
+ totalAvailableResource.getMemoryLong(),
+ totalAvailableResource.getVirtualCores(), totalUsedResource.getMemoryLong(),
totalUsedResource.getVirtualCores());
}
@@ -1101,8 +1101,8 @@ public Boolean get() {
private void assertMetrics(QueueMetrics qm, int appsSubmitted,
int appsPending, int appsRunning, int appsCompleted,
- int allocatedContainers, int availableMB, int availableVirtualCores,
- int allocatedMB, int allocatedVirtualCores) {
+ int allocatedContainers, long availableMB, long availableVirtualCores,
+ long allocatedMB, long allocatedVirtualCores) {
assertEquals(appsSubmitted, qm.getAppsSubmitted());
assertEquals(appsPending, qm.getAppsPending());
assertEquals(appsRunning, qm.getAppsRunning());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index f2b02bc..b2ebe12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -378,7 +378,7 @@ public void testPublishContainerMetrics() throws Exception {
entity.getOtherInfo().get(
ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO));
Assert.assertEquals(
- container.getAllocatedResource().getMemory(),
+ container.getAllocatedResource().getMemoryLong(),
entity.getOtherInfo().get(
ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO));
Assert.assertEquals(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
index 499a3d0..e666445 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java
@@ -929,13 +929,14 @@ public void testHierarchicalLarge3Levels() {
// which is likely triggered since we use small numbers for readability
//run with Logger.getRootLogger().setLevel(Level.DEBUG);
verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appC)));
- assertEquals(10, policy.getQueuePartitions().get("queueE").get("").preemptableExtra.getMemory());
+ assertEquals(10, policy.getQueuePartitions().get("queueE").get("").preemptableExtra.getMemoryLong());
//2nd level child(E) preempts 10, but parent A has only 9 extra
//check the parent can prempt only the extra from > 2 level child
TempQueuePerPartition tempQueueAPartition = policy.getQueuePartitions().get("queueA").get("");
- assertEquals(0, tempQueueAPartition.untouchableExtra.getMemory());
- int extraForQueueA = tempQueueAPartition.current.getMemory()- tempQueueAPartition.guaranteed.getMemory();
- assertEquals(extraForQueueA,tempQueueAPartition.preemptableExtra.getMemory());
+ assertEquals(0, tempQueueAPartition.untouchableExtra.getMemoryLong());
+ long extraForQueueA = tempQueueAPartition.current.getMemoryLong()
+ - tempQueueAPartition.guaranteed.getMemoryLong();
+ assertEquals(extraForQueueA,tempQueueAPartition.preemptableExtra.getMemoryLong());
}
static class IsPreemptionRequestFor
@@ -1064,12 +1065,12 @@ ParentQueue mockNested(Resource[] abs, int[] maxCap, Resource[] used,
when(root.getAbsoluteCapacity()).thenReturn(
Resources.divide(rc, tot, abs[0], tot));
when(root.getAbsoluteMaximumCapacity()).thenReturn(
- maxCap[0] / (float) tot.getMemory());
+ maxCap[0] / (float) tot.getMemoryLong());
when(root.getQueueResourceUsage()).thenReturn(resUsage);
QueueCapacities rootQc = new QueueCapacities(true);
rootQc.setAbsoluteUsedCapacity(Resources.divide(rc, tot, used[0], tot));
rootQc.setAbsoluteCapacity(Resources.divide(rc, tot, abs[0], tot));
- rootQc.setAbsoluteMaximumCapacity(maxCap[0] / (float) tot.getMemory());
+ rootQc.setAbsoluteMaximumCapacity(maxCap[0] / (float) tot.getMemoryLong());
when(root.getQueueCapacities()).thenReturn(rootQc);
when(root.getQueuePath()).thenReturn(CapacitySchedulerConfiguration.ROOT);
boolean preemptionDisabled = mockPreemptionStatus("root");
@@ -1094,13 +1095,13 @@ ParentQueue mockNested(Resource[] abs, int[] maxCap, Resource[] used,
when(q.getAbsoluteCapacity()).thenReturn(
Resources.divide(rc, tot, abs[i], tot));
when(q.getAbsoluteMaximumCapacity()).thenReturn(
- maxCap[i] / (float) tot.getMemory());
+ maxCap[i] / (float) tot.getMemoryLong());
// We need to make these fields to QueueCapacities
QueueCapacities qc = new QueueCapacities(false);
qc.setAbsoluteUsedCapacity(Resources.divide(rc, tot, used[i], tot));
qc.setAbsoluteCapacity(Resources.divide(rc, tot, abs[i], tot));
- qc.setAbsoluteMaximumCapacity(maxCap[i] / (float) tot.getMemory());
+ qc.setAbsoluteMaximumCapacity(maxCap[i] / (float) tot.getMemoryLong());
when(q.getQueueCapacities()).thenReturn(qc);
String parentPathName = p.getQueuePath();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
index 5ffae6e..29099ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
@@ -867,7 +867,7 @@ private void checkAbsCapacities(CSQueue queue, String partition,
private void checkPendingResource(CSQueue queue, String partition, int pending) {
ResourceUsage ru = queue.getQueueResourceUsage();
- Assert.assertEquals(pending, ru.getPending(partition).getMemory());
+ Assert.assertEquals(pending, ru.getPending(partition).getMemoryLong());
}
private void buildEnv(String labelsConfig, String nodesConfig,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java
index 43fd588..e59df78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java
@@ -577,7 +577,7 @@ private void checkNodeLabelInfo(List infos, String labelName, int a
for (RMNodeLabel info : infos) {
if (info.getLabelName().equals(labelName)) {
Assert.assertEquals(activeNMs, info.getNumActiveNMs());
- Assert.assertEquals(memory, info.getResource().getMemory());
+ Assert.assertEquals(memory, info.getResource().getMemoryLong());
return;
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
index 4aef7ae..926fcf8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java
@@ -90,7 +90,7 @@ public static void validateReservationQueue(
Assert.assertNotNull(plan);
Assert.assertTrue(plan instanceof InMemoryPlan);
Assert.assertEquals(planQName, plan.getQueueName());
- Assert.assertEquals(8192, plan.getTotalCapacity().getMemory());
+ Assert.assertEquals(8192, plan.getTotalCapacity().getMemoryLong());
Assert.assertTrue(
plan.getReservationAgent() instanceof AlignedPlannerWithGreedy);
Assert
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
index 85fafa7..7207522 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java
@@ -349,24 +349,24 @@ public void testPartialRemoval() {
// does not handle removal of "partial"
// allocations correctly.
Assert.assertEquals(102400, rleSparseVector.getCapacityAtTime(10)
- .getMemory());
- Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(13).getMemory());
- Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(19).getMemory());
+ .getMemoryLong());
+ Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(13).getMemoryLong());
+ Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(19).getMemoryLong());
Assert.assertEquals(102400, rleSparseVector.getCapacityAtTime(21)
- .getMemory());
+ .getMemoryLong());
Assert.assertEquals(2 * 102400, rleSparseVector.getCapacityAtTime(26)
- .getMemory());
+ .getMemoryLong());
ReservationInterval riRemove2 = new ReservationInterval(9, 13);
rleSparseVector.removeInterval(riRemove2, rr);
LOG.info(rleSparseVector.toString());
- Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(11).getMemory());
+ Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(11).getMemoryLong());
Assert.assertEquals(-102400, rleSparseVector.getCapacityAtTime(9)
- .getMemory());
- Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(13).getMemory());
+ .getMemoryLong());
+ Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(13).getMemoryLong());
Assert.assertEquals(102400, rleSparseVector.getCapacityAtTime(20)
- .getMemory());
+ .getMemoryLong());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java
index eb0b0e2..9d2c7c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java
@@ -153,9 +153,9 @@ public void testReplanningPlanCapacityLoss() throws PlanningException {
// check resources at each moment in time no more exceed capacity
for (int i = 0; i < 20; i++) {
- int tot = 0;
+ long tot = 0;
for (ReservationAllocation r : plan.getReservationsAtTime(i)) {
- tot = r.getResourcesAtTime(i).getMemory();
+ tot = r.getResourcesAtTime(i).getMemoryLong();
}
assertTrue(tot <= 70 * 1024);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
index 4042a29..429fda0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java
@@ -132,38 +132,38 @@ private void testMaximumAllocationMemoryHelper(
Assert.assertEquals(6, expectedMaxMemory.length);
Assert.assertEquals(0, scheduler.getNumClusterNodes());
- int maxMemory = scheduler.getMaximumResourceCapability().getMemory();
+ long maxMemory = scheduler.getMaximumResourceCapability().getMemoryLong();
Assert.assertEquals(expectedMaxMemory[0], maxMemory);
RMNode node1 = MockNodes.newNodeInfo(
0, Resources.createResource(node1MaxMemory), 1, "127.0.0.2");
scheduler.handle(new NodeAddedSchedulerEvent(node1));
Assert.assertEquals(1, scheduler.getNumClusterNodes());
- maxMemory = scheduler.getMaximumResourceCapability().getMemory();
+ maxMemory = scheduler.getMaximumResourceCapability().getMemoryLong();
Assert.assertEquals(expectedMaxMemory[1], maxMemory);
scheduler.handle(new NodeRemovedSchedulerEvent(node1));
Assert.assertEquals(0, scheduler.getNumClusterNodes());
- maxMemory = scheduler.getMaximumResourceCapability().getMemory();
+ maxMemory = scheduler.getMaximumResourceCapability().getMemoryLong();
Assert.assertEquals(expectedMaxMemory[2], maxMemory);
RMNode node2 = MockNodes.newNodeInfo(
0, Resources.createResource(node2MaxMemory), 2, "127.0.0.3");
scheduler.handle(new NodeAddedSchedulerEvent(node2));
Assert.assertEquals(1, scheduler.getNumClusterNodes());
- maxMemory = scheduler.getMaximumResourceCapability().getMemory();
+ maxMemory = scheduler.getMaximumResourceCapability().getMemoryLong();
Assert.assertEquals(expectedMaxMemory[3], maxMemory);
RMNode node3 = MockNodes.newNodeInfo(
0, Resources.createResource(node3MaxMemory), 3, "127.0.0.4");
scheduler.handle(new NodeAddedSchedulerEvent(node3));
Assert.assertEquals(2, scheduler.getNumClusterNodes());
- maxMemory = scheduler.getMaximumResourceCapability().getMemory();
+ maxMemory = scheduler.getMaximumResourceCapability().getMemoryLong();
Assert.assertEquals(expectedMaxMemory[4], maxMemory);
scheduler.handle(new NodeRemovedSchedulerEvent(node3));
Assert.assertEquals(1, scheduler.getNumClusterNodes());
- maxMemory = scheduler.getMaximumResourceCapability().getMemory();
+ maxMemory = scheduler.getMaximumResourceCapability().getMemoryLong();
Assert.assertEquals(expectedMaxMemory[5], maxMemory);
scheduler.handle(new NodeRemovedSchedulerEvent(node2));
@@ -620,8 +620,8 @@ private void verifyMaximumResourceCapability(
final Resource schedulerMaximumResourceCapability = scheduler
.getMaximumResourceCapability();
- Assert.assertEquals(expectedMaximumResource.getMemory(),
- schedulerMaximumResourceCapability.getMemory());
+ Assert.assertEquals(expectedMaximumResource.getMemoryLong(),
+ schedulerMaximumResourceCapability.getMemoryLong());
Assert.assertEquals(expectedMaximumResource.getVirtualCores(),
schedulerMaximumResourceCapability.getVirtualCores());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java
index 1ddeb0b..cbfe72b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java
@@ -135,7 +135,7 @@ private void internalTestModifyAndRead(String label) throws Exception {
}
void check(int mem, int cpu, Resource res) {
- Assert.assertEquals(mem, res.getMemory());
+ Assert.assertEquals(mem, res.getMemoryLong());
Assert.assertEquals(cpu, res.getVirtualCores());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
index 3208819..f1a309d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java
@@ -112,37 +112,37 @@ public void testNormalizeRequest() {
ask.setCapability(Resources.createResource(-1024));
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource,
maxResource);
- assertEquals(minMemory, ask.getCapability().getMemory());
+ assertEquals(minMemory, ask.getCapability().getMemoryLong());
// case zero memory
ask.setCapability(Resources.createResource(0));
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource,
maxResource);
- assertEquals(minMemory, ask.getCapability().getMemory());
+ assertEquals(minMemory, ask.getCapability().getMemoryLong());
// case memory is a multiple of minMemory
ask.setCapability(Resources.createResource(2 * minMemory));
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource,
maxResource);
- assertEquals(2 * minMemory, ask.getCapability().getMemory());
+ assertEquals(2 * minMemory, ask.getCapability().getMemoryLong());
// case memory is not a multiple of minMemory
ask.setCapability(Resources.createResource(minMemory + 10));
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource,
maxResource);
- assertEquals(2 * minMemory, ask.getCapability().getMemory());
+ assertEquals(2 * minMemory, ask.getCapability().getMemoryLong());
// case memory is equal to max allowed
ask.setCapability(Resources.createResource(maxMemory));
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource,
maxResource);
- assertEquals(maxMemory, ask.getCapability().getMemory());
+ assertEquals(maxMemory, ask.getCapability().getMemoryLong());
// case memory is just less than max
ask.setCapability(Resources.createResource(maxMemory - 10));
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource,
maxResource);
- assertEquals(maxMemory, ask.getCapability().getMemory());
+ assertEquals(maxMemory, ask.getCapability().getMemoryLong());
// max is not a multiple of min
maxResource = Resources.createResource(maxMemory - 10, 0);
@@ -150,14 +150,14 @@ public void testNormalizeRequest() {
// multiple of minMemory > maxMemory, then reduce to maxMemory
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource,
maxResource);
- assertEquals(maxResource.getMemory(), ask.getCapability().getMemory());
+ assertEquals(maxResource.getMemoryLong(), ask.getCapability().getMemoryLong());
// ask is more than max
maxResource = Resources.createResource(maxMemory, 0);
ask.setCapability(Resources.createResource(maxMemory + 100));
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource,
maxResource);
- assertEquals(maxResource.getMemory(), ask.getCapability().getMemory());
+ assertEquals(maxResource.getMemoryLong(), ask.getCapability().getMemoryLong());
}
@Test (timeout = 30000)
@@ -182,7 +182,7 @@ public void testNormalizeRequestWithDominantResourceCalculator() {
ask, resourceCalculator, clusterResource, minResource, maxResource);
assertEquals(minResource, ask.getCapability());
assertEquals(1, ask.getCapability().getVirtualCores());
- assertEquals(1024, ask.getCapability().getMemory());
+ assertEquals(1024, ask.getCapability().getMemoryLong());
// case non-zero memory & zero cores
ask.setCapability(Resources.createResource(1536, 0));
@@ -190,7 +190,7 @@ public void testNormalizeRequestWithDominantResourceCalculator() {
ask, resourceCalculator, clusterResource, minResource, maxResource);
assertEquals(Resources.createResource(2048, 1), ask.getCapability());
assertEquals(1, ask.getCapability().getVirtualCores());
- assertEquals(2048, ask.getCapability().getMemory());
+ assertEquals(2048, ask.getCapability().getMemoryLong());
}
@Test(timeout = 30000)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
index e668d94..eac4784 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java
@@ -292,12 +292,12 @@ public void testLimitsComputation() throws Exception {
// Assert in metrics
assertEquals(queue.getMetrics().getAMResourceLimitMB(),
- amResourceLimit.getMemory());
+ amResourceLimit.getMemoryLong());
assertEquals(queue.getMetrics().getAMResourceLimitVCores(),
amResourceLimit.getVirtualCores());
assertEquals(
- (int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),
+ (int)(clusterResource.getMemoryLong() * queue.getAbsoluteCapacity()),
queue.getMetrics().getAvailableMB()
);
@@ -312,7 +312,7 @@ public void testLimitsComputation() throws Exception {
Resource.newInstance(96*GB, 1));
assertEquals(
- (int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),
+ (int)(clusterResource.getMemoryLong() * queue.getAbsoluteCapacity()),
queue.getMetrics().getAvailableMB()
);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
index d8161f8..edb7379 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
@@ -171,13 +171,13 @@ public void testApplicationPriorityAllocation() throws Exception {
7, 2 * GB, nm1);
Assert.assertEquals(7, allocated1.size());
- Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
+ Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemoryLong());
// check node report, 15 GB used (1 AM and 7 containers) and 1 GB available
SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
nm1.getNodeId());
- Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemoryLong());
// Submit the second app App2 with priority 8 (Higher than App1)
Priority appPriority2 = Priority.newInstance(8);
@@ -189,8 +189,8 @@ public void testApplicationPriorityAllocation() throws Exception {
// check node report, 16 GB used and 0 GB available
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemoryLong());
// get scheduler
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
@@ -210,8 +210,8 @@ public void testApplicationPriorityAllocation() throws Exception {
// check node report, 12 GB used and 4 GB available
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemoryLong());
// send updated request for App1
am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList<ContainerId>());
@@ -226,8 +226,8 @@ public void testApplicationPriorityAllocation() throws Exception {
// check node report, 16 GB used and 0 GB available
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemoryLong());
rm.stop();
}
@@ -256,13 +256,13 @@ public void testPriorityWithPendingApplications() throws Exception {
7, 1 * GB, nm1);
Assert.assertEquals(7, allocated1.size());
- Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
+ Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemoryLong());
// check node report, 8 GB used (1 AM and 7 containers) and 0 GB available
SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
nm1.getNodeId());
- Assert.assertEquals(8 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(8 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemoryLong());
// Submit the second app App2 with priority 7
Priority appPriority2 = Priority.newInstance(7);
@@ -287,8 +287,8 @@ public void testPriorityWithPendingApplications() throws Exception {
// check node report, 1 GB used and 7 GB available
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(7 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(7 * GB, report_nm1.getAvailableResource().getMemoryLong());
rm.stop();
}
@@ -477,13 +477,13 @@ public void testApplicationPriorityAllocationWithChangeInPriority()
NUM_CONTAINERS, 2 * GB, nm1);
Assert.assertEquals(7, allocated1.size());
- Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
+ Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemoryLong());
// check node report, 15 GB used (1 AM and 7 containers) and 1 GB available
SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
nm1.getNodeId());
- Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemoryLong());
// Submit the second app App2 with priority 8 (Higher than App1)
Priority appPriority2 = Priority.newInstance(8);
@@ -495,8 +495,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority()
// check node report, 16 GB used and 0 GB available
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemoryLong());
// get scheduler
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
@@ -518,8 +518,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority()
// check node report, 12 GB used and 4 GB available
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemoryLong());
// add request for containers App1
am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList<ContainerId>());
@@ -531,8 +531,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority()
Assert.assertEquals(2, allocated2.size());
// check node report, 16 GB used and 0 GB available
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemoryLong());
// kill 1 more
counter = 0;
@@ -548,8 +548,8 @@ public void testApplicationPriorityAllocationWithChangeInPriority()
// check node report, 14 GB used and 2 GB available
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(14 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(14 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemoryLong());
// Change the priority of App1 to 3 (lowest)
Priority appPriority3 = Priority.newInstance(3);
@@ -617,7 +617,7 @@ protected Dispatcher createDispatcher() {
ResourceScheduler scheduler = rm1.getRMContext().getScheduler();
LeafQueue defaultQueue =
(LeafQueue) ((CapacityScheduler) scheduler).getQueue("default");
- int memory = defaultQueue.getAMResourceLimit().getMemory() / 2;
+ int memory = (int) (defaultQueue.getAMResourceLimit().getMemoryLong() / 2);
// App-1 with priority 5 submitted and running
Priority appPriority1 = Priority.newInstance(5);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 16ba607..9bb6d62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -508,12 +508,12 @@ private CSQueue findQueue(CSQueue root, String queuePath) {
private void checkApplicationResourceUsage(int expected,
Application application) {
- Assert.assertEquals(expected, application.getUsedResources().getMemory());
+ Assert.assertEquals(expected, application.getUsedResources().getMemoryLong());
}
private void checkNodeResourceUsage(int expected,
org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) {
- Assert.assertEquals(expected, node.getUsed().getMemory());
+ Assert.assertEquals(expected, node.getUsed().getMemoryLong());
node.checkResourceUsage();
}
@@ -562,14 +562,14 @@ public void testReconnectedNode() throws Exception {
cs.handle(new NodeAddedSchedulerEvent(n1));
cs.handle(new NodeAddedSchedulerEvent(n2));
- Assert.assertEquals(6 * GB, cs.getClusterResource().getMemory());
+ Assert.assertEquals(6 * GB, cs.getClusterResource().getMemoryLong());
// reconnect n1 with downgraded memory
n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1);
cs.handle(new NodeRemovedSchedulerEvent(n1));
cs.handle(new NodeAddedSchedulerEvent(n1));
- Assert.assertEquals(4 * GB, cs.getClusterResource().getMemory());
+ Assert.assertEquals(4 * GB, cs.getClusterResource().getMemoryLong());
cs.stop();
}
@@ -829,8 +829,8 @@ public void testResourceOverCommit() throws Exception {
SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport(
nm1.getNodeId());
// check node report, 2 GB used and 2 GB available
- Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemoryLong());
// add request for containers
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
@@ -846,17 +846,17 @@ public void testResourceOverCommit() throws Exception {
List<Container> allocated1 = alloc1Response.getAllocatedContainers();
Assert.assertEquals(1, allocated1.size());
- Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
+ Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemoryLong());
Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
// check node report, 4 GB used and 0 GB available
- Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
- Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
+ Assert.assertEquals(0, report_nm1.getAvailableResource().getMemoryLong());
+ Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemoryLong());
// check container is assigned with 2 GB.
Container c1 = allocated1.get(0);
- Assert.assertEquals(2 * GB, c1.getResource().getMemory());
+ Assert.assertEquals(2 * GB, c1.getResource().getMemoryLong());
// update node resource to 2 GB, so resource is over-consumed.
Map<NodeId, ResourceOption> nodeResourceMap =
@@ -870,8 +870,8 @@ public void testResourceOverCommit() throws Exception {
// Now, the used resource is still 4 GB, and available resource is minus value.
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemoryLong());
// Check container can complete successfully in case of resource over-commitment.
ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
@@ -887,9 +887,9 @@ public void testResourceOverCommit() throws Exception {
Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
+ Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemoryLong());
// As container return 2 GB back, the available resource becomes 0 again.
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemoryLong());
// Verify no NPE is trigger in schedule after resource is updated.
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1);
@@ -1826,16 +1826,16 @@ public void testMoveAppQueueMetricsCheck() throws Exception {
assertEquals(1, newNumAppsA);
assertEquals(2, newNumAppsRoot);
// original consumption on a1
- assertEquals(3 * GB, origOldA1.getResourcesUsed().getMemory());
+ assertEquals(3 * GB, origOldA1.getResourcesUsed().getMemoryLong());
assertEquals(1, origOldA1.getResourcesUsed().getvCores());
- assertEquals(0, origNewA1.getResourcesUsed().getMemory()); // after the move
+ assertEquals(0, origNewA1.getResourcesUsed().getMemoryLong()); // after the move
assertEquals(0, origNewA1.getResourcesUsed().getvCores()); // after the move
// app moved here with live containers
- assertEquals(3 * GB, targetNewA2.getResourcesUsed().getMemory());
+ assertEquals(3 * GB, targetNewA2.getResourcesUsed().getMemoryLong());
assertEquals(1, targetNewA2.getResourcesUsed().getvCores());
// it was empty before the move
assertEquals(0, targetOldA2.getNumApplications());
- assertEquals(0, targetOldA2.getResourcesUsed().getMemory());
+ assertEquals(0, targetOldA2.getResourcesUsed().getMemoryLong());
assertEquals(0, targetOldA2.getResourcesUsed().getvCores());
// after the app moved here
assertEquals(1, targetNewA2.getNumApplications());
@@ -1849,7 +1849,7 @@ public void testMoveAppQueueMetricsCheck() throws Exception {
assertEquals(0, targetOldA2.getNumContainers());
// 1 user with 3GB
assertEquals(3 * GB, origOldA1.getUsers().getUsersList().get(0)
- .getResourcesUsed().getMemory());
+ .getResourcesUsed().getMemoryLong());
// 1 user with 1 core
assertEquals(1, origOldA1.getUsers().getUsersList().get(0)
.getResourcesUsed().getvCores());
@@ -1857,7 +1857,7 @@ public void testMoveAppQueueMetricsCheck() throws Exception {
assertEquals(0, origNewA1.getUsers().getUsersList().size());
// 1 user with 3GB
assertEquals(3 * GB, targetNewA2.getUsers().getUsersList().get(0)
- .getResourcesUsed().getMemory());
+ .getResourcesUsed().getMemoryLong());
// 1 user with 1 core
assertEquals(1, targetNewA2.getUsers().getUsersList().get(0)
.getResourcesUsed().getvCores());
@@ -2205,8 +2205,8 @@ public void testAppReservationWithDominantResourceCalculator() throws Exception
rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
// check node report
- Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemoryLong());
// add request for containers
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 1 * GB, 1, 1);
@@ -2305,19 +2305,19 @@ public void testRefreshQueuesMaxAllocationRefresh() throws Exception {
assertEquals("max allocation in CS",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
- cs.getMaximumResourceCapability().getMemory());
+ cs.getMaximumResourceCapability().getMemoryLong());
assertEquals("max allocation for A1",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
- conf.getMaximumAllocationPerQueue(A1).getMemory());
+ conf.getMaximumAllocationPerQueue(A1).getMemoryLong());
assertEquals("max allocation",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
- conf.getMaximumAllocation().getMemory());
+ conf.getMaximumAllocation().getMemoryLong());
CSQueue rootQueue = cs.getRootQueue();
CSQueue queueA = findQueue(rootQueue, A);
CSQueue queueA1 = findQueue(queueA, A1);
assertEquals("queue max allocation", ((LeafQueue) queueA1)
- .getMaximumAllocation().getMemory(), 8192);
+ .getMaximumAllocation().getMemoryLong(), 8192);
setMaxAllocMb(conf, A1, 4096);
@@ -2400,19 +2400,19 @@ public void testRefreshQueuesMaxAllocationRefreshLarger() throws Exception {
assertEquals("max capability MB in CS",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
- cs.getMaximumResourceCapability().getMemory());
+ cs.getMaximumResourceCapability().getMemoryLong());
assertEquals("max capability vcores in CS",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
cs.getMaximumResourceCapability().getVirtualCores());
assertEquals("max allocation MB A1",
4096,
- conf.getMaximumAllocationPerQueue(A1).getMemory());
+ conf.getMaximumAllocationPerQueue(A1).getMemoryLong());
assertEquals("max allocation vcores A1",
2,
conf.getMaximumAllocationPerQueue(A1).getVirtualCores());
assertEquals("cluster max allocation MB",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
- conf.getMaximumAllocation().getMemory());
+ conf.getMaximumAllocation().getMemoryLong());
assertEquals("cluster max allocation vcores",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
conf.getMaximumAllocation().getVirtualCores());
@@ -2421,7 +2421,7 @@ public void testRefreshQueuesMaxAllocationRefreshLarger() throws Exception {
CSQueue queueA = findQueue(rootQueue, A);
CSQueue queueA1 = findQueue(queueA, A1);
assertEquals("queue max allocation", ((LeafQueue) queueA1)
- .getMaximumAllocation().getMemory(), 4096);
+ .getMaximumAllocation().getMemoryLong(), 4096);
setMaxAllocMb(conf, A1, 6144);
setMaxAllocVcores(conf, A1, 3);
@@ -2429,22 +2429,22 @@ public void testRefreshQueuesMaxAllocationRefreshLarger() throws Exception {
// conf will have changed but we shouldn't be able to change max allocation
// for the actual queue
assertEquals("max allocation MB A1", 6144,
- conf.getMaximumAllocationPerQueue(A1).getMemory());
+ conf.getMaximumAllocationPerQueue(A1).getMemoryLong());
assertEquals("max allocation vcores A1", 3,
conf.getMaximumAllocationPerQueue(A1).getVirtualCores());
assertEquals("max allocation MB cluster",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
- conf.getMaximumAllocation().getMemory());
+ conf.getMaximumAllocation().getMemoryLong());
assertEquals("max allocation vcores cluster",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
conf.getMaximumAllocation().getVirtualCores());
assertEquals("queue max allocation MB", 6144,
- ((LeafQueue) queueA1).getMaximumAllocation().getMemory());
+ ((LeafQueue) queueA1).getMaximumAllocation().getMemoryLong());
assertEquals("queue max allocation vcores", 3,
((LeafQueue) queueA1).getMaximumAllocation().getVirtualCores());
assertEquals("max capability MB cluster",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
- cs.getMaximumResourceCapability().getMemory());
+ cs.getMaximumResourceCapability().getMemoryLong());
assertEquals("cluster max capability vcores",
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
cs.getMaximumResourceCapability().getVirtualCores());
@@ -2469,7 +2469,7 @@ public void testRefreshQueuesMaxAllocationCSError() throws Exception {
checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
assertEquals("max allocation MB in CS", 10240,
- cs.getMaximumResourceCapability().getMemory());
+ cs.getMaximumResourceCapability().getMemoryLong());
assertEquals("max allocation vcores in CS", 10,
cs.getMaximumResourceCapability().getVirtualCores());
@@ -2515,7 +2515,7 @@ public void testRefreshQueuesMaxAllocationCSLarger() throws Exception {
checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY);
assertEquals("max allocation MB in CS", 10240,
- cs.getMaximumResourceCapability().getMemory());
+ cs.getMaximumResourceCapability().getMemoryLong());
assertEquals("max allocation vcores in CS", 10,
cs.getMaximumResourceCapability().getVirtualCores());
@@ -2527,15 +2527,15 @@ public void testRefreshQueuesMaxAllocationCSLarger() throws Exception {
CSQueue queueB2 = findQueue(queueB, B2);
assertEquals("queue A1 max allocation MB", 4096,
- ((LeafQueue) queueA1).getMaximumAllocation().getMemory());
+ ((LeafQueue) queueA1).getMaximumAllocation().getMemoryLong());
assertEquals("queue A1 max allocation vcores", 4,
((LeafQueue) queueA1).getMaximumAllocation().getVirtualCores());
assertEquals("queue A2 max allocation MB", 10240,
- ((LeafQueue) queueA2).getMaximumAllocation().getMemory());
+ ((LeafQueue) queueA2).getMaximumAllocation().getMemoryLong());
assertEquals("queue A2 max allocation vcores", 10,
((LeafQueue) queueA2).getMaximumAllocation().getVirtualCores());
assertEquals("queue B2 max allocation MB", 10240,
- ((LeafQueue) queueB2).getMaximumAllocation().getMemory());
+ ((LeafQueue) queueB2).getMaximumAllocation().getMemoryLong());
assertEquals("queue B2 max allocation vcores", 10,
((LeafQueue) queueB2).getMaximumAllocation().getVirtualCores());
@@ -2545,19 +2545,19 @@ public void testRefreshQueuesMaxAllocationCSLarger() throws Exception {
// cluster level setting should change and any queues without
// per queue setting
assertEquals("max allocation MB in CS", 12288,
- cs.getMaximumResourceCapability().getMemory());
+ cs.getMaximumResourceCapability().getMemoryLong());
assertEquals("max allocation vcores in CS", 12,
cs.getMaximumResourceCapability().getVirtualCores());
assertEquals("queue A1 max MB allocation", 4096,
- ((LeafQueue) queueA1).getMaximumAllocation().getMemory());
+ ((LeafQueue) queueA1).getMaximumAllocation().getMemoryLong());
assertEquals("queue A1 max vcores allocation", 4,
((LeafQueue) queueA1).getMaximumAllocation().getVirtualCores());
assertEquals("queue A2 max MB allocation", 12288,
- ((LeafQueue) queueA2).getMaximumAllocation().getMemory());
+ ((LeafQueue) queueA2).getMaximumAllocation().getMemoryLong());
assertEquals("queue A2 max vcores allocation", 12,
((LeafQueue) queueA2).getMaximumAllocation().getVirtualCores());
assertEquals("queue B2 max MB allocation", 12288,
- ((LeafQueue) queueB2).getMaximumAllocation().getMemory());
+ ((LeafQueue) queueB2).getMaximumAllocation().getMemoryLong());
assertEquals("queue B2 max vcores allocation", 12,
((LeafQueue) queueB2).getMaximumAllocation().getVirtualCores());
}
@@ -2604,7 +2604,7 @@ public void testHierarchyQueuesCurrentLimits() throws Exception {
// Maximum resoure of b1 is 100 * 0.895 * 0.792 = 71 GB
// 2 GBs used by am, so it's 71 - 2 = 69G.
Assert.assertEquals(69 * GB,
- am1.doHeartbeat().getAvailableResources().getMemory());
+ am1.doHeartbeat().getAvailableResources().getMemoryLong());
RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "b2");
MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
@@ -2620,7 +2620,7 @@ public void testHierarchyQueuesCurrentLimits() throws Exception {
// B1 uses 3 GB (2 * 1GB containers and 1 AM container)
// Available is 100 - 41 - 3 = 56 GB
Assert.assertEquals(56 * GB,
- am1.doHeartbeat().getAvailableResources().getMemory());
+ am1.doHeartbeat().getAvailableResources().getMemoryLong());
// Now we submit app3 to a1 (in higher level hierarchy), to see if headroom
// of app1 (in queue b1) updated correctly
@@ -2639,7 +2639,7 @@ public void testHierarchyQueuesCurrentLimits() throws Exception {
// A1 uses 25 GB (3 * 8GB containers and 1 AM container)
// Available is 100 - 41 - 4 - 25 = 30 GB
Assert.assertEquals(30 * GB,
- am1.doHeartbeat().getAvailableResources().getMemory());
+ am1.doHeartbeat().getAvailableResources().getMemoryLong());
}
@Test
@@ -2847,7 +2847,7 @@ private void checkPendingResource(MockRM rm, String queueName, int memory,
memory,
queue.getQueueResourceUsage()
.getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label)
- .getMemory());
+ .getMemoryLong());
}
private void checkPendingResourceGreaterThanZero(MockRM rm, String queueName,
@@ -2856,7 +2856,7 @@ private void checkPendingResourceGreaterThanZero(MockRM rm, String queueName,
CSQueue queue = cs.getQueue(queueName);
Assert.assertTrue(queue.getQueueResourceUsage()
.getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label)
- .getMemory() > 0);
+ .getMemoryLong() > 0);
}
// Test verifies AM Used resource for LeafQueue when AM ResourceRequest is
@@ -3056,7 +3056,7 @@ public void testHeadRoomCalculationWithDRC() throws Exception {
u0Priority, recordFactory)));
cs.handle(new NodeUpdateSchedulerEvent(node));
cs.handle(new NodeUpdateSchedulerEvent(node2));
- assertEquals(6*GB, fiCaApp1.getHeadroom().getMemory());
+ assertEquals(6*GB, fiCaApp1.getHeadroom().getMemoryLong());
assertEquals(15, fiCaApp1.getHeadroom().getVirtualCores());
// allocate container for app2 with 1GB memory and 1 vcore
@@ -3065,7 +3065,7 @@ public void testHeadRoomCalculationWithDRC() throws Exception {
u0Priority, recordFactory)));
cs.handle(new NodeUpdateSchedulerEvent(node));
cs.handle(new NodeUpdateSchedulerEvent(node2));
- assertEquals(9*GB, fiCaApp2.getHeadroom().getMemory());
+ assertEquals(9*GB, fiCaApp2.getHeadroom().getMemoryLong());
assertEquals(15, fiCaApp2.getHeadroom().getVirtualCores());
}
@@ -3170,7 +3170,7 @@ protected RMNodeLabelsManager createNodeLabelManager() {
FiCaSchedulerApp app = getFiCaSchedulerApp(rm, app1.getApplicationId());
Assert.assertEquals(2 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
checkPendingResource(rm, "a1", 2 * GB, null);
checkPendingResource(rm, "a", 2 * GB, null);
checkPendingResource(rm, "root", 2 * GB, null);
@@ -3184,7 +3184,7 @@ protected RMNodeLabelsManager createNodeLabelManager() {
null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
checkPendingResource(rm, "a1", 6 * GB, null);
checkPendingResource(rm, "a", 6 * GB, null);
checkPendingResource(rm, "root", 6 * GB, null);
@@ -3200,7 +3200,7 @@ protected RMNodeLabelsManager createNodeLabelManager() {
.newInstance(containerId3, Resources.createResource(2 * GB))),
null);
Assert.assertEquals(4 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
checkPendingResource(rm, "a1", 4 * GB, null);
checkPendingResource(rm, "a", 4 * GB, null);
checkPendingResource(rm, "root", 4 * GB, null);
@@ -3218,10 +3218,10 @@ private void verifyAMLimitForLeafQueue(CapacitySchedulerConfiguration config)
Resource amResourceLimit = queueA.getAMResourceLimit();
Resource amResource1 =
- Resource.newInstance(amResourceLimit.getMemory() + 1024,
+ Resource.newInstance(amResourceLimit.getMemoryLong() + 1024,
amResourceLimit.getVirtualCores() + 1);
Resource amResource2 =
- Resource.newInstance(amResourceLimit.getMemory() + 2048,
+ Resource.newInstance(amResourceLimit.getMemoryLong() + 2048,
amResourceLimit.getVirtualCores() + 1);
rm.submitApp(amResource1, "app-1", userName, null, queueName);
@@ -3341,23 +3341,23 @@ public void handle(Event event) {
application_0.schedule();
// Check the used resource is 1 GB 1 core
- Assert.assertEquals(1 * GB, nm_0.getUsed().getMemory());
+ Assert.assertEquals(1 * GB, nm_0.getUsed().getMemoryLong());
Resource usedResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getAllocatedResource();
- Assert.assertEquals(usedResource.getMemory(), 1 * GB);
+ Assert.assertEquals(usedResource.getMemoryLong(), 1 * GB);
Assert.assertEquals(usedResource.getVirtualCores(), 1);
// Check total resource of scheduler node is also changed to 1 GB 1 core
Resource totalResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getTotalResource();
- Assert.assertEquals(totalResource.getMemory(), 1 * GB);
+ Assert.assertEquals(totalResource.getMemoryLong(), 1 * GB);
Assert.assertEquals(totalResource.getVirtualCores(), 1);
// Check the available resource is 0/0
Resource availableResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource();
- Assert.assertEquals(availableResource.getMemory(), 0);
+ Assert.assertEquals(availableResource.getMemoryLong(), 0);
Assert.assertEquals(availableResource.getVirtualCores(), 0);
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
index cff79cd..db97977 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java
@@ -111,7 +111,7 @@ private void checkUsedResource(MockRM rm, String queueName, int memory,
CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler();
CSQueue queue = scheduler.getQueue(queueName);
Assert.assertEquals(memory, queue.getQueueResourceUsage().getUsed(label)
- .getMemory());
+ .getMemoryLong());
}
private void checkUsedCapacity(MockRM rm, String queueName, int capacity,
@@ -128,7 +128,7 @@ private void checkAMUsedResource(MockRM rm, String queueName, int memory,
CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler();
CSQueue queue = scheduler.getQueue(queueName);
Assert.assertEquals(memory, queue.getQueueResourceUsage().getAMUsed(label)
- .getMemory());
+ .getMemoryLong());
}
private void checkUserUsedResource(MockRM rm, String queueName,
@@ -137,7 +137,7 @@ private void checkUserUsedResource(MockRM rm, String queueName,
LeafQueue queue = (LeafQueue) scheduler.getQueue(queueName);
LeafQueue.User user = queue.getUser(userName);
Assert.assertEquals(memory,
- user.getResourceUsage().getUsed(partition).getMemory());
+ user.getResourceUsage().getUsed(partition).getMemoryLong());
}
@Test(timeout = 60000)
@@ -175,7 +175,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
rm.getResourceScheduler().getAppResourceUsageReport(
am1.getApplicationAttemptId());
Assert.assertEquals(1024, appResourceUsageReport.getUsedResources()
- .getMemory());
+ .getMemoryLong());
Assert.assertEquals(1, appResourceUsageReport.getUsedResources()
.getVirtualCores());
// request a container.
@@ -186,7 +186,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
rm.getResourceScheduler().getAppResourceUsageReport(
am1.getApplicationAttemptId());
Assert.assertEquals(2048, appResourceUsageReport.getUsedResources()
- .getMemory());
+ .getMemoryLong());
Assert.assertEquals(2, appResourceUsageReport.getUsedResources()
.getVirtualCores());
LeafQueue queue =
@@ -196,7 +196,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
for (UserInfo userInfo : users) {
if (userInfo.getUsername().equals("user")) {
ResourceInfo resourcesUsed = userInfo.getResourcesUsed();
- Assert.assertEquals(2048, resourcesUsed.getMemory());
+ Assert.assertEquals(2048, resourcesUsed.getMemoryLong());
Assert.assertEquals(2, resourcesUsed.getvCores());
}
}
@@ -271,9 +271,9 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkUserUsedResource(rm, "a", "user", "x", 0);
checkUserUsedResource(rm, "a", "user", "z", 1024);
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getUsed("x").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("x").getMemoryLong());
Assert.assertEquals(1024,
- app.getAppAttemptResourceUsage().getUsed("z").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("z").getMemoryLong());
// change h1's label to y
mgr.replaceLabelsOnNode(ImmutableMap.of(nm1.getNodeId(), toSet("y")));
@@ -296,11 +296,11 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkUserUsedResource(rm, "a", "user", "y", 1024);
checkUserUsedResource(rm, "a", "user", "z", 0);
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getUsed("x").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("x").getMemoryLong());
Assert.assertEquals(1024,
- app.getAppAttemptResourceUsage().getUsed("y").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("y").getMemoryLong());
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getUsed("z").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("z").getMemoryLong());
// change h1's label to no label
Set emptyLabels = new HashSet<>();
@@ -326,13 +326,13 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkUserUsedResource(rm, "a", "user", "z", 0);
checkUserUsedResource(rm, "a", "user", "", 2048);
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getUsed("x").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("x").getMemoryLong());
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getUsed("y").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("y").getMemoryLong());
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getUsed("z").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("z").getMemoryLong());
Assert.assertEquals(2048,
- app.getAppAttemptResourceUsage().getUsed("").getMemory());
+ app.getAppAttemptResourceUsage().getUsed("").getMemoryLong());
// Finish the two containers, we should see used resource becomes 0
cs.completedContainer(cs.getRMContainer(containerId2),
@@ -460,17 +460,17 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkUserUsedResource(rm, "a", "u2", "z", 2 * GB);
checkUserUsedResource(rm, "a", "u2", "", 1 * GB);
Assert.assertEquals(0,
- application1.getAppAttemptResourceUsage().getUsed("x").getMemory());
+ application1.getAppAttemptResourceUsage().getUsed("x").getMemoryLong());
Assert.assertEquals(1 * GB,
- application1.getAppAttemptResourceUsage().getUsed("z").getMemory());
+ application1.getAppAttemptResourceUsage().getUsed("z").getMemoryLong());
Assert.assertEquals(2 * GB,
- application1.getAppAttemptResourceUsage().getUsed("").getMemory());
+ application1.getAppAttemptResourceUsage().getUsed("").getMemoryLong());
Assert.assertEquals(0,
- application2.getAppAttemptResourceUsage().getUsed("x").getMemory());
+ application2.getAppAttemptResourceUsage().getUsed("x").getMemoryLong());
Assert.assertEquals(2 * GB,
- application2.getAppAttemptResourceUsage().getUsed("z").getMemory());
+ application2.getAppAttemptResourceUsage().getUsed("z").getMemoryLong());
Assert.assertEquals(1 * GB,
- application2.getAppAttemptResourceUsage().getUsed("").getMemory());
+ application2.getAppAttemptResourceUsage().getUsed("").getMemoryLong());
rm.close();
}
@@ -536,9 +536,9 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkUserUsedResource(rm, "a", "user", "x", 0);
checkUserUsedResource(rm, "a", "user", "z", 2048);
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getAMUsed("x").getMemory());
+ app.getAppAttemptResourceUsage().getAMUsed("x").getMemoryLong());
Assert.assertEquals(1024,
- app.getAppAttemptResourceUsage().getAMUsed("z").getMemory());
+ app.getAppAttemptResourceUsage().getAMUsed("z").getMemoryLong());
// change h1's label to no label
Set emptyLabels = new HashSet<>();
@@ -555,11 +555,11 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkUserUsedResource(rm, "a", "user", "z", 0);
checkUserUsedResource(rm, "a", "user", "", 2048);
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getAMUsed("x").getMemory());
+ app.getAppAttemptResourceUsage().getAMUsed("x").getMemoryLong());
Assert.assertEquals(0,
- app.getAppAttemptResourceUsage().getAMUsed("z").getMemory());
+ app.getAppAttemptResourceUsage().getAMUsed("z").getMemoryLong());
Assert.assertEquals(1024,
- app.getAppAttemptResourceUsage().getAMUsed("").getMemory());
+ app.getAppAttemptResourceUsage().getAMUsed("").getMemoryLong());
rm.close();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPreemption.java
index 216ebab..5775815 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerPreemption.java
@@ -171,9 +171,9 @@ public void testSimplePreemption() throws Exception {
// NM1/NM2 has available resource = 0G
Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
// AM asks for a 1 * GB container
am2.allocate(Arrays.asList(ResourceRequest
@@ -250,9 +250,9 @@ public void testPreemptionConsidersNodeLocalityDelay()
// NM1/NM2 has available resource = 0G
Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
// AM asks for a 1 * GB container with unknown host and unknown rack
am2.allocate(Arrays.asList(ResourceRequest
@@ -341,9 +341,9 @@ public void testPreemptionConsidersHardNodeLocality()
// NM1/NM2 has available resource = 0G
Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
// AM asks for a 1 * GB container for h3 with hard locality,
// h3 doesn't exist in the cluster
@@ -438,7 +438,7 @@ public void testPreemptionPolicyShouldRespectAlreadyMarkedKillableContainers()
// NM1 has available resource = 0G
Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
am2.allocate("*", 1 * GB, 1, new ArrayList());
// Get edit policy and do one update
@@ -538,7 +538,7 @@ public void testPreemptionPolicyCleanupKillableContainersWhenNoPreemptionNeeded(
// NM1 has available resource = 0G
Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
am2.allocate("*", 3 * GB, 1, new ArrayList());
// Get edit policy and do one update
@@ -627,9 +627,9 @@ public void testPreemptionConsidersUserLimit()
// NM1/NM2 has available resource = 0G
Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
// AM asks for a 1 * GB container
am2.allocate(Arrays.asList(ResourceRequest
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java
index 1612201..c7e0b64 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java
@@ -166,14 +166,14 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable {
private float computeQueueAbsoluteUsedCapacity(CSQueue queue,
int expectedMemory, Resource clusterResource) {
return (
- ((float)expectedMemory / (float)clusterResource.getMemory())
+ ((float)expectedMemory / (float)clusterResource.getMemoryLong())
);
}
private float computeQueueUsedCapacity(CSQueue queue,
int expectedMemory, Resource clusterResource) {
return (expectedMemory /
- (clusterResource.getMemory() * queue.getAbsoluteCapacity()));
+ (clusterResource.getMemoryLong() * queue.getAbsoluteCapacity()));
}
final static float DELTA = 0.0001f;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index f94c963..0c5d9e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -397,15 +397,15 @@ public void testExcessReservationWillBeUnreserved() throws Exception {
// NM1 has available resource = 2G (8G - 2 * 1G - 4G)
Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Usage of queue = 4G + 2 * 1G + 4G (reserved)
Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getUsed().getMemory());
+ .getUsed().getMemoryLong());
Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getReserved().getMemory());
+ .getReserved().getMemoryLong());
Assert.assertEquals(4 * GB, leafQueue.getQueueResourceUsage().getReserved()
- .getMemory());
+ .getMemoryLong());
// Cancel asks of app2 and re-kick RM
am2.allocate("*", 4 * GB, 0, new ArrayList());
@@ -414,14 +414,14 @@ public void testExcessReservationWillBeUnreserved() throws Exception {
// App2's reservation will be cancelled
Assert.assertTrue(schedulerApp2.getReservedContainers().size() == 0);
Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
Assert.assertEquals(6 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getUsed().getMemory());
+ .getUsed().getMemoryLong());
Assert.assertEquals(0, cs.getRootQueue().getQueueResourceUsage()
- .getReserved().getMemory());
+ .getReserved().getMemoryLong());
Assert.assertEquals(0, leafQueue.getQueueResourceUsage().getReserved()
- .getMemory());
+ .getMemoryLong());
rm1.close();
}
@@ -480,15 +480,15 @@ public void testAllocationForReservedContainer() throws Exception {
// NM1 has available resource = 2G (8G - 2 * 1G - 4G)
Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Usage of queue = 4G + 2 * 1G + 4G (reserved)
Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getUsed().getMemory());
+ .getUsed().getMemoryLong());
Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getReserved().getMemory());
+ .getReserved().getMemoryLong());
Assert.assertEquals(4 * GB, leafQueue.getQueueResourceUsage().getReserved()
- .getMemory());
+ .getMemoryLong());
// Mark one app1 container as killed/completed and re-kick RM
for (RMContainer container : schedulerApp1.getLiveContainers()) {
@@ -509,15 +509,15 @@ public void testAllocationForReservedContainer() throws Exception {
// NM1 has available resource = 2G (8G - 2 * 1G - 4G)
Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Usage of queue = 4G + 2 * 1G
Assert.assertEquals(6 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getUsed().getMemory());
+ .getUsed().getMemoryLong());
Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getReserved().getMemory());
+ .getReserved().getMemoryLong());
Assert.assertEquals(0 * GB, leafQueue.getQueueResourceUsage().getReserved()
- .getMemory());
+ .getMemoryLong());
rm1.close();
}
@@ -576,15 +576,15 @@ public void testReservedContainerMetricsOnDecommisionedNode() throws Exception {
// NM1 has available resource = 2G (8G - 2 * 1G - 4G)
Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId())
- .getUnallocatedResource().getMemory());
+ .getUnallocatedResource().getMemoryLong());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Usage of queue = 4G + 2 * 1G + 4G (reserved)
Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getUsed().getMemory());
+ .getUsed().getMemoryLong());
Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getReserved().getMemory());
+ .getReserved().getMemoryLong());
Assert.assertEquals(4 * GB, leafQueue.getQueueResourceUsage().getReserved()
- .getMemory());
+ .getMemoryLong());
// Remove the node
cs.handle(new NodeRemovedSchedulerEvent(rmNode1));
@@ -596,11 +596,11 @@ public void testReservedContainerMetricsOnDecommisionedNode() throws Exception {
// Usage and Reserved capacity of queue is 0
Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getUsed().getMemory());
+ .getUsed().getMemoryLong());
Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getReserved().getMemory());
+ .getReserved().getMemoryLong());
Assert.assertEquals(0 * GB, leafQueue.getQueueResourceUsage().getReserved()
- .getMemory());
+ .getMemoryLong());
rm1.close();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
index f04748d..39eff64 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
@@ -147,7 +147,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 2 * GB, null);
Assert.assertEquals(2 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// NM1 do 1 heartbeats
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -157,7 +157,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
// Pending resource should be deducted
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
verifyContainerIncreased(am1.allocate(null, null), containerId1, 3 * GB);
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 17 * GB);
@@ -188,7 +188,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkUsedResource(rm1, "default", 3 * GB, null);
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
ContainerId containerId1 =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
@@ -202,7 +202,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
verifyContainerDecreased(response, containerId1, 1 * GB);
checkUsedResource(rm1, "default", 1 * GB, null);
Assert.assertEquals(1 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
// Check if decreased containers added to RMNode
RMNodeImpl rmNode =
@@ -272,7 +272,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// NM1 do 1 heartbeats
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -284,21 +284,21 @@ public RMNodeLabelsManager createNodeLabelManager() {
/* Check reservation statuses */
// Increase request should be reserved
Assert.assertTrue(rmContainer1.hasIncreaseReservation());
- Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemory());
+ Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemoryLong());
Assert.assertFalse(app.getReservedContainers().isEmpty());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 9 * GB, null);
Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
// Complete one container and do another allocation
am1.allocate(null, Arrays.asList(containerId2));
@@ -315,15 +315,15 @@ public RMNodeLabelsManager createNodeLabelManager() {
// Pending resource will be changed since it's satisfied
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 7 * GB, null);
Assert.assertEquals(7 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
Assert.assertEquals(7 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 1 * GB);
rm1.close();
@@ -373,7 +373,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// NM1 do 1 heartbeats
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -390,15 +390,15 @@ public RMNodeLabelsManager createNodeLabelManager() {
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will *NOT* be updated
checkUsedResource(rm1, "default", 3 * GB, null);
Assert.assertEquals(3 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
rm1.close();
}
@@ -453,7 +453,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// NM1 do 1 heartbeats
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -465,21 +465,21 @@ public RMNodeLabelsManager createNodeLabelManager() {
/* Check reservation statuses */
// Increase request should be reserved
Assert.assertTrue(rmContainer1.hasIncreaseReservation());
- Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemory());
+ Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemoryLong());
Assert.assertFalse(app.getReservedContainers().isEmpty());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 9 * GB, null);
Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
// Complete one container and cancel increase request (via send a increase
// request, make target_capacity=existing_capacity)
@@ -501,15 +501,15 @@ public RMNodeLabelsManager createNodeLabelManager() {
// Pending resource will be changed since it's satisfied
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 1 * GB, null);
Assert.assertEquals(1 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
Assert.assertEquals(1 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
rm1.close();
}
@@ -565,7 +565,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// NM1 do 1 heartbeats
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -577,21 +577,21 @@ public RMNodeLabelsManager createNodeLabelManager() {
/* Check reservation statuses */
// Increase request should be reserved
Assert.assertTrue(rmContainer1.hasIncreaseReservation());
- Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemory());
+ Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemoryLong());
Assert.assertFalse(app.getReservedContainers().isEmpty());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 10 * GB, null);
Assert.assertEquals(10 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(4 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
// Complete one container and cancel increase request (via send a increase
// request, make target_capacity=existing_capacity)
@@ -611,15 +611,15 @@ public RMNodeLabelsManager createNodeLabelManager() {
// Pending resource will be changed since it's satisfied
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 1 * GB, null);
Assert.assertEquals(1 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
Assert.assertEquals(1 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
rm1.close();
}
@@ -673,7 +673,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// NM1 do 1 heartbeats
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -685,21 +685,21 @@ public RMNodeLabelsManager createNodeLabelManager() {
/* Check reservation statuses */
// Increase request should be reserved
Assert.assertTrue(rmContainer2.hasIncreaseReservation());
- Assert.assertEquals(6 * GB, rmContainer2.getReservedResource().getMemory());
+ Assert.assertEquals(6 * GB, rmContainer2.getReservedResource().getMemoryLong());
Assert.assertFalse(app.getReservedContainers().isEmpty());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 9 * GB, null);
Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
// Complete container2, container will be unreserved and completed
am1.allocate(null, Arrays.asList(containerId2));
@@ -712,15 +712,15 @@ public RMNodeLabelsManager createNodeLabelManager() {
// Pending resource will be changed since it's satisfied
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 1 * GB, null);
Assert.assertEquals(1 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
Assert.assertEquals(1 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
rm1.close();
}
@@ -771,7 +771,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// NM1 do 1 heartbeats
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -783,21 +783,21 @@ public RMNodeLabelsManager createNodeLabelManager() {
/* Check reservation statuses */
// Increase request should be reserved
Assert.assertTrue(rmContainer2.hasIncreaseReservation());
- Assert.assertEquals(6 * GB, rmContainer2.getReservedResource().getMemory());
+ Assert.assertEquals(6 * GB, rmContainer2.getReservedResource().getMemoryLong());
Assert.assertFalse(app.getReservedContainers().isEmpty());
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
// Pending resource will not be changed since it's not satisfied
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 9 * GB, null);
Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
// Kill the application
cs.handle(new AppAttemptRemovedSchedulerEvent(am1.getApplicationAttemptId(),
@@ -811,15 +811,15 @@ public RMNodeLabelsManager createNodeLabelManager() {
// Pending resource will be changed since it's satisfied
checkPendingResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 0 * GB, null);
Assert.assertEquals(0 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
rm1.close();
}
@@ -894,7 +894,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Get rmNode1
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -916,15 +916,15 @@ public RMNodeLabelsManager createNodeLabelManager() {
// There're still 3 pending increase requests
checkPendingResource(rm1, "default", 3 * GB, null);
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 10 * GB, null);
Assert.assertEquals(10 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
Assert.assertEquals(10 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
rm1.close();
}
@@ -975,7 +975,7 @@ public RMNodeLabelsManager createNodeLabelManager() {
checkPendingResource(rm1, "default", 6 * GB, null);
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Get rmNode1
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
@@ -997,15 +997,15 @@ public RMNodeLabelsManager createNodeLabelManager() {
// There're still 3 pending increase requests
checkPendingResource(rm1, "default", 3 * GB, null);
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getPending().getMemory());
+ app.getAppAttemptResourceUsage().getPending().getMemoryLong());
// Queue/user/application's usage will be updated
checkUsedResource(rm1, "default", 10 * GB, null);
Assert.assertEquals(10 * GB, ((LeafQueue) cs.getQueue("default"))
- .getUser("user").getUsed().getMemory());
+ .getUser("user").getUsed().getMemoryLong());
Assert.assertEquals(0 * GB,
- app.getAppAttemptResourceUsage().getReserved().getMemory());
+ app.getAppAttemptResourceUsage().getReserved().getMemoryLong());
Assert.assertEquals(10 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
rm1.close();
}
@@ -1033,7 +1033,7 @@ public ResourceScheduler createScheduler() {
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(
rm, app1.getApplicationId());
Assert.assertEquals(3 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
// making sure container is launched
ContainerId containerId1 =
ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
@@ -1062,7 +1062,7 @@ private void checkPendingResource(MockRM rm, String queueName, int memory,
Assert.assertEquals(memory,
queue.getQueueResourceUsage()
.getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label)
- .getMemory());
+ .getMemoryLong());
}
private void checkUsedResource(MockRM rm, String queueName, int memory,
@@ -1072,7 +1072,7 @@ private void checkUsedResource(MockRM rm, String queueName, int memory,
Assert.assertEquals(memory,
queue.getQueueResourceUsage()
.getUsed(label == null ? RMNodeLabelsManager.NO_LABEL : label)
- .getMemory());
+ .getMemoryLong());
}
private void verifyContainerIncreased(AllocateResponse response,
@@ -1082,7 +1082,7 @@ private void verifyContainerIncreased(AllocateResponse response,
for (Container c : increasedContainers) {
if (c.getId().equals(containerId)) {
found = true;
- Assert.assertEquals(mem, c.getResource().getMemory());
+ Assert.assertEquals(mem, c.getResource().getMemoryLong());
}
}
if (!found) {
@@ -1097,7 +1097,7 @@ private void verifyContainerDecreased(AllocateResponse response,
for (Container c : decreasedContainers) {
if (c.getId().equals(containerId)) {
found = true;
- Assert.assertEquals(mem, c.getResource().getMemory());
+ Assert.assertEquals(mem, c.getResource().getMemoryLong());
}
}
if (!found) {
@@ -1121,6 +1121,6 @@ private void verifyAvailableResourceOfSchedulerNode(MockRM rm, NodeId nodeId,
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
SchedulerNode node = cs.getNode(nodeId);
Assert
- .assertEquals(expectedMemory, node.getUnallocatedResource().getMemory());
+ .assertEquals(expectedMemory, node.getUnallocatedResource().getMemoryLong());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
index 645086d..43d99dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java
@@ -98,7 +98,7 @@ public void testContainerIsRemovedFromAllocationExpirer()
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(
rm1, app1.getApplicationId());
Assert.assertEquals(2 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 18 * GB);
// Report container status
nm1.nodeHeartbeat(
@@ -129,11 +129,11 @@ public void testContainerIsRemovedFromAllocationExpirer()
// Verify container size is 3G
Assert.assertEquals(
3 * GB, rm1.getResourceScheduler().getRMContainer(containerId2)
- .getAllocatedResource().getMemory());
+ .getAllocatedResource().getMemoryLong());
// Verify total resource usage
checkUsedResource(rm1, "default", 4 * GB, null);
Assert.assertEquals(4 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
// Verify available resource
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB);
rm1.stop();
@@ -172,7 +172,7 @@ public void testContainerIncreaseAllocationExpiration()
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(
rm1, app1.getApplicationId());
Assert.assertEquals(2 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 18 * GB);
nm1.nodeHeartbeat(
app1.getCurrentAppAttempt()
@@ -190,7 +190,7 @@ public void testContainerIncreaseAllocationExpiration()
// Verify resource usage
checkUsedResource(rm1, "default", 4 * GB, null);
Assert.assertEquals(4 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB);
// Wait long enough for the increase token to expire, and for the roll
// back action to complete
@@ -198,11 +198,11 @@ public void testContainerIncreaseAllocationExpiration()
// Verify container size is 1G
Assert.assertEquals(
1 * GB, rm1.getResourceScheduler().getRMContainer(containerId2)
- .getAllocatedResource().getMemory());
+ .getAllocatedResource().getMemoryLong());
// Verify total resource usage is 2G
checkUsedResource(rm1, "default", 2 * GB, null);
Assert.assertEquals(2 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
// Verify available resource is rolled back to 18GB
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 18 * GB);
rm1.stop();
@@ -276,7 +276,7 @@ public void testConsecutiveContainerIncreaseAllocationExpiration()
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(
rm1, app1.getApplicationId());
Assert.assertEquals(6 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
// Verify available resource is now reduced to 14GB
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 14 * GB);
// Use the first token (3G)
@@ -287,11 +287,11 @@ public void testConsecutiveContainerIncreaseAllocationExpiration()
// Verify container size is rolled back to 3G
Assert.assertEquals(
3 * GB, rm1.getResourceScheduler().getRMContainer(containerId2)
- .getAllocatedResource().getMemory());
+ .getAllocatedResource().getMemoryLong());
// Verify total resource usage is 4G
checkUsedResource(rm1, "default", 4 * GB, null);
Assert.assertEquals(4 * GB,
- app.getAppAttemptResourceUsage().getUsed().getMemory());
+ app.getAppAttemptResourceUsage().getUsed().getMemoryLong());
// Verify available resource is rolled back to 14GB
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB);
// Verify NM receives the decrease message (3G)
@@ -299,7 +299,7 @@ public void testConsecutiveContainerIncreaseAllocationExpiration()
nm1.nodeHeartbeat(true).getContainersToDecrease();
Assert.assertEquals(1, containersToDecrease.size());
Assert.assertEquals(
- 3 * GB, containersToDecrease.get(0).getResource().getMemory());
+ 3 * GB, containersToDecrease.get(0).getResource().getMemoryLong());
rm1.stop();
}
@@ -394,13 +394,13 @@ public void testDecreaseAfterIncreaseWithAllocationExpiration()
Thread.sleep(10000);
Assert.assertEquals(
2 * GB, rm1.getResourceScheduler().getRMContainer(containerId2)
- .getAllocatedResource().getMemory());
+ .getAllocatedResource().getMemoryLong());
Assert.assertEquals(
3 * GB, rm1.getResourceScheduler().getRMContainer(containerId3)
- .getAllocatedResource().getMemory());
+ .getAllocatedResource().getMemoryLong());
Assert.assertEquals(
4 * GB, rm1.getResourceScheduler().getRMContainer(containerId4)
- .getAllocatedResource().getMemory());
+ .getAllocatedResource().getMemoryLong());
// Verify NM receives 2 decrease message
List containersToDecrease =
nm1.nodeHeartbeat(true).getContainersToDecrease();
@@ -408,9 +408,9 @@ public void testDecreaseAfterIncreaseWithAllocationExpiration()
// Sort the list to make sure containerId3 is the first
Collections.sort(containersToDecrease);
Assert.assertEquals(
- 3 * GB, containersToDecrease.get(0).getResource().getMemory());
+ 3 * GB, containersToDecrease.get(0).getResource().getMemoryLong());
Assert.assertEquals(
- 4 * GB, containersToDecrease.get(1).getResource().getMemory());
+ 4 * GB, containersToDecrease.get(1).getResource().getMemoryLong());
rm1.stop();
}
@@ -421,7 +421,7 @@ private void checkUsedResource(MockRM rm, String queueName, int memory,
Assert.assertEquals(memory,
queue.getQueueResourceUsage()
.getUsed(label == null ? RMNodeLabelsManager.NO_LABEL : label)
- .getMemory());
+ .getMemoryLong());
}
private void verifyAvailableResourceOfSchedulerNode(MockRM rm, NodeId nodeId,
@@ -429,7 +429,7 @@ private void verifyAvailableResourceOfSchedulerNode(MockRM rm, NodeId nodeId,
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
SchedulerNode node = cs.getNode(nodeId);
Assert
- .assertEquals(expectedMemory, node.getUnallocatedResource().getMemory());
+ .assertEquals(expectedMemory, node.getUnallocatedResource().getMemoryLong());
}
private Container getContainer(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 263b95b..cec27d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -326,7 +326,7 @@ public void testSingleQueueOneUserMetrics() throws Exception {
a.assignContainers(clusterResource, node_0, new ResourceLimits(
clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
assertEquals(
- (int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - (1*GB),
+ (int)(node_0.getTotalResource().getMemoryLong() * a.getCapacity()) - (1*GB),
a.getMetrics().getAvailableMB());
}
@@ -408,7 +408,7 @@ public void testAppAttemptMetrics() throws Exception {
assertEquals(1, a.getMetrics().getAppsSubmitted());
assertEquals(1, a.getMetrics().getAppsPending());
assertEquals(1, a.getUser(user_0).getActiveApplications());
- assertEquals(app_1.getAMResource().getMemory(), a.getMetrics()
+ assertEquals(app_1.getAMResource().getMemoryLong(), a.getMetrics()
.getUsedAMResourceMB());
assertEquals(app_1.getAMResource().getVirtualCores(), a.getMetrics()
.getUsedAMResourceVCores());
@@ -516,9 +516,9 @@ public void testSingleQueueWithOneUser() throws Exception {
// Only 1 container
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(1*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(1*GB, a.getMetrics().getAllocatedMB());
assertEquals(0*GB, a.getMetrics().getAvailableMB());
@@ -527,18 +527,18 @@ public void testSingleQueueWithOneUser() throws Exception {
// you can get one container more than user-limit
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Can't allocate 3rd due to user-limit
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(2*GB, a.getMetrics().getAllocatedMB());
@@ -546,18 +546,18 @@ public void testSingleQueueWithOneUser() throws Exception {
a.setUserLimitFactor(10);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(3*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(3*GB, a.getMetrics().getAllocatedMB());
// One more should work, for app_1, due to user-limit-factor
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(4*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(4*GB, a.getMetrics().getAllocatedMB());
@@ -566,9 +566,9 @@ public void testSingleQueueWithOneUser() throws Exception {
a.setMaxCapacity(0.5f);
a.assignContainers(clusterResource, node_0, new ResourceLimits(
clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(4*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(4*GB, a.getMetrics().getAllocatedMB());
@@ -580,9 +580,9 @@ public void testSingleQueueWithOneUser() throws Exception {
ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
RMContainerEventType.KILL, null, true);
}
- assertEquals(1*GB, a.getUsedResources().getMemory());
- assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(1*GB, a.getMetrics().getAllocatedMB());
@@ -595,12 +595,12 @@ public void testSingleQueueWithOneUser() throws Exception {
RMContainerEventType.KILL, null, true);
}
- assertEquals(0*GB, a.getUsedResources().getMemory());
- assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(0*GB, a.getMetrics().getAllocatedMB());
- assertEquals((int)(a.getCapacity() * node_0.getTotalResource().getMemory()),
+ assertEquals((int)(a.getCapacity() * node_0.getTotalResource().getMemoryLong()),
a.getMetrics().getAvailableMB());
}
@@ -665,27 +665,27 @@ public void testUserLimits() throws Exception {
// 1 container to user_0
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(3*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// Allocate one container to app_1. Even if app_0
// submit earlier, it cannot get this container assigned since user_0
// exceeded user-limit already.
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(4*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
// Allocate one container to app_0, before allocating this container,
// user-limit = ceil((4 + 1) / 2) = 3G. app_0's used resource (3G) <=
// user-limit.
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(7*GB, a.getUsedResources().getMemory());
- assertEquals(6*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(7*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(6*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
// app_0 doesn't have outstanding resources, there's only one active user.
assertEquals("There should only be 1 active user!",
@@ -744,7 +744,7 @@ public void testComputeUserLimitAndSetHeadroom(){
"", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
//maxqueue 16G, userlimit 13G, - 4G used = 9G
- assertEquals(9*GB,app_0.getHeadroom().getMemory());
+ assertEquals(9*GB,app_0.getHeadroom().getMemoryLong());
//test case 2
final ApplicationAttemptId appAttemptId_2 =
@@ -762,13 +762,13 @@ public void testComputeUserLimitAndSetHeadroom(){
qb.computeUserLimitAndSetHeadroom(app_0, clusterResource,
"", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8*GB, qb.getUsedResources().getMemory());
- assertEquals(4*GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8*GB, qb.getUsedResources().getMemoryLong());
+ assertEquals(4*GB, app_0.getCurrentConsumption().getMemoryLong());
//maxqueue 16G, userlimit 13G, - 4G used = 9G BUT
//maxqueue 16G - used 8G (4 each app/user) = 8G max headroom (the new logic)
- assertEquals(8*GB, app_0.getHeadroom().getMemory());
- assertEquals(4*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(8*GB, app_2.getHeadroom().getMemory());
+ assertEquals(8*GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(4*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(8*GB, app_2.getHeadroom().getMemoryLong());
//test case 3
qb.finishApplication(app_0.getApplicationId(), user_0);
@@ -805,10 +805,10 @@ public void testComputeUserLimitAndSetHeadroom(){
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
qb.computeUserLimitAndSetHeadroom(app_3, clusterResource,
"", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(4*GB, qb.getUsedResources().getMemory());
+ assertEquals(4*GB, qb.getUsedResources().getMemoryLong());
//maxqueue 16G, userlimit 7G, used (by each user) 2G, headroom 5G (both)
- assertEquals(5*GB, app_3.getHeadroom().getMemory());
- assertEquals(5*GB, app_1.getHeadroom().getMemory());
+ assertEquals(5*GB, app_3.getHeadroom().getMemoryLong());
+ assertEquals(5*GB, app_1.getHeadroom().getMemoryLong());
//test case 4
final ApplicationAttemptId appAttemptId_4 =
TestUtils.getMockApplicationAttemptId(4, 0);
@@ -830,13 +830,13 @@ public void testComputeUserLimitAndSetHeadroom(){
//app3 is user1, active from last test case
//maxqueue 16G, userlimit 13G, used 2G, would be headroom 10G BUT
//10G in use, so max possible headroom is 6G (new logic)
- assertEquals(6*GB, app_3.getHeadroom().getMemory());
+ assertEquals(6*GB, app_3.getHeadroom().getMemoryLong());
//testcase3 still active - 2+2+6=10
- assertEquals(10*GB, qb.getUsedResources().getMemory());
+ assertEquals(10*GB, qb.getUsedResources().getMemoryLong());
//app4 is user 0
//maxqueue 16G, userlimit 13G, used 8G, headroom 5G
//(8G used is 6G from this test case - app4, 2 from last test case, app_1)
- assertEquals(5*GB, app_4.getHeadroom().getMemory());
+ assertEquals(5*GB, app_4.getHeadroom().getMemoryLong());
}
@Test
@@ -892,16 +892,16 @@ public void testUserHeadroomMultiApp() throws Exception {
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(1*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
//Now, headroom is the same for all apps for a given user + queue combo
//and a change to any app's headroom is reflected for all the user's apps
//once those apps are active/have themselves calculated headroom for
//allocation at least one time
- assertEquals(2*GB, app_0.getHeadroom().getMemory());
- assertEquals(0*GB, app_1.getHeadroom().getMemory());//not yet active
- assertEquals(0*GB, app_2.getHeadroom().getMemory());//not yet active
+ assertEquals(2*GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(0*GB, app_1.getHeadroom().getMemoryLong());//not yet active
+ assertEquals(0*GB, app_2.getHeadroom().getMemoryLong());//not yet active
app_1.updateResourceRequests(Collections.singletonList(
TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true,
@@ -909,12 +909,12 @@ public void testUserHeadroomMultiApp() throws Exception {
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_0.getHeadroom().getMemory());
- assertEquals(1*GB, app_1.getHeadroom().getMemory());//now active
- assertEquals(0*GB, app_2.getHeadroom().getMemory());//not yet active
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(1*GB, app_1.getHeadroom().getMemoryLong());//now active
+ assertEquals(0*GB, app_2.getHeadroom().getMemoryLong());//not yet active
//Complete container and verify that headroom is updated, for both apps
//for the user
@@ -925,8 +925,8 @@ public void testUserHeadroomMultiApp() throws Exception {
ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
RMContainerEventType.KILL, null, true);
- assertEquals(2*GB, app_0.getHeadroom().getMemory());
- assertEquals(2*GB, app_1.getHeadroom().getMemory());
+ assertEquals(2*GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(2*GB, app_1.getHeadroom().getMemoryLong());
}
@Test
@@ -998,23 +998,23 @@ public void testHeadroomWithMaxCap() throws Exception {
// 1 container to user_0
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// TODO, fix headroom in the future patch
- assertEquals(1*GB, app_0.getHeadroom().getMemory());
+ assertEquals(1*GB, app_0.getHeadroom().getMemoryLong());
// User limit = 4G, 2 in use
- assertEquals(0*GB, app_1.getHeadroom().getMemory());
+ assertEquals(0*GB, app_1.getHeadroom().getMemoryLong());
// the application is not yet active
// Again one to user_0 since he hasn't exceeded user limit yet
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(3*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_0.getHeadroom().getMemory()); // 4G - 3G
- assertEquals(1*GB, app_1.getHeadroom().getMemory()); // 4G - 3G
+ assertEquals(3*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_0.getHeadroom().getMemoryLong()); // 4G - 3G
+ assertEquals(1*GB, app_1.getHeadroom().getMemoryLong()); // 4G - 3G
// Submit requests for app_1 and set max-cap
a.setMaxCapacity(.1f);
@@ -1027,12 +1027,12 @@ public void testHeadroomWithMaxCap() throws Exception {
// and no more containers to queue since it's already at max-cap
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(3*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_0.getHeadroom().getMemory());
- assertEquals(0*GB, app_1.getHeadroom().getMemory());
+ assertEquals(3*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(0*GB, app_1.getHeadroom().getMemoryLong());
// Check headroom for app_2
app_1.updateResourceRequests(Collections.singletonList( // unset
@@ -1041,7 +1041,7 @@ public void testHeadroomWithMaxCap() throws Exception {
assertEquals(1, a.getActiveUsersManager().getNumActiveUsers());
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(0*GB, app_2.getHeadroom().getMemory()); // hit queue max-cap
+ assertEquals(0*GB, app_2.getHeadroom().getMemoryLong()); // hit queue max-cap
}
@Test
@@ -1112,25 +1112,25 @@ public void testSingleQueueWithMultipleUsers() throws Exception {
// Only 1 container
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(1*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
// you can get one container more than user-limit
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// Can't allocate 3rd due to user-limit
a.setUserLimit(25);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// Submit resource requests for other apps now to 'activate' them
@@ -1147,32 +1147,32 @@ public void testSingleQueueWithMultipleUsers() throws Exception {
a.setUserLimitFactor(10);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(5*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
// Now allocations should goto app_0 since
// user_0 is at user-limit not above it
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(6*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(6*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
// Test max-capacity
// Now - no more allocs since we are at max-cap
a.setMaxCapacity(0.5f);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(6*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(6*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
// Revert max-capacity and user-limit-factor
// Now, allocations should goto app_3 since it's under user-limit
@@ -1180,20 +1180,20 @@ public void testSingleQueueWithMultipleUsers() throws Exception {
a.setUserLimitFactor(1);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(7*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(7*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_3.getCurrentConsumption().getMemoryLong());
// Now we should assign to app_3 again since user_2 is under user-limit
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8*GB, a.getUsedResources().getMemory());
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(2*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(8*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(2*GB, app_3.getCurrentConsumption().getMemoryLong());
// 8. Release each container from app_0
for (RMContainer rmContainer : app_0.getLiveContainers()) {
@@ -1203,11 +1203,11 @@ public void testSingleQueueWithMultipleUsers() throws Exception {
ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
RMContainerEventType.KILL, null, true);
}
- assertEquals(5*GB, a.getUsedResources().getMemory());
- assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(3*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(2*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(5*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(3*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(2*GB, app_3.getCurrentConsumption().getMemoryLong());
// 9. Release each container from app_2
for (RMContainer rmContainer : app_2.getLiveContainers()) {
@@ -1217,11 +1217,11 @@ public void testSingleQueueWithMultipleUsers() throws Exception {
ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
RMContainerEventType.KILL, null, true);
}
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(2*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(2*GB, app_3.getCurrentConsumption().getMemoryLong());
// 10. Release each container from app_3
for (RMContainer rmContainer : app_3.getLiveContainers()) {
@@ -1231,11 +1231,11 @@ public void testSingleQueueWithMultipleUsers() throws Exception {
ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),
RMContainerEventType.KILL, null, true);
}
- assertEquals(0*GB, a.getUsedResources().getMemory());
- assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
}
@Test
@@ -1289,9 +1289,9 @@ public void testReservation() throws Exception {
// Only 1 container
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(1*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(1*GB, a.getMetrics().getAllocatedMB());
assertEquals(0*GB, a.getMetrics().getAvailableMB());
@@ -1300,20 +1300,20 @@ public void testReservation() throws Exception {
// you can get one container more than user-limit
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Now, reservation should kick in for app_1
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(6*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
- assertEquals(2*GB, node_0.getAllocatedResource().getMemory());
+ assertEquals(6*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemoryLong());
+ assertEquals(2*GB, node_0.getAllocatedResource().getMemoryLong());
assertEquals(4*GB, a.getMetrics().getReservedMB());
assertEquals(2*GB, a.getMetrics().getAllocatedMB());
@@ -1326,11 +1326,11 @@ public void testReservation() throws Exception {
RMContainerEventType.KILL, null, true);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
- assertEquals(1*GB, node_0.getAllocatedResource().getMemory());
+ assertEquals(5*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemoryLong());
+ assertEquals(1*GB, node_0.getAllocatedResource().getMemoryLong());
assertEquals(4*GB, a.getMetrics().getReservedMB());
assertEquals(1*GB, a.getMetrics().getAllocatedMB());
@@ -1343,11 +1343,11 @@ public void testReservation() throws Exception {
RMContainerEventType.KILL, null, true);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(4*GB, a.getUsedResources().getMemory());
- assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentReservation().getMemory());
- assertEquals(4*GB, node_0.getAllocatedResource().getMemory());
+ assertEquals(4*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentReservation().getMemoryLong());
+ assertEquals(4*GB, node_0.getAllocatedResource().getMemoryLong());
assertEquals(0*GB, a.getMetrics().getReservedMB());
assertEquals(4*GB, a.getMetrics().getAllocatedMB());
}
@@ -1417,26 +1417,26 @@ public void testReservationExchange() throws Exception {
// Only 1 container
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(1*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
// you can get one container more than user-limit
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// Now, reservation should kick in for app_1
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(6*GB, a.getUsedResources().getMemory());
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
- assertEquals(2*GB, node_0.getAllocatedResource().getMemory());
+ assertEquals(6*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemoryLong());
+ assertEquals(2*GB, node_0.getAllocatedResource().getMemoryLong());
// Now free 1 container from app_0 i.e. 1G, and re-reserve it
RMContainer rmContainer = app_0.getLiveContainers().iterator().next();
@@ -1447,31 +1447,31 @@ public void testReservationExchange() throws Exception {
RMContainerEventType.KILL, null, true);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
- assertEquals(1*GB, node_0.getAllocatedResource().getMemory());
+ assertEquals(5*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemoryLong());
+ assertEquals(1*GB, node_0.getAllocatedResource().getMemoryLong());
assertEquals(1, app_1.getReReservations(priority));
// Re-reserve
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
- assertEquals(1*GB, node_0.getAllocatedResource().getMemory());
+ assertEquals(5*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemoryLong());
+ assertEquals(1*GB, node_0.getAllocatedResource().getMemoryLong());
assertEquals(2, app_1.getReReservations(priority));
// Try to schedule on node_1 now, should *move* the reservation
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(9*GB, a.getUsedResources().getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
- assertEquals(4*GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(9*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentReservation().getMemoryLong());
+ assertEquals(4*GB, node_1.getAllocatedResource().getMemoryLong());
// Doesn't change yet... only when reservation is cancelled or a different
// container is reserved
assertEquals(2, app_1.getReReservations(priority));
@@ -1485,11 +1485,11 @@ public void testReservationExchange() throws Exception {
RMContainerEventType.KILL, null, true);
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(4*GB, a.getUsedResources().getMemory());
- assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentReservation().getMemory());
- assertEquals(0*GB, node_0.getAllocatedResource().getMemory());
+ assertEquals(4*GB, a.getUsedResources().getMemoryLong());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(4*GB, app_1.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentReservation().getMemoryLong());
+ assertEquals(0*GB, node_0.getAllocatedResource().getMemoryLong());
}
private void verifyContainerAllocated(CSAssignment assignment, NodeType nodeType) {
@@ -2447,9 +2447,9 @@ public void testFifoAssignment() throws Exception {
// app_1 will get containers as it has high priority
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- Assert.assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ Assert.assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
app_0_requests_0.clear();
app_0_requests_0.add(
@@ -2465,12 +2465,12 @@ public void testFifoAssignment() throws Exception {
//app_1 will still get assigned first as priority is more.
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- Assert.assertEquals(2*GB, app_1.getCurrentConsumption().getMemory());
- Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ Assert.assertEquals(2*GB, app_1.getCurrentConsumption().getMemoryLong());
+ Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
//and only then will app_2
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- Assert.assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ Assert.assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
}
@Test
public void testConcurrentAccess() throws Exception {
@@ -2594,9 +2594,9 @@ public void testFairAssignment() throws Exception {
// app_0 will get containers as its submitted first.
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
+ Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- Assert.assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ Assert.assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
app_0_requests_0.clear();
app_0_requests_0.add(
@@ -2613,12 +2613,12 @@ public void testFairAssignment() throws Exception {
//Since it already has more resources, app_0 will not get
//assigned first, but app_1 will
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- Assert.assertEquals(2*GB, app_1.getCurrentConsumption().getMemory());
+ Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ Assert.assertEquals(2*GB, app_1.getCurrentConsumption().getMemoryLong());
//and only then will app_0
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- Assert.assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
+ Assert.assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
}
@@ -2773,10 +2773,10 @@ public void testGetTotalPendingResourcesConsideringUserLimitOneUser()
// all users (only user_0) queue 'e' should be able to consume 1GB.
// The first container should be assigned to app_0 with no headroom left
// even though user_0's apps are still asking for a total of 4GB.
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// Assign 2nd container of 1GB
e.assignContainers(clusterResource, node_0,
@@ -2786,19 +2786,19 @@ public void testGetTotalPendingResourcesConsideringUserLimitOneUser()
// scheduler will assign one container more than user-limit-factor.
// This container also went to app_0. Still with no neadroom even though
// app_0 and app_1 are asking for a cumulative 3GB.
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// Can't allocate 3rd container due to user-limit. Headroom still 0.
e.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// Increase user-limit-factor from 1GB to 10GB (1% * 10 * 100GB = 10GB).
// Pending for both app_0 and app_1 are still 3GB, so user-limit-factor
@@ -2806,16 +2806,16 @@ public void testGetTotalPendingResourcesConsideringUserLimitOneUser()
// getTotalPendingResourcesConsideringUserLimit()
e.setUserLimitFactor(10.0f);
assertEquals(3*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
e.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
// app_0 is now satisified, app_1 is still asking for 2GB.
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(2*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// Get the last 2 containers for app_1, no more pending requests.
e.assignContainers(clusterResource, node_0,
@@ -2824,10 +2824,10 @@ public void testGetTotalPendingResourcesConsideringUserLimitOneUser()
e.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource),
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(2*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(3*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(2*GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// Release each container from app_0
for (RMContainer rmContainer : app_0.getLiveContainers()) {
@@ -2929,14 +2929,14 @@ public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
// With queue capacity set at 1% of 100GB and user-limit-factor set to 1.0,
// queue 'e' should be able to consume 1GB per user.
assertEquals(2*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// None of the apps have assigned resources
// user_0's apps:
- assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// user_1's apps:
- assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
// Assign 1st Container of 1GB
e.assignContainers(clusterResource, node_0,
@@ -2945,13 +2945,13 @@ public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
// The first container was assigned to user_0's app_0. Queues total headroom
// has 1GB left for user_1.
assertEquals(1*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// user_0's apps:
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_1.getCurrentConsumption().getMemoryLong());
// user_1's apps:
- assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
// Assign 2nd container of 1GB
e.assignContainers(clusterResource, node_0,
@@ -2962,13 +2962,13 @@ public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
// this container went to user_0's app_1. so, headroom for queue 'e'e is
// still 1GB for user_1
assertEquals(1*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// user_0's apps:
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
// user_1's apps:
- assertEquals(0*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(0*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
// Assign 3rd container.
e.assignContainers(clusterResource, node_0,
@@ -2977,13 +2977,13 @@ public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
// Container was allocated to user_1's app_2 since user_1, Now, no headroom
// is left.
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// user_0's apps:
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_1.getCurrentConsumption().getMemoryLong());
// user_1's apps:
- assertEquals(1*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
// Assign 4th container.
e.assignContainers(clusterResource, node_0,
@@ -2992,16 +2992,16 @@ public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
// Allocated to user_1's app_2 since scheduler allocates 1 container
// above user resource limit. Available headroom still 0.
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// user_0's apps:
- int app_0_consumption = app_0.getCurrentConsumption().getMemory();
+ long app_0_consumption = app_0.getCurrentConsumption().getMemoryLong();
assertEquals(1*GB, app_0_consumption);
- int app_1_consumption = app_1.getCurrentConsumption().getMemory();
+ long app_1_consumption = app_1.getCurrentConsumption().getMemoryLong();
assertEquals(1*GB, app_1_consumption);
// user_1's apps:
- int app_2_consumption = app_2.getCurrentConsumption().getMemory();
+ long app_2_consumption = app_2.getCurrentConsumption().getMemoryLong();
assertEquals(2*GB, app_2_consumption);
- int app_3_consumption = app_3.getCurrentConsumption().getMemory();
+ long app_3_consumption = app_3.getCurrentConsumption().getMemoryLong();
assertEquals(0*GB, app_3_consumption);
// Attempt to assign 5th container. Will be a no-op.
@@ -3011,13 +3011,13 @@ public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
// Cannot allocate 5th container because both users are above their allowed
// user resource limit. Values should be the same as previously.
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// user_0's apps:
- assertEquals(app_0_consumption, app_0.getCurrentConsumption().getMemory());
- assertEquals(app_1_consumption, app_1.getCurrentConsumption().getMemory());
+ assertEquals(app_0_consumption, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(app_1_consumption, app_1.getCurrentConsumption().getMemoryLong());
// user_1's apps:
- assertEquals(app_2_consumption, app_2.getCurrentConsumption().getMemory());
- assertEquals(app_3_consumption, app_3.getCurrentConsumption().getMemory());
+ assertEquals(app_2_consumption, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(app_3_consumption, app_3.getCurrentConsumption().getMemoryLong());
// Increase user-limit-factor from 1GB to 10GB (1% * 10 * 100GB = 10GB).
// Pending for both user_0 and user_1 are still 1GB each, so user-limit-
@@ -3029,13 +3029,13 @@ public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
// Next container goes to user_0's app_1, since it still wanted 1GB.
assertEquals(1*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
// user_0's apps:
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(2*GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(2*GB, app_1.getCurrentConsumption().getMemoryLong());
// user_1's apps:
- assertEquals(2*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(0*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(0*GB, app_3.getCurrentConsumption().getMemoryLong());
e.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource),
@@ -3043,12 +3043,12 @@ public void testGetTotalPendingResourcesConsideringUserLimitTwoUsers()
// Last container goes to user_1's app_3, since it still wanted 1GB.
// user_0's apps:
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit(
- clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory());
- assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(2*GB, app_1.getCurrentConsumption().getMemory());
+ clusterResource, RMNodeLabelsManager.NO_LABEL).getMemoryLong());
+ assertEquals(1*GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(2*GB, app_1.getCurrentConsumption().getMemoryLong());
// user_1's apps:
- assertEquals(2*GB, app_2.getCurrentConsumption().getMemory());
- assertEquals(1*GB, app_3.getCurrentConsumption().getMemory());
+ assertEquals(2*GB, app_2.getCurrentConsumption().getMemoryLong());
+ assertEquals(1*GB, app_3.getCurrentConsumption().getMemoryLong());
// Release each container from app_0
for (RMContainer rmContainer : app_0.getLiveContainers()) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index bf389b0..b7487b7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -522,22 +522,22 @@ public RMNodeLabelsManager createNodeLabelManager() {
Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
Assert.assertTrue(schedulerApp1.getReservedContainers().size() > 0);
Assert.assertEquals(9 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getUsed("x").getMemory());
+ .getUsed("x").getMemoryLong());
Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getReserved("x").getMemory());
+ .getReserved("x").getMemoryLong());
Assert.assertEquals(4 * GB,
- leafQueue.getQueueResourceUsage().getReserved("x").getMemory());
+ leafQueue.getQueueResourceUsage().getReserved("x").getMemoryLong());
// Cancel asks of app2 and re-kick RM
am1.allocate("*", 4 * GB, 0, new ArrayList());
cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
Assert.assertEquals(5 * GB, cs.getRootQueue().getQueueResourceUsage()
- .getUsed("x").getMemory());
+ .getUsed("x").getMemoryLong());
Assert.assertEquals(0, cs.getRootQueue().getQueueResourceUsage()
- .getReserved("x").getMemory());
+ .getReserved("x").getMemoryLong());
Assert.assertEquals(0, leafQueue.getQueueResourceUsage().getReserved("x")
- .getMemory());
+ .getMemoryLong());
rm1.close();
}
@@ -549,7 +549,7 @@ private void checkPendingResource(MockRM rm, int priority,
app.getAppSchedulingInfo().getResourceRequest(
Priority.newInstance(priority), "*");
Assert.assertEquals(memory,
- rr.getCapability().getMemory() * rr.getNumContainers());
+ rr.getCapability().getMemoryLong() * rr.getNumContainers());
}
private void checkLaunchedContainerNumOnNode(MockRM rm, NodeId nodeId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
index 23dc860..2e7caf4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
@@ -174,14 +174,14 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable {
private float computeQueueAbsoluteUsedCapacity(CSQueue queue,
int expectedMemory, Resource clusterResource) {
return (
- ((float)expectedMemory / (float)clusterResource.getMemory())
+ ((float)expectedMemory / (float)clusterResource.getMemoryLong())
);
}
private float computeQueueUsedCapacity(CSQueue queue,
int expectedMemory, Resource clusterResource) {
return (expectedMemory /
- (clusterResource.getMemory() * queue.getAbsoluteCapacity()));
+ (clusterResource.getMemoryLong() * queue.getAbsoluteCapacity()));
}
final static float DELTA = 0.0001f;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueCapacities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueCapacities.java
index 356ed46..d6d1f45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueCapacities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueCapacities.java
@@ -117,7 +117,7 @@ private void internalTestModifyAndRead(String label) throws Exception {
}
void check(int mem, int cpu, Resource res) {
- Assert.assertEquals(mem, res.getMemory());
+ Assert.assertEquals(mem, res.getMemoryLong());
Assert.assertEquals(cpu, res.getVirtualCores());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
index 632b547..f101d25 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java
@@ -266,88 +266,88 @@ public void testReservation() throws Exception {
// Only AM
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(2 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
assertEquals(22 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// Only 1 map - simulating reduce
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5 * GB, a.getUsedResources().getMemory());
- assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(5 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(5 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
assertEquals(19 * GB, a.getMetrics().getAvailableMB());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// Only 1 map to other node - simulating reduce
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(16 * GB, a.getMetrics().getAvailableMB());
- assertEquals(16 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(16 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(null, node_0.getReservedContainer());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
// try to assign reducer (5G on node 0 and should reserve)
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(13 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(13 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(5 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(11 * GB, a.getMetrics().getAvailableMB());
- assertEquals(11 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(11 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource()
- .getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ .getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
// assign reducer to node 2
a.assignContainers(clusterResource, node_2,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(18 * GB, a.getUsedResources().getMemory());
- assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(18 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(13 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(5 * GB, a.getMetrics().getReservedMB());
assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
assertEquals(6 * GB, a.getMetrics().getAvailableMB());
- assertEquals(6 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(6 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource()
- .getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(5 * GB, node_2.getAllocatedResource().getMemory());
+ .getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(5 * GB, node_2.getAllocatedResource().getMemoryLong());
assertEquals(1, app_0.getTotalRequiredResources(priorityReduce));
// node_1 heartbeat and unreserves from node_0 in order to allocate
// on node_1
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(18 * GB, a.getUsedResources().getMemory());
- assertEquals(18 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(18 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(18 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(18 * GB, a.getMetrics().getAllocatedMB());
assertEquals(6 * GB, a.getMetrics().getAvailableMB());
- assertEquals(6 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(6 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(null, node_0.getReservedContainer());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(8 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(5 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(8 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(5 * GB, node_2.getAllocatedResource().getMemoryLong());
assertEquals(0, app_0.getTotalRequiredResources(priorityReduce));
}
@@ -428,27 +428,27 @@ public void testReservationLimitOtherUsers() throws Exception {
// Only AM
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(0 * GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(2 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(0 * GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
assertEquals(22 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(4 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(2 * GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(4 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(2 * GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(4 * GB, a.getMetrics().getAllocatedMB());
assertEquals(20 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(2 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(2 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// Add a few requests to each app
app_0.updateResourceRequests(Collections.singletonList(TestUtils
@@ -461,29 +461,29 @@ public void testReservationLimitOtherUsers() throws Exception {
// add a reservation for app_0
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(12 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(2 * GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(12 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(2 * GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(8 * GB, a.getMetrics().getReservedMB());
assertEquals(4 * GB, a.getMetrics().getAllocatedMB());
assertEquals(12 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(2 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(2 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// next assignment is beyond user limit for user_0 but it should assign to
// app_1 for user_1
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(14 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(4 * GB, app_1.getCurrentConsumption().getMemory());
+ assertEquals(14 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(4 * GB, app_1.getCurrentConsumption().getMemoryLong());
assertEquals(8 * GB, a.getMetrics().getReservedMB());
assertEquals(6 * GB, a.getMetrics().getAllocatedMB());
assertEquals(10 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(4 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(4 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
}
@Test
@@ -563,89 +563,89 @@ public void testReservationNoContinueLook() throws Exception {
// Only AM
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(2 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
assertEquals(22 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// Only 1 map - simulating reduce
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5 * GB, a.getUsedResources().getMemory());
- assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(5 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(5 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
assertEquals(19 * GB, a.getMetrics().getAvailableMB());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// Only 1 map to other node - simulating reduce
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(16 * GB, a.getMetrics().getAvailableMB());
- assertEquals(16 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(16 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(null, node_0.getReservedContainer());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
// try to assign reducer (5G on node 0 and should reserve)
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(13 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(13 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(5 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(11 * GB, a.getMetrics().getAvailableMB());
- assertEquals(11 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(11 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource()
- .getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ .getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
// assign reducer to node 2
a.assignContainers(clusterResource, node_2,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(18 * GB, a.getUsedResources().getMemory());
- assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(18 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(13 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(5 * GB, a.getMetrics().getReservedMB());
assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
assertEquals(6 * GB, a.getMetrics().getAvailableMB());
- assertEquals(6 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(6 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource()
- .getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(5 * GB, node_2.getAllocatedResource().getMemory());
+ .getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(5 * GB, node_2.getAllocatedResource().getMemoryLong());
assertEquals(1, app_0.getTotalRequiredResources(priorityReduce));
// node_1 heartbeat and won't unreserve from node_0, potentially stuck
// if AM doesn't handle
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(18 * GB, a.getUsedResources().getMemory());
- assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(18 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(13 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(5 * GB, a.getMetrics().getReservedMB());
assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
assertEquals(6 * GB, a.getMetrics().getAvailableMB());
- assertEquals(6 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(6 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource()
- .getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(5 * GB, node_2.getAllocatedResource().getMemory());
+ .getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(5 * GB, node_2.getAllocatedResource().getMemoryLong());
assertEquals(1, app_0.getTotalRequiredResources(priorityReduce));
}
@@ -723,66 +723,66 @@ public void testAssignContainersNeedToUnreserve() throws Exception {
// Only AM
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(2 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
assertEquals(14 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
// Only 1 map - simulating reduce
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5 * GB, a.getUsedResources().getMemory());
- assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(5 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(5 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
assertEquals(11 * GB, a.getMetrics().getAvailableMB());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
// Only 1 map to other node - simulating reduce
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(8 * GB, a.getMetrics().getAvailableMB());
- assertEquals(8 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(8 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(null, node_0.getReservedContainer());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
// try to assign reducer (5G on node 0 and should reserve)
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(13 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(13 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(5 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(3 * GB, a.getMetrics().getAvailableMB());
- assertEquals(3 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(3 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource()
- .getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
+ .getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
assertEquals(2, app_0.getTotalRequiredResources(priorityReduce));
// could allocate but told need to unreserve first
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(13 * GB, a.getUsedResources().getMemory());
- assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(13 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(13 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
assertEquals(3 * GB, a.getMetrics().getAvailableMB());
- assertEquals(3 * GB, app_0.getHeadroom().getMemory());
+ assertEquals(3 * GB, app_0.getHeadroom().getMemoryLong());
assertEquals(null, node_0.getReservedContainer());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(8 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(8 * GB, node_1.getAllocatedResource().getMemoryLong());
assertEquals(1, app_0.getTotalRequiredResources(priorityReduce));
}
@@ -986,50 +986,50 @@ public void testAssignToQueue() throws Exception {
// Only AM
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(2 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
assertEquals(14 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
// Only 1 map - simulating reduce
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5 * GB, a.getUsedResources().getMemory());
- assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(5 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(5 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
assertEquals(11 * GB, a.getMetrics().getAvailableMB());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
// Only 1 map to other node - simulating reduce
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(8 * GB, a.getMetrics().getAvailableMB());
assertEquals(null, node_0.getReservedContainer());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
// now add in reservations and make sure it continues if config set
    // allocate to queue so that the potential new capacity is greater than
// absoluteMaxCapacity
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(13 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(13 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(5 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(3 * GB, a.getMetrics().getAvailableMB());
- assertEquals(3 * GB, app_0.getHeadroom().getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(3 * GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
ResourceLimits limits =
new ResourceLimits(Resources.createResource(13 * GB));
@@ -1042,7 +1042,7 @@ public void testAssignToQueue() throws Exception {
// 16GB total, 13GB consumed (8 allocated, 5 reserved). asking for 5GB so we would have to
// unreserve 2GB to get the total 5GB needed.
// also note vcore checks not enabled
- assertEquals(0, limits.getHeadroom().getMemory());
+ assertEquals(0, limits.getHeadroom().getMemoryLong());
refreshQueuesTurnOffReservationsContLook(a, csConf);
@@ -1160,52 +1160,52 @@ public void testAssignToUser() throws Exception {
// Only AM
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(2 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
assertEquals(14 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
// Only 1 map - simulating reduce
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5 * GB, a.getUsedResources().getMemory());
- assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(5 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(5 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
assertEquals(11 * GB, a.getMetrics().getAvailableMB());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
// Only 1 map to other node - simulating reduce
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(8 * GB, a.getMetrics().getAvailableMB());
assertEquals(null, node_0.getReservedContainer());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
// now add in reservations and make sure it continues if config set
    // allocate to queue so that the potential new capacity is greater than
// absoluteMaxCapacity
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(13 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
- assertEquals(5 * GB, app_0.getCurrentReservation().getMemory());
+ assertEquals(13 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
+ assertEquals(5 * GB, app_0.getCurrentReservation().getMemoryLong());
assertEquals(5 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(3 * GB, a.getMetrics().getAvailableMB());
- assertEquals(3 * GB, app_0.getHeadroom().getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
+ assertEquals(3 * GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
// not over the limit
Resource limit = Resources.createResource(14 * GB, 0);
@@ -1312,39 +1312,39 @@ public void testReservationsNoneAvailable() throws Exception {
// Only AM
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(2 * GB, a.getUsedResources().getMemory());
- assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(2 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(2 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(2 * GB, a.getMetrics().getAllocatedMB());
assertEquals(22 * GB, a.getMetrics().getAvailableMB());
- assertEquals(2 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// Only 1 map - simulating reduce
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(5 * GB, a.getUsedResources().getMemory());
- assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(5 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(5 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(5 * GB, a.getMetrics().getAllocatedMB());
assertEquals(19 * GB, a.getMetrics().getAvailableMB());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// Only 1 map to other node - simulating reduce
a.assignContainers(clusterResource, node_1,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(16 * GB, a.getMetrics().getAvailableMB());
- assertEquals(16 * GB, app_0.getHeadroom().getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(16 * GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
    // try to assign reducer (5G on node 0), but tell it its resource limits are <
    // used (8G) + required (5G). It will not reserve since it has to unreserve
@@ -1352,72 +1352,72 @@ public void testReservationsNoneAvailable() throws Exception {
// unreserve resource to reserve container.
a.assignContainers(clusterResource, node_0,
new ResourceLimits(Resources.createResource(10 * GB)), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(16 * GB, a.getMetrics().getAvailableMB());
// app_0's headroom = limit (10G) - used (8G) = 2G
- assertEquals(2 * GB, app_0.getHeadroom().getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
    // try to assign reducer (5G on node 0), but tell it its resource limits are <
    // used (8G) + required (5G). It will not reserve since it has to unreserve
// some resource. Unfortunately, there's nothing to unreserve.
a.assignContainers(clusterResource, node_2,
new ResourceLimits(Resources.createResource(10 * GB)), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(8 * GB, a.getUsedResources().getMemory());
- assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(8 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(8 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
assertEquals(16 * GB, a.getMetrics().getAvailableMB());
// app_0's headroom = limit (10G) - used (8G) = 2G
- assertEquals(2 * GB, app_0.getHeadroom().getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(0 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(2 * GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(0 * GB, node_2.getAllocatedResource().getMemoryLong());
// let it assign 5G to node_2
a.assignContainers(clusterResource, node_2,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(13 * GB, a.getUsedResources().getMemory());
- assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(13 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(13 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(0 * GB, a.getMetrics().getReservedMB());
assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
assertEquals(11 * GB, a.getMetrics().getAvailableMB());
- assertEquals(11 * GB, app_0.getHeadroom().getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(5 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(11 * GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(5 * GB, node_2.getAllocatedResource().getMemoryLong());
// reserve 8G node_0
a.assignContainers(clusterResource, node_0,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(21 * GB, a.getUsedResources().getMemory());
- assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(21 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(13 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(8 * GB, a.getMetrics().getReservedMB());
assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
assertEquals(3 * GB, a.getMetrics().getAvailableMB());
- assertEquals(3 * GB, app_0.getHeadroom().getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(5 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(3 * GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(5 * GB, node_2.getAllocatedResource().getMemoryLong());
// try to assign (8G on node 2). No room to allocate,
// continued to try due to having reservation above,
// but hits queue limits so can't reserve anymore.
a.assignContainers(clusterResource, node_2,
new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
- assertEquals(21 * GB, a.getUsedResources().getMemory());
- assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
+ assertEquals(21 * GB, a.getUsedResources().getMemoryLong());
+ assertEquals(13 * GB, app_0.getCurrentConsumption().getMemoryLong());
assertEquals(8 * GB, a.getMetrics().getReservedMB());
assertEquals(13 * GB, a.getMetrics().getAllocatedMB());
assertEquals(3 * GB, a.getMetrics().getAvailableMB());
- assertEquals(3 * GB, app_0.getHeadroom().getMemory());
- assertEquals(5 * GB, node_0.getAllocatedResource().getMemory());
- assertEquals(3 * GB, node_1.getAllocatedResource().getMemory());
- assertEquals(5 * GB, node_2.getAllocatedResource().getMemory());
+ assertEquals(3 * GB, app_0.getHeadroom().getMemoryLong());
+ assertEquals(5 * GB, node_0.getAllocatedResource().getMemoryLong());
+ assertEquals(3 * GB, node_1.getAllocatedResource().getMemoryLong());
+ assertEquals(5 * GB, node_2.getAllocatedResource().getMemoryLong());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java
index eeec940..6a7bcc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java
@@ -112,14 +112,14 @@ private void checkAppResourceUsage(String partition, ApplicationId appId,
FiCaSchedulerApp app =
cs.getSchedulerApplications().get(appId).getCurrentAppAttempt();
Assert.assertEquals(expectedMemUsage, app.getAppAttemptResourceUsage()
- .getUsed(partition).getMemory());
+ .getUsed(partition).getMemoryLong());
}
private void checkQueueResourceUsage(String partition, String queueName, MockRM rm, int expectedMemUsage) {
CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
CSQueue queue = cs.getQueue(queueName);
Assert.assertEquals(expectedMemUsage, queue.getQueueResourceUsage()
- .getUsed(partition).getMemory());
+ .getUsed(partition).getMemoryLong());
}
@Test
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index 0e1d904..f389539 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -275,8 +275,8 @@ protected void checkAppConsumption(FSAppAttempt app, Resource resource)
}
// available resource
- Assert.assertEquals(resource.getMemory(),
- app.getCurrentConsumption().getMemory());
+ Assert.assertEquals(resource.getMemoryLong(),
+ app.getCurrentConsumption().getMemoryLong());
Assert.assertEquals(resource.getVirtualCores(),
app.getCurrentConsumption().getVirtualCores());
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java
index 9d8dd07..13ade81 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java
@@ -196,7 +196,7 @@ public void testCPU() {
private void verifyMemoryShares(int... shares) {
Assert.assertEquals(scheds.size(), shares.length);
for (int i = 0; i < shares.length; i++) {
- Assert.assertEquals(shares[i], scheds.get(i).getFairShare().getMemory());
+ Assert.assertEquals(shares[i], scheds.get(i).getFairShare().getMemoryLong());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index 2e7b3f8..667baf0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
@@ -125,7 +125,7 @@ public void testSortedNodes() throws Exception {
scheduler.handle(nodeEvent2);
// available resource
- Assert.assertEquals(scheduler.getClusterResource().getMemory(), 16 * 1024);
+ Assert.assertEquals(16 * 1024, scheduler.getClusterResource().getMemoryLong());
Assert.assertEquals(scheduler.getClusterResource().getVirtualCores(), 16);
// send application request
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
index e733b1c..8d656a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java
@@ -238,9 +238,9 @@ public void testHeadroom() {
Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
.getInstance(DominantResourceFairnessPolicy.class));
verifyHeadroom(schedulerApp,
- min(queueStarvation.getMemory(),
- clusterAvailable.getMemory(),
- queueMaxResourcesAvailable.getMemory()),
+ min(queueStarvation.getMemoryLong(),
+ clusterAvailable.getMemoryLong(),
+ queueMaxResourcesAvailable.getMemoryLong()),
min(queueStarvation.getVirtualCores(),
clusterAvailable.getVirtualCores(),
queueMaxResourcesAvailable.getVirtualCores())
@@ -250,9 +250,9 @@ public void testHeadroom() {
Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
.getInstance(FairSharePolicy.class));
verifyHeadroom(schedulerApp,
- min(queueStarvation.getMemory(),
- clusterAvailable.getMemory(),
- queueMaxResourcesAvailable.getMemory()),
+ min(queueStarvation.getMemoryLong(),
+ clusterAvailable.getMemoryLong(),
+ queueMaxResourcesAvailable.getMemoryLong()),
Math.min(
clusterAvailable.getVirtualCores(),
queueMaxResourcesAvailable.getVirtualCores())
@@ -261,9 +261,9 @@ public void testHeadroom() {
Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy
.getInstance(FifoPolicy.class));
verifyHeadroom(schedulerApp,
- min(queueStarvation.getMemory(),
- clusterAvailable.getMemory(),
- queueMaxResourcesAvailable.getMemory()),
+ min(queueStarvation.getMemoryLong(),
+ clusterAvailable.getMemoryLong(),
+ queueMaxResourcesAvailable.getMemoryLong()),
Math.min(
clusterAvailable.getVirtualCores(),
queueMaxResourcesAvailable.getVirtualCores())
@@ -288,9 +288,9 @@ public void testHeadroomWithBlackListedNodes() {
Resource clusterResource = scheduler.getClusterResource();
Resource clusterUsage = scheduler.getRootQueueMetrics()
.getAllocatedResources();
- assertEquals(12 * 1024, clusterResource.getMemory());
+ assertEquals(12 * 1024, clusterResource.getMemoryLong());
assertEquals(12, clusterResource.getVirtualCores());
- assertEquals(0, clusterUsage.getMemory());
+ assertEquals(0, clusterUsage.getMemoryLong());
assertEquals(0, clusterUsage.getVirtualCores());
ApplicationAttemptId id11 = createAppAttemptId(1, 1);
createMockRMApp(id11);
@@ -302,7 +302,7 @@ public void testHeadroomWithBlackListedNodes() {
FSAppAttempt app = scheduler.getSchedulerApp(id11);
assertNotNull(app);
Resource queueUsage = app.getQueue().getResourceUsage();
- assertEquals(0, queueUsage.getMemory());
+ assertEquals(0, queueUsage.getMemoryLong());
assertEquals(0, queueUsage.getVirtualCores());
SchedulerNode n1 = scheduler.getSchedulerNode(node1.getNodeID());
SchedulerNode n2 = scheduler.getSchedulerNode(node2.getNodeID());
@@ -337,14 +337,14 @@ public void testHeadroomWithBlackListedNodes() {
assertEquals(clusterResource, spyApp.getHeadroom());
}
- private static int min(int value1, int value2, int value3) {
+ private static long min(long value1, long value2, long value3) {
return Math.min(Math.min(value1, value2), value3);
}
protected void verifyHeadroom(FSAppAttempt schedulerApp,
- int expectedMemory, int expectedCPU) {
+ long expectedMemory, long expectedCPU) {
Resource headroom = schedulerApp.getHeadroom();
- assertEquals(expectedMemory, headroom.getMemory());
+ assertEquals(expectedMemory, headroom.getMemoryLong());
assertEquals(expectedCPU, headroom.getVirtualCores());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
index 7daccad..f5c6b77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
@@ -199,7 +199,7 @@ public void testIsStarvedForFairShare() throws Exception {
QueueManager queueMgr = scheduler.getQueueManager();
FSLeafQueue queueA = queueMgr.getLeafQueue("queueA", false);
- assertEquals(4 * 1024, queueA.getResourceUsage().getMemory());
+ assertEquals(4 * 1024, queueA.getResourceUsage().getMemoryLong());
// Both queue B1 and queue B2 want 3 * 1024
createSchedulingRequest(1 * 1024, "queueB.queueB1", "user1", 3);
@@ -211,8 +211,8 @@ public void testIsStarvedForFairShare() throws Exception {
FSLeafQueue queueB1 = queueMgr.getLeafQueue("queueB.queueB1", false);
FSLeafQueue queueB2 = queueMgr.getLeafQueue("queueB.queueB2", false);
- assertEquals(2 * 1024, queueB1.getResourceUsage().getMemory());
- assertEquals(2 * 1024, queueB2.getResourceUsage().getMemory());
+ assertEquals(2 * 1024, queueB1.getResourceUsage().getMemoryLong());
+ assertEquals(2 * 1024, queueB2.getResourceUsage().getMemoryLong());
// For queue B1, the fairSharePreemptionThreshold is 0.4, and the fair share
// threshold is 1.6 * 1024
@@ -225,8 +225,8 @@ public void testIsStarvedForFairShare() throws Exception {
// Node checks in again
scheduler.handle(nodeEvent2);
scheduler.handle(nodeEvent2);
- assertEquals(3 * 1024, queueB1.getResourceUsage().getMemory());
- assertEquals(3 * 1024, queueB2.getResourceUsage().getMemory());
+ assertEquals(3 * 1024, queueB1.getResourceUsage().getMemoryLong());
+ assertEquals(3 * 1024, queueB2.getResourceUsage().getMemoryLong());
// Both queue B1 and queue B2 usages go to 3 * 1024
assertFalse(queueB1.isStarvedForFairShare());
@@ -271,7 +271,7 @@ public void testIsStarvedForFairShareDRF() throws Exception {
QueueManager queueMgr = scheduler.getQueueManager();
FSLeafQueue queueA = queueMgr.getLeafQueue("queueA", false);
- assertEquals(7 * 1024, queueA.getResourceUsage().getMemory());
+ assertEquals(7 * 1024, queueA.getResourceUsage().getMemoryLong());
assertEquals(1, queueA.getResourceUsage().getVirtualCores());
// Queue B has 3 reqs :
@@ -286,7 +286,7 @@ public void testIsStarvedForFairShareDRF() throws Exception {
}
FSLeafQueue queueB = queueMgr.getLeafQueue("queueB", false);
- assertEquals(3 * 1024, queueB.getResourceUsage().getMemory());
+ assertEquals(3 * 1024, queueB.getResourceUsage().getMemoryLong());
assertEquals(6, queueB.getResourceUsage().getVirtualCores());
scheduler.update();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index a75b5ce..0fdaaca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -222,10 +222,10 @@ public void testLoadConfigurationOnInitialize() throws IOException {
Assert.assertEquals(10, scheduler.continuousSchedulingSleepMs);
Assert.assertEquals(5000, scheduler.nodeLocalityDelayMs);
Assert.assertEquals(5000, scheduler.rackLocalityDelayMs);
- Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemory());
- Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemory());
+ Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemoryLong());
+ Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemoryLong());
Assert.assertEquals(128,
- scheduler.getIncrementResourceCapability().getMemory());
+ scheduler.getIncrementResourceCapability().getMemoryLong());
}
@Test
@@ -240,9 +240,9 @@ public void testNonMinZeroResourcesSettings() throws IOException {
FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2);
scheduler.init(conf);
scheduler.reinitialize(conf, null);
- Assert.assertEquals(256, scheduler.getMinimumResourceCapability().getMemory());
+ Assert.assertEquals(256, scheduler.getMinimumResourceCapability().getMemoryLong());
Assert.assertEquals(1, scheduler.getMinimumResourceCapability().getVirtualCores());
- Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemory());
+ Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemoryLong());
Assert.assertEquals(2, scheduler.getIncrementResourceCapability().getVirtualCores());
}
@@ -258,9 +258,9 @@ public void testMinZeroResourcesSettings() throws IOException {
FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2);
scheduler.init(conf);
scheduler.reinitialize(conf, null);
- Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getMemory());
+ Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getMemoryLong());
Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getVirtualCores());
- Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemory());
+ Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemoryLong());
Assert.assertEquals(2, scheduler.getIncrementResourceCapability().getVirtualCores());
}
@@ -276,19 +276,19 @@ public void testAggregateCapacityTracking() throws Exception {
.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
- assertEquals(1024, scheduler.getClusterResource().getMemory());
+ assertEquals(1024, scheduler.getClusterResource().getMemoryLong());
// Add another node
RMNode node2 =
MockNodes.newNodeInfo(1, Resources.createResource(512), 2, "127.0.0.2");
NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
- assertEquals(1536, scheduler.getClusterResource().getMemory());
+ assertEquals(1536, scheduler.getClusterResource().getMemoryLong());
// Remove the first node
NodeRemovedSchedulerEvent nodeEvent3 = new NodeRemovedSchedulerEvent(node1);
scheduler.handle(nodeEvent3);
- assertEquals(512, scheduler.getClusterResource().getMemory());
+ assertEquals(512, scheduler.getClusterResource().getMemoryLong());
}
@Test
@@ -319,9 +319,9 @@ public void testSimpleFairShareCalculation() throws IOException {
// Divided three ways - between the two queues and the default queue
for (FSLeafQueue p : queues) {
- assertEquals(3414, p.getFairShare().getMemory());
+ assertEquals(3414, p.getFairShare().getMemoryLong());
assertEquals(3414, p.getMetrics().getFairShareMB());
- assertEquals(3414, p.getSteadyFairShare().getMemory());
+ assertEquals(3414, p.getSteadyFairShare().getMemoryLong());
assertEquals(3414, p.getMetrics().getSteadyFairShareMB());
}
}
@@ -367,11 +367,11 @@ public void testFairShareWithMaxResources() throws IOException {
FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue(
"queueA", false);
// queueA's weight is 0.25, so its fair share should be 2 * 1024.
- assertEquals(2 * 1024, queue.getFairShare().getMemory());
+ assertEquals(2 * 1024, queue.getFairShare().getMemoryLong());
// queueB's weight is 0.75, so its fair share should be 6 * 1024.
queue = scheduler.getQueueManager().getLeafQueue(
"queueB", false);
- assertEquals(6 * 1024, queue.getFairShare().getMemory());
+ assertEquals(6 * 1024, queue.getFairShare().getMemoryLong());
}
@Test
@@ -411,11 +411,11 @@ public void testFairShareWithZeroWeight() throws IOException {
FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue(
"queueA", false);
// queueA's weight is 0.0, so its fair share should be 0.
- assertEquals(0, queue.getFairShare().getMemory());
+ assertEquals(0, queue.getFairShare().getMemoryLong());
// queueB's weight is 0.0, so its fair share should be 0.
queue = scheduler.getQueueManager().getLeafQueue(
"queueB", false);
- assertEquals(0, queue.getFairShare().getMemory());
+ assertEquals(0, queue.getFairShare().getMemoryLong());
}
@Test
@@ -459,12 +459,12 @@ public void testFairShareWithZeroWeightNoneZeroMinRes() throws IOException {
"queueA", false);
// queueA's weight is 0.0 and minResources is 1,
// so its fair share should be 1 (minShare).
- assertEquals(1, queue.getFairShare().getMemory());
+ assertEquals(1, queue.getFairShare().getMemoryLong());
// queueB's weight is 0.0 and minResources is 1,
// so its fair share should be 1 (minShare).
queue = scheduler.getQueueManager().getLeafQueue(
"queueB", false);
- assertEquals(1, queue.getFairShare().getMemory());
+ assertEquals(1, queue.getFairShare().getMemoryLong());
}
@Test
@@ -509,12 +509,12 @@ public void testFairShareWithNoneZeroWeightNoneZeroMinRes()
"queueA", false);
// queueA's weight is 0.5 and minResources is 1024,
// so its fair share should be 4096.
- assertEquals(4096, queue.getFairShare().getMemory());
+ assertEquals(4096, queue.getFairShare().getMemoryLong());
// queueB's weight is 0.5 and minResources is 1024,
// so its fair share should be 4096.
queue = scheduler.getQueueManager().getLeafQueue(
"queueB", false);
- assertEquals(4096, queue.getFairShare().getMemory());
+ assertEquals(4096, queue.getFairShare().getMemoryLong());
}
@Test
@@ -608,17 +608,17 @@ public void testSimpleHierarchicalFairShareCalculation() throws IOException {
FSLeafQueue queue1 = queueManager.getLeafQueue("default", true);
FSLeafQueue queue2 = queueManager.getLeafQueue("parent.queue2", true);
FSLeafQueue queue3 = queueManager.getLeafQueue("parent.queue3", true);
- assertEquals(capacity / 2, queue1.getFairShare().getMemory());
+ assertEquals(capacity / 2, queue1.getFairShare().getMemoryLong());
assertEquals(capacity / 2, queue1.getMetrics().getFairShareMB());
- assertEquals(capacity / 2, queue1.getSteadyFairShare().getMemory());
+ assertEquals(capacity / 2, queue1.getSteadyFairShare().getMemoryLong());
assertEquals(capacity / 2, queue1.getMetrics().getSteadyFairShareMB());
- assertEquals(capacity / 4, queue2.getFairShare().getMemory());
+ assertEquals(capacity / 4, queue2.getFairShare().getMemoryLong());
assertEquals(capacity / 4, queue2.getMetrics().getFairShareMB());
- assertEquals(capacity / 4, queue2.getSteadyFairShare().getMemory());
+ assertEquals(capacity / 4, queue2.getSteadyFairShare().getMemoryLong());
assertEquals(capacity / 4, queue2.getMetrics().getSteadyFairShareMB());
- assertEquals(capacity / 4, queue3.getFairShare().getMemory());
+ assertEquals(capacity / 4, queue3.getFairShare().getMemoryLong());
assertEquals(capacity / 4, queue3.getMetrics().getFairShareMB());
- assertEquals(capacity / 4, queue3.getSteadyFairShare().getMemory());
+ assertEquals(capacity / 4, queue3.getSteadyFairShare().getMemoryLong());
assertEquals(capacity / 4, queue3.getMetrics().getSteadyFairShareMB());
}
@@ -719,13 +719,13 @@ public void testSimpleContainerAllocation() throws IOException {
assertEquals(
FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2);
scheduler.handle(updateEvent2);
assertEquals(1024, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
assertEquals(2, scheduler.getQueueManager().getQueue("queue1").
getResourceUsage().getVirtualCores());
@@ -762,7 +762,7 @@ public void testSimpleContainerReservation() throws Exception {
// Make sure queue 1 is allocated app capacity
assertEquals(1024, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
// Now queue 2 requests likewise
ApplicationAttemptId attId = createSchedulingRequest(1024, "queue2", "user1", 1);
@@ -772,8 +772,8 @@ public void testSimpleContainerReservation() throws Exception {
// Make sure queue 2 is waiting with a reservation
assertEquals(0, scheduler.getQueueManager().getQueue("queue2").
- getResourceUsage().getMemory());
- assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory());
+ getResourceUsage().getMemoryLong());
+ assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemoryLong());
// Now another node checks in with capacity
RMNode node2 =
@@ -786,13 +786,13 @@ public void testSimpleContainerReservation() throws Exception {
// Make sure this goes to queue 2
assertEquals(1024, scheduler.getQueueManager().getQueue("queue2").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
// The old reservation should still be there...
- assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory());
+ assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemoryLong());
// ... but it should disappear when we update the first node.
scheduler.handle(updateEvent);
- assertEquals(0, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory());
+ assertEquals(0, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemoryLong());
}
@@ -836,7 +836,7 @@ public void testOffSwitchAppReservationThreshold() throws Exception {
// Verify capacity allocation
assertEquals(6144, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
// Create new app with a resource request that can be satisfied by any
// node but would be
@@ -868,7 +868,7 @@ public void testOffSwitchAppReservationThreshold() throws Exception {
scheduler.update();
scheduler.handle(new NodeUpdateSchedulerEvent(node4));
assertEquals(8192, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
scheduler.handle(new NodeUpdateSchedulerEvent(node1));
scheduler.handle(new NodeUpdateSchedulerEvent(node2));
@@ -929,7 +929,7 @@ public void testRackLocalAppReservationThreshold() throws Exception {
// Verify capacity allocation
assertEquals(8192, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
// Create new app with a resource request that can be satisfied by any
// node but would be
@@ -974,7 +974,7 @@ public void testRackLocalAppReservationThreshold() throws Exception {
scheduler.update();
scheduler.handle(new NodeUpdateSchedulerEvent(node4));
assertEquals(10240, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
scheduler.handle(new NodeUpdateSchedulerEvent(node1));
scheduler.handle(new NodeUpdateSchedulerEvent(node2));
@@ -1017,7 +1017,7 @@ public void testReservationThresholdWithAssignMultiple() throws Exception {
// Verify capacity allocation
assertEquals(8192, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
// Verify number of reservations have decremented
assertEquals(0,
@@ -1061,7 +1061,7 @@ public void testContainerReservationAttemptExceedingQueueMax()
// Make sure queue 1 is allocated app capacity
assertEquals(2048, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
// Now queue 2 requests likewise
createSchedulingRequest(1024, "queue2", "user2", 1);
@@ -1070,7 +1070,7 @@ public void testContainerReservationAttemptExceedingQueueMax()
// Make sure queue 2 is allocated app capacity
assertEquals(1024, scheduler.getQueueManager().getQueue("queue2").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 1);
scheduler.update();
@@ -1079,7 +1079,7 @@ public void testContainerReservationAttemptExceedingQueueMax()
// Ensure the reservation does not get created as allocated memory of
// queue1 exceeds max
assertEquals(0, scheduler.getSchedulerApp(attId1).
- getCurrentReservation().getMemory());
+ getCurrentReservation().getMemoryLong());
}
@Test (timeout = 500000)
@@ -1118,7 +1118,7 @@ public void testContainerReservationNotExceedingQueueMax() throws Exception {
// Make sure queue 1 is allocated app capacity
assertEquals(2048, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
// Now queue 2 requests likewise
createSchedulingRequest(1024, "queue2", "user2", 1);
@@ -1127,7 +1127,7 @@ public void testContainerReservationNotExceedingQueueMax() throws Exception {
// Make sure queue 2 is allocated app capacity
assertEquals(1024, scheduler.getQueueManager().getQueue("queue2").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 1);
scheduler.update();
@@ -1135,7 +1135,7 @@ public void testContainerReservationNotExceedingQueueMax() throws Exception {
// Make sure queue 1 is waiting with a reservation
assertEquals(1024, scheduler.getSchedulerApp(attId1)
- .getCurrentReservation().getMemory());
+ .getCurrentReservation().getMemoryLong());
// Exercise checks that reservation fits
scheduler.handle(updateEvent);
@@ -1143,7 +1143,7 @@ public void testContainerReservationNotExceedingQueueMax() throws Exception {
// Ensure the reservation still exists as allocated memory of queue1 doesn't
// exceed max
assertEquals(1024, scheduler.getSchedulerApp(attId1).
- getCurrentReservation().getMemory());
+ getCurrentReservation().getMemoryLong());
// Now reduce max Resources of queue1 down to 2048
out = new PrintWriter(new FileWriter(ALLOC_FILE));
@@ -1167,12 +1167,12 @@ public void testContainerReservationNotExceedingQueueMax() throws Exception {
// Make sure allocated memory of queue1 doesn't exceed its maximum
assertEquals(2048, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
//the reservation of queue1 should be reclaim
assertEquals(0, scheduler.getSchedulerApp(attId1).
- getCurrentReservation().getMemory());
+ getCurrentReservation().getMemoryLong());
assertEquals(1024, scheduler.getQueueManager().getQueue("queue2").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
}
@Test
@@ -1212,7 +1212,7 @@ public void testReservationThresholdGatesReservations() throws Exception {
// Make sure queue 1 is allocated app capacity
assertEquals(4096, scheduler.getQueueManager().getQueue("queue1").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
// Now queue 2 requests below threshold
ApplicationAttemptId attId = createSchedulingRequest(1024, "queue2", "user1", 1);
@@ -1221,7 +1221,7 @@ public void testReservationThresholdGatesReservations() throws Exception {
// Make sure queue 2 has no reservation
assertEquals(0, scheduler.getQueueManager().getQueue("queue2").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
assertEquals(0,
scheduler.getSchedulerApp(attId).getReservedContainers().size());
@@ -1232,7 +1232,7 @@ public void testReservationThresholdGatesReservations() throws Exception {
// Make sure queue 2 is waiting with a reservation
assertEquals(0, scheduler.getQueueManager().getQueue("queue2").
- getResourceUsage().getMemory());
+ getResourceUsage().getMemoryLong());
assertEquals(3, scheduler.getSchedulerApp(attId).getCurrentReservation()
.getVirtualCores());
@@ -1445,10 +1445,10 @@ public void testFairShareWithMinAlloc() throws Exception {
for (FSLeafQueue p : queues) {
if (p.getName().equals("root.queueA")) {
- assertEquals(1024, p.getFairShare().getMemory());
+ assertEquals(1024, p.getFairShare().getMemoryLong());
}
else if (p.getName().equals("root.queueB")) {
- assertEquals(2048, p.getFairShare().getMemory());
+ assertEquals(2048, p.getFairShare().getMemoryLong());
}
}
}
@@ -1537,9 +1537,9 @@ public void testFairShareAndWeightsInNestedUserQueueRule() throws Exception {
if (leaf.getName().equals("root.parentq.user1")
|| leaf.getName().equals("root.parentq.user2")) {
// assert that the fair share is 1/4th node1's capacity
- assertEquals(capacity / 4, leaf.getFairShare().getMemory());
+ assertEquals(capacity / 4, leaf.getFairShare().getMemoryLong());
// assert that the steady fair share is 1/4th node1's capacity
- assertEquals(capacity / 4, leaf.getSteadyFairShare().getMemory());
+ assertEquals(capacity / 4, leaf.getSteadyFairShare().getMemoryLong());
// assert weights are equal for both the user queues
assertEquals(1.0, leaf.getWeights().getWeight(ResourceType.MEMORY), 0);
}
@@ -1573,9 +1573,9 @@ public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception {
// The steady fair share for all queues should be 0
QueueManager queueManager = scheduler.getQueueManager();
assertEquals(0, queueManager.getLeafQueue("child1", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
assertEquals(0, queueManager.getLeafQueue("child2", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
// Add one node
RMNode node1 =
@@ -1583,13 +1583,13 @@ public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception {
.newNodeInfo(1, Resources.createResource(6144), 1, "127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
- assertEquals(6144, scheduler.getClusterResource().getMemory());
+ assertEquals(6144, scheduler.getClusterResource().getMemoryLong());
// The steady fair shares for all queues should be updated
assertEquals(2048, queueManager.getLeafQueue("child1", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
assertEquals(2048, queueManager.getLeafQueue("child2", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
// Reload the allocation configuration file
out = new PrintWriter(new FileWriter(ALLOC_FILE));
@@ -1614,20 +1614,20 @@ public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception {
// The steady fair shares for all queues should be updated
assertEquals(1024, queueManager.getLeafQueue("child1", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
assertEquals(2048, queueManager.getLeafQueue("child2", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
assertEquals(2048, queueManager.getLeafQueue("child3", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
// Remove the node, steady fair shares should back to 0
NodeRemovedSchedulerEvent nodeEvent2 = new NodeRemovedSchedulerEvent(node1);
scheduler.handle(nodeEvent2);
- assertEquals(0, scheduler.getClusterResource().getMemory());
+ assertEquals(0, scheduler.getClusterResource().getMemoryLong());
assertEquals(0, queueManager.getLeafQueue("child1", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
assertEquals(0, queueManager.getLeafQueue("child2", false)
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
}
@Test
@@ -1645,19 +1645,19 @@ public void testSteadyFairShareWithQueueCreatedRuntime() throws Exception {
.newNodeInfo(1, Resources.createResource(6144), 1, "127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
- assertEquals(6144, scheduler.getClusterResource().getMemory());
+ assertEquals(6144, scheduler.getClusterResource().getMemoryLong());
assertEquals(6144, scheduler.getQueueManager().getRootQueue()
- .getSteadyFairShare().getMemory());
+ .getSteadyFairShare().getMemoryLong());
assertEquals(6144, scheduler.getQueueManager()
- .getLeafQueue("default", false).getSteadyFairShare().getMemory());
+ .getLeafQueue("default", false).getSteadyFairShare().getMemoryLong());
// Submit one application
ApplicationAttemptId appAttemptId1 = createAppAttemptId(1, 1);
createApplicationWithAMResource(appAttemptId1, "default", "user1", null);
assertEquals(3072, scheduler.getQueueManager()
- .getLeafQueue("default", false).getSteadyFairShare().getMemory());
+ .getLeafQueue("default", false).getSteadyFairShare().getMemoryLong());
assertEquals(3072, scheduler.getQueueManager()
- .getLeafQueue("user1", false).getSteadyFairShare().getMemory());
+ .getLeafQueue("user1", false).getSteadyFairShare().getMemoryLong());
}
/**
@@ -1713,10 +1713,10 @@ public void testQueueDemandCalculation() throws Exception {
scheduler.update();
assertEquals(2 * minReqSize, scheduler.getQueueManager().getQueue("root.queue1")
- .getDemand().getMemory());
+ .getDemand().getMemoryLong());
assertEquals(2 * minReqSize + 2 * minReqSize + (2 * minReqSize), scheduler
.getQueueManager().getQueue("root.queue2").getDemand()
- .getMemory());
+ .getMemoryLong());
}
@Test
@@ -1826,9 +1826,9 @@ public void testMultipleContainersWaitingForReservation() throws IOException {
// One container should get reservation and the other should get nothing
assertEquals(1024,
- scheduler.getSchedulerApp(attId1).getCurrentReservation().getMemory());
+ scheduler.getSchedulerApp(attId1).getCurrentReservation().getMemoryLong());
assertEquals(0,
- scheduler.getSchedulerApp(attId2).getCurrentReservation().getMemory());
+ scheduler.getSchedulerApp(attId2).getCurrentReservation().getMemoryLong());
}
@Test (timeout = 5000)
@@ -3097,7 +3097,7 @@ public void testQueueMaxAMShare() throws Exception {
FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true);
assertEquals("Queue queue1's fair share should be 0", 0, queue1
- .getFairShare().getMemory());
+ .getFairShare().getMemoryLong());
createSchedulingRequest(1 * 1024, "root.default", "user1");
scheduler.update();
@@ -3115,11 +3115,11 @@ public void testQueueMaxAMShare() throws Exception {
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 1024 MB memory",
- 1024, app1.getAMResource().getMemory());
+ 1024, app1.getAMResource().getMemoryLong());
assertEquals("Application1's AM should be running",
1, app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 1024 MB memory",
- 1024, queue1.getAmResourceUsage().getMemory());
+ 1024, queue1.getAmResourceUsage().getMemoryLong());
// Exceeds no limits
ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
@@ -3129,11 +3129,11 @@ public void testQueueMaxAMShare() throws Exception {
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM requests 1024 MB memory",
- 1024, app2.getAMResource().getMemory());
+ 1024, app2.getAMResource().getMemoryLong());
assertEquals("Application2's AM should be running",
1, app2.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Exceeds queue limit
ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
@@ -3143,11 +3143,11 @@ public void testQueueMaxAMShare() throws Exception {
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application3's AM resource shouldn't be updated",
- 0, app3.getAMResource().getMemory());
+ 0, app3.getAMResource().getMemoryLong());
assertEquals("Application3's AM should not be running",
0, app3.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Still can run non-AM container
createSchedulingRequestExistingApplication(1024, 1, attId1);
@@ -3156,7 +3156,7 @@ public void testQueueMaxAMShare() throws Exception {
assertEquals("Application1 should have two running containers",
2, app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Remove app1, app3's AM should become running
AppAttemptRemovedSchedulerEvent appRemovedEvent1 =
@@ -3169,9 +3169,9 @@ public void testQueueMaxAMShare() throws Exception {
assertEquals("Application3's AM should be running",
1, app3.getLiveContainers().size());
assertEquals("Application3's AM requests 1024 MB memory",
- 1024, app3.getAMResource().getMemory());
+ 1024, app3.getAMResource().getMemoryLong());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Exceeds queue limit
ApplicationAttemptId attId4 = createAppAttemptId(4, 1);
@@ -3181,11 +3181,11 @@ public void testQueueMaxAMShare() throws Exception {
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application4's AM resource shouldn't be updated",
- 0, app4.getAMResource().getMemory());
+ 0, app4.getAMResource().getMemoryLong());
assertEquals("Application4's AM should not be running",
0, app4.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Exceeds queue limit
ApplicationAttemptId attId5 = createAppAttemptId(5, 1);
@@ -3195,11 +3195,11 @@ public void testQueueMaxAMShare() throws Exception {
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application5's AM resource shouldn't be updated",
- 0, app5.getAMResource().getMemory());
+ 0, app5.getAMResource().getMemoryLong());
assertEquals("Application5's AM should not be running",
0, app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Remove un-running app doesn't affect others
AppAttemptRemovedSchedulerEvent appRemovedEvent4 =
@@ -3210,7 +3210,7 @@ public void testQueueMaxAMShare() throws Exception {
assertEquals("Application5's AM should not be running",
0, app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Remove app2 and app3, app5's AM should become running
AppAttemptRemovedSchedulerEvent appRemovedEvent2 =
@@ -3228,9 +3228,9 @@ public void testQueueMaxAMShare() throws Exception {
assertEquals("Application5's AM should be running",
1, app5.getLiveContainers().size());
assertEquals("Application5's AM requests 2048 MB memory",
- 2048, app5.getAMResource().getMemory());
+ 2048, app5.getAMResource().getMemoryLong());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// request non-AM container for app5
createSchedulingRequestExistingApplication(1024, 1, attId5);
@@ -3245,7 +3245,7 @@ public void testQueueMaxAMShare() throws Exception {
assertEquals("Application5's AM should have 0 container",
0, app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
scheduler.update();
scheduler.handle(updateEvent);
// non-AM container should be allocated
@@ -3255,7 +3255,7 @@ public void testQueueMaxAMShare() throws Exception {
1, app5.getLiveContainers().size());
// check non-AM container allocation won't affect queue AmResourceUsage
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Check amResource normalization
ApplicationAttemptId attId6 = createAppAttemptId(6, 1);
@@ -3267,9 +3267,9 @@ public void testQueueMaxAMShare() throws Exception {
assertEquals("Application6's AM should not be running",
0, app6.getLiveContainers().size());
assertEquals("Application6's AM resource shouldn't be updated",
- 0, app6.getAMResource().getMemory());
+ 0, app6.getAMResource().getMemoryLong());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
// Remove all apps
AppAttemptRemovedSchedulerEvent appRemovedEvent5 =
@@ -3280,7 +3280,7 @@ public void testQueueMaxAMShare() throws Exception {
scheduler.handle(appRemovedEvent6);
scheduler.update();
assertEquals("Queue1's AM resource usage should be 0",
- 0, queue1.getAmResourceUsage().getMemory());
+ 0, queue1.getAmResourceUsage().getMemoryLong());
}
@Test
@@ -3319,23 +3319,23 @@ public void testQueueMaxAMShareDefault() throws Exception {
FSLeafQueue queue1 =
scheduler.getQueueManager().getLeafQueue("queue1", true);
assertEquals("Queue queue1's fair share should be 0", 0, queue1
- .getFairShare().getMemory());
+ .getFairShare().getMemoryLong());
FSLeafQueue queue2 =
scheduler.getQueueManager().getLeafQueue("queue2", true);
assertEquals("Queue queue2's fair share should be 0", 0, queue2
- .getFairShare().getMemory());
+ .getFairShare().getMemoryLong());
FSLeafQueue queue3 =
scheduler.getQueueManager().getLeafQueue("queue3", true);
assertEquals("Queue queue3's fair share should be 0", 0, queue3
- .getFairShare().getMemory());
+ .getFairShare().getMemoryLong());
FSLeafQueue queue4 =
scheduler.getQueueManager().getLeafQueue("queue4", true);
assertEquals("Queue queue4's fair share should be 0", 0, queue4
- .getFairShare().getMemory());
+ .getFairShare().getMemoryLong());
FSLeafQueue queue5 =
scheduler.getQueueManager().getLeafQueue("queue5", true);
assertEquals("Queue queue5's fair share should be 0", 0, queue5
- .getFairShare().getMemory());
+ .getFairShare().getMemoryLong());
List queues = Arrays.asList("root.queue3", "root.queue4",
"root.queue5");
@@ -3357,11 +3357,11 @@ public void testQueueMaxAMShareDefault() throws Exception {
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 1024 MB memory",
- 1024, app1.getAMResource().getMemory());
+ 1024, app1.getAMResource().getMemoryLong());
assertEquals("Application1's AM should be running",
1, app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 1024 MB memory",
- 1024, queue1.getAmResourceUsage().getMemory());
+ 1024, queue1.getAmResourceUsage().getMemoryLong());
// Now the fair share is 1639 MB, and the maxAMShare is 0.4f,
// so the AM is not accepted.
@@ -3372,11 +3372,11 @@ public void testQueueMaxAMShareDefault() throws Exception {
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM resource shouldn't be updated",
- 0, app2.getAMResource().getMemory());
+ 0, app2.getAMResource().getMemoryLong());
assertEquals("Application2's AM should not be running",
0, app2.getLiveContainers().size());
assertEquals("Queue2's AM resource usage should be 0 MB memory",
- 0, queue2.getAmResourceUsage().getMemory());
+ 0, queue2.getAmResourceUsage().getMemoryLong());
}
/**
@@ -3458,11 +3458,11 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Allocate app1's AM container on node1.
scheduler.handle(updateE1);
assertEquals("Application1's AM requests 1024 MB memory",
- 1024, app1.getAMResource().getMemory());
+ 1024, app1.getAMResource().getMemoryLong());
assertEquals("Application1's AM should be running",
1, app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 1024 MB memory",
- 1024, queue1.getAmResourceUsage().getMemory());
+ 1024, queue1.getAmResourceUsage().getMemoryLong());
ApplicationAttemptId attId2 = createAppAttemptId(2, 1);
createApplicationWithAMResource(attId2, "queue1", "user1", amResource2);
@@ -3472,11 +3472,11 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Allocate app2's AM container on node2.
scheduler.handle(updateE2);
assertEquals("Application2's AM requests 1024 MB memory",
- 1024, app2.getAMResource().getMemory());
+ 1024, app2.getAMResource().getMemoryLong());
assertEquals("Application2's AM should be running",
1, app2.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
ApplicationAttemptId attId3 = createAppAttemptId(3, 1);
createApplicationWithAMResource(attId3, "queue1", "user1", amResource3);
@@ -3489,11 +3489,11 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Similarly app3 reserves a container on node2.
scheduler.handle(updateE2);
assertEquals("Application3's AM resource shouldn't be updated",
- 0, app3.getAMResource().getMemory());
+ 0, app3.getAMResource().getMemoryLong());
assertEquals("Application3's AM should not be running",
0, app3.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
ApplicationAttemptId attId4 = createAppAttemptId(4, 1);
createApplicationWithAMResource(attId4, "queue1", "user1", amResource4);
@@ -3504,21 +3504,21 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// app3 already reserved its container on node1.
scheduler.handle(updateE1);
assertEquals("Application4's AM resource shouldn't be updated",
- 0, app4.getAMResource().getMemory());
+ 0, app4.getAMResource().getMemoryLong());
assertEquals("Application4's AM should not be running",
0, app4.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
scheduler.update();
// Allocate app4's AM container on node3.
scheduler.handle(updateE3);
assertEquals("Application4's AM requests 5120 MB memory",
- 5120, app4.getAMResource().getMemory());
+ 5120, app4.getAMResource().getMemoryLong());
assertEquals("Application4's AM should be running",
1, app4.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 7168 MB memory",
- 7168, queue1.getAmResourceUsage().getMemory());
+ 7168, queue1.getAmResourceUsage().getMemoryLong());
AppAttemptRemovedSchedulerEvent appRemovedEvent1 =
new AppAttemptRemovedSchedulerEvent(attId1,
@@ -3526,7 +3526,7 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Release app1's AM container on node1.
scheduler.handle(appRemovedEvent1);
assertEquals("Queue1's AM resource usage should be 6144 MB memory",
- 6144, queue1.getAmResourceUsage().getMemory());
+ 6144, queue1.getAmResourceUsage().getMemoryLong());
ApplicationAttemptId attId5 = createAppAttemptId(5, 1);
createApplicationWithAMResource(attId5, "queue1", "user1", amResource5);
@@ -3538,11 +3538,11 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// exceeding queue MaxAMShare limit.
scheduler.handle(updateE1);
assertEquals("Application5's AM requests 1024 MB memory",
- 1024, app5.getAMResource().getMemory());
+ 1024, app5.getAMResource().getMemoryLong());
assertEquals("Application5's AM should be running",
1, app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 7168 MB memory",
- 7168, queue1.getAmResourceUsage().getMemory());
+ 7168, queue1.getAmResourceUsage().getMemoryLong());
AppAttemptRemovedSchedulerEvent appRemovedEvent3 =
new AppAttemptRemovedSchedulerEvent(attId3,
@@ -3550,7 +3550,7 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Remove app3.
scheduler.handle(appRemovedEvent3);
assertEquals("Queue1's AM resource usage should be 7168 MB memory",
- 7168, queue1.getAmResourceUsage().getMemory());
+ 7168, queue1.getAmResourceUsage().getMemoryLong());
ApplicationAttemptId attId6 = createAppAttemptId(6, 1);
createApplicationWithAMResource(attId6, "queue1", "user1", amResource6);
@@ -3561,11 +3561,11 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// it exceeds queue MaxAMShare limit.
scheduler.handle(updateE1);
assertEquals("Application6's AM resource shouldn't be updated",
- 0, app6.getAMResource().getMemory());
+ 0, app6.getAMResource().getMemoryLong());
assertEquals("Application6's AM should not be running",
0, app6.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 7168 MB memory",
- 7168, queue1.getAmResourceUsage().getMemory());
+ 7168, queue1.getAmResourceUsage().getMemoryLong());
ApplicationAttemptId attId7 = createAppAttemptId(7, 1);
createApplicationWithAMResource(attId7, "queue1", "user1", amResource7);
@@ -3576,11 +3576,11 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// app6 didn't reserve a container on node1.
scheduler.handle(updateE1);
assertEquals("Application7's AM requests 1024 MB memory",
- 1024, app7.getAMResource().getMemory());
+ 1024, app7.getAMResource().getMemoryLong());
assertEquals("Application7's AM should be running",
1, app7.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 8192 MB memory",
- 8192, queue1.getAmResourceUsage().getMemory());
+ 8192, queue1.getAmResourceUsage().getMemoryLong());
AppAttemptRemovedSchedulerEvent appRemovedEvent4 =
new AppAttemptRemovedSchedulerEvent(attId4,
@@ -3588,7 +3588,7 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Release app4's AM container on node3.
scheduler.handle(appRemovedEvent4);
assertEquals("Queue1's AM resource usage should be 3072 MB memory",
- 3072, queue1.getAmResourceUsage().getMemory());
+ 3072, queue1.getAmResourceUsage().getMemoryLong());
AppAttemptRemovedSchedulerEvent appRemovedEvent5 =
new AppAttemptRemovedSchedulerEvent(attId5,
@@ -3596,7 +3596,7 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Release app5's AM container on node1.
scheduler.handle(appRemovedEvent5);
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
scheduler.update();
// app6 reserves a container on node1 because node1's available resource
@@ -3615,21 +3615,21 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// app6 already reserved a container on node1.
scheduler.handle(updateE1);
assertEquals("Application8's AM resource shouldn't be updated",
- 0, app8.getAMResource().getMemory());
+ 0, app8.getAMResource().getMemoryLong());
assertEquals("Application8's AM should not be running",
0, app8.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
scheduler.update();
// app8 can't allocate a container on node2 because
// app6 already reserved a container on node2.
scheduler.handle(updateE2);
assertEquals("Application8's AM resource shouldn't be updated",
- 0, app8.getAMResource().getMemory());
+ 0, app8.getAMResource().getMemoryLong());
assertEquals("Application8's AM should not be running",
0, app8.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",
- 2048, queue1.getAmResourceUsage().getMemory());
+ 2048, queue1.getAmResourceUsage().getMemoryLong());
AppAttemptRemovedSchedulerEvent appRemovedEvent2 =
new AppAttemptRemovedSchedulerEvent(attId2,
@@ -3637,17 +3637,17 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Release app2's AM container on node2.
scheduler.handle(appRemovedEvent2);
assertEquals("Queue1's AM resource usage should be 1024 MB memory",
- 1024, queue1.getAmResourceUsage().getMemory());
+ 1024, queue1.getAmResourceUsage().getMemoryLong());
scheduler.update();
// app6 turns the reservation into an allocation on node2.
scheduler.handle(updateE2);
assertEquals("Application6's AM requests 10240 MB memory",
- 10240, app6.getAMResource().getMemory());
+ 10240, app6.getAMResource().getMemoryLong());
assertEquals("Application6's AM should be running",
1, app6.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 11264 MB memory",
- 11264, queue1.getAmResourceUsage().getMemory());
+ 11264, queue1.getAmResourceUsage().getMemoryLong());
scheduler.update();
// app6 unreserve its container on node1 because
@@ -3655,11 +3655,11 @@ public void testQueueMaxAMShareWithContainerReservation() throws Exception {
// Now app8 can allocate its AM container on node1.
scheduler.handle(updateE1);
assertEquals("Application8's AM requests 1024 MB memory",
- 1024, app8.getAMResource().getMemory());
+ 1024, app8.getAMResource().getMemoryLong());
assertEquals("Application8's AM should be running",
1, app8.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 12288 MB memory",
- 12288, queue1.getAmResourceUsage().getMemory());
+ 12288, queue1.getAmResourceUsage().getMemoryLong());
}
@Test
@@ -4352,23 +4352,23 @@ public void handle(Event event) {
new NodeUpdateSchedulerEvent(spyNode));
// Check the used resource is 0 GB 0 core
- // Assert.assertEquals(1 * GB, nm_0.getUsed().getMemory());
+ // Assert.assertEquals(1 * GB, nm_0.getUsed().getMemoryLong());
Resource usedResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getAllocatedResource();
- Assert.assertEquals(usedResource.getMemory(), 0);
+ Assert.assertEquals(usedResource.getMemoryLong(), 0);
Assert.assertEquals(usedResource.getVirtualCores(), 0);
// Check total resource of scheduler node is also changed to 0 GB 0 core
Resource totalResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getTotalResource();
- Assert.assertEquals(totalResource.getMemory(), 0 * GB);
+ Assert.assertEquals(totalResource.getMemoryLong(), 0 * GB);
Assert.assertEquals(totalResource.getVirtualCores(), 0);
// Check the available resource is 0/0
Resource availableResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource();
- Assert.assertEquals(availableResource.getMemory(), 0);
+ Assert.assertEquals(availableResource.getMemoryLong(), 0);
Assert.assertEquals(availableResource.getVirtualCores(), 0);
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java
index ab8fcbc..d2b5ae4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java
@@ -109,10 +109,10 @@ public void testFairShareNoAppsRunning() throws IOException {
for (FSLeafQueue leaf : leafQueues) {
if (leaf.getName().startsWith("root.parentA")) {
- assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity,
+ assertEquals(0, (double) leaf.getFairShare().getMemoryLong() / nodeCapacity,
0);
} else if (leaf.getName().startsWith("root.parentB")) {
- assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity,
+ assertEquals(0, (double) leaf.getFairShare().getMemoryLong() / nodeCapacity,
0);
}
}
@@ -137,12 +137,12 @@ public void testFairShareOneAppRunning() throws IOException {
100,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentA.childA1", false).getFairShare()
- .getMemory() / nodeCapacity * 100, 0.1);
+ .getMemoryLong() / nodeCapacity * 100, 0.1);
assertEquals(
0,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentA.childA2", false).getFairShare()
- .getMemory() / nodeCapacity, 0.1);
+ .getMemoryLong() / nodeCapacity, 0.1);
verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),
nodeCapacity);
@@ -167,7 +167,7 @@ public void testFairShareMultipleActiveQueuesUnderSameParent()
33,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentA.childA" + i, false).getFairShare()
- .getMemory()
+ .getMemoryLong()
/ nodeCapacity * 100, .9);
}
@@ -200,7 +200,7 @@ public void testFairShareMultipleActiveQueuesUnderDifferentParent()
40,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentA.childA" + i, false).getFairShare()
- .getMemory()
+ .getMemoryLong()
/ nodeCapacity * 100, .9);
}
@@ -210,7 +210,7 @@ public void testFairShareMultipleActiveQueuesUnderDifferentParent()
10,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentB.childB1", false).getFairShare()
- .getMemory()
+ .getMemoryLong()
/ nodeCapacity * 100, .9);
verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),
@@ -237,7 +237,7 @@ public void testFairShareResetsToZeroWhenAppsComplete() throws IOException {
50,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentA.childA" + i, false).getFairShare()
- .getMemory()
+ .getMemoryLong()
/ nodeCapacity * 100, .9);
}
// Let app under childA1 complete. This should cause the fair share
@@ -254,13 +254,13 @@ public void testFairShareResetsToZeroWhenAppsComplete() throws IOException {
0,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentA.childA1", false).getFairShare()
- .getMemory()
+ .getMemoryLong()
/ nodeCapacity * 100, 0);
assertEquals(
100,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentA.childA2", false).getFairShare()
- .getMemory()
+ .getMemoryLong()
/ nodeCapacity * 100, 0.1);
verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),
@@ -293,7 +293,7 @@ public void testFairShareWithDRFMultipleActiveQueuesUnderDifferentParent()
40,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentA.childA" + i, false).getFairShare()
- .getMemory()
+ .getMemoryLong()
/ nodeMem * 100, .9);
assertEquals(
40,
@@ -308,7 +308,7 @@ public void testFairShareWithDRFMultipleActiveQueuesUnderDifferentParent()
10,
(double) scheduler.getQueueManager()
.getLeafQueue("root.parentB.childB1", false).getFairShare()
- .getMemory()
+ .getMemoryLong()
/ nodeMem * 100, .9);
assertEquals(
10,
@@ -322,13 +322,13 @@ public void testFairShareWithDRFMultipleActiveQueuesUnderDifferentParent()
for (FSLeafQueue leaf : leafQueues) {
if (leaf.getName().startsWith("root.parentA")) {
assertEquals(0.2,
- (double) leaf.getSteadyFairShare().getMemory() / nodeMem, 0.001);
+ (double) leaf.getSteadyFairShare().getMemoryLong() / nodeMem, 0.001);
assertEquals(0.2,
(double) leaf.getSteadyFairShare().getVirtualCores() / nodeVCores,
0.001);
} else if (leaf.getName().startsWith("root.parentB")) {
assertEquals(0.05,
- (double) leaf.getSteadyFairShare().getMemory() / nodeMem, 0.001);
+ (double) leaf.getSteadyFairShare().getMemoryLong() / nodeMem, 0.001);
assertEquals(0.1,
(double) leaf.getSteadyFairShare().getVirtualCores() / nodeVCores,
0.001);
@@ -348,11 +348,11 @@ private void verifySteadyFairShareMemory(Collection leafQueues,
for (FSLeafQueue leaf : leafQueues) {
if (leaf.getName().startsWith("root.parentA")) {
assertEquals(0.2,
- (double) leaf.getSteadyFairShare().getMemory() / nodeCapacity,
+ (double) leaf.getSteadyFairShare().getMemoryLong() / nodeCapacity,
0.001);
} else if (leaf.getName().startsWith("root.parentB")) {
assertEquals(0.05,
- (double) leaf.getSteadyFairShare().getMemory() / nodeCapacity,
+ (double) leaf.getSteadyFairShare().getMemoryLong() / nodeCapacity,
0.001);
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
index 2456594..4e62c06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
@@ -66,11 +66,11 @@
private ControlledClock clock;
private static class StubbedFairScheduler extends FairScheduler {
- public int lastPreemptMemory = -1;
+ public long lastPreemptMemory = -1;
@Override
protected void preemptResources(Resource toPreempt) {
- lastPreemptMemory = toPreempt.getMemory();
+ lastPreemptMemory = toPreempt.getMemoryLong();
}
public void resetLastPreemptResources() {
@@ -485,7 +485,7 @@ public void testPreemptionIsNotDelayedToNextRound() throws Exception {
scheduler.update();
Resource toPreempt = scheduler.resourceDeficit(scheduler.getQueueManager()
.getLeafQueue("queueA.queueA2", false), clock.getTime());
- assertEquals(3277, toPreempt.getMemory());
+ assertEquals(3277, toPreempt.getMemoryLong());
// verify if the 3 containers required by queueA2 are preempted in the same
// round
@@ -616,18 +616,18 @@ public void testPreemptionDecision() throws Exception {
// share.
clock.tickSec(6);
assertEquals(
- 1024, scheduler.resourceDeficit(schedC, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(schedC, clock.getTime()).getMemoryLong());
assertEquals(
- 1024, scheduler.resourceDeficit(schedD, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(schedD, clock.getTime()).getMemoryLong());
// After fairSharePreemptionTime has passed, they should want to preempt
// fair share.
scheduler.update();
clock.tickSec(6);
assertEquals(
- 1536 , scheduler.resourceDeficit(schedC, clock.getTime()).getMemory());
+ 1536 , scheduler.resourceDeficit(schedC, clock.getTime()).getMemoryLong());
assertEquals(
- 1536, scheduler.resourceDeficit(schedD, clock.getTime()).getMemory());
+ 1536, scheduler.resourceDeficit(schedD, clock.getTime()).getMemoryLong());
stopResourceManager();
}
@@ -758,12 +758,12 @@ public void testPreemptionDecisionWithDRF() throws Exception {
// share.
clock.tickSec(6);
Resource res = scheduler.resourceDeficit(schedC, clock.getTime());
- assertEquals(1024, res.getMemory());
+ assertEquals(1024, res.getMemoryLong());
// Demand = 3
assertEquals(3, res.getVirtualCores());
res = scheduler.resourceDeficit(schedD, clock.getTime());
- assertEquals(1024, res.getMemory());
+ assertEquals(1024, res.getMemoryLong());
// Demand = 6, but min share = 2
assertEquals(2, res.getVirtualCores());
@@ -772,11 +772,11 @@ public void testPreemptionDecisionWithDRF() throws Exception {
scheduler.update();
clock.tickSec(6);
res = scheduler.resourceDeficit(schedC, clock.getTime());
- assertEquals(1536, res.getMemory());
+ assertEquals(1536, res.getMemoryLong());
assertEquals(3, res.getVirtualCores());
res = scheduler.resourceDeficit(schedD, clock.getTime());
- assertEquals(1536, res.getMemory());
+ assertEquals(1536, res.getMemoryLong());
// Demand = 6, but fair share = 3
assertEquals(3, res.getVirtualCores());
stopResourceManager();
@@ -907,61 +907,61 @@ public void testPreemptionDecisionWithVariousTimeout() throws Exception {
scheduler.update();
clock.tickSec(6);
assertEquals(
- 1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemoryLong());
assertEquals(
- 0, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+ 0, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemoryLong());
assertEquals(
- 0, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+ 0, scheduler.resourceDeficit(queueC, clock.getTime()).getMemoryLong());
// After 10 seconds, queueB2 wants to preempt min share
scheduler.update();
clock.tickSec(5);
assertEquals(
- 1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemoryLong());
assertEquals(
- 1024, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemoryLong());
assertEquals(
- 0, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+ 0, scheduler.resourceDeficit(queueC, clock.getTime()).getMemoryLong());
// After 15 seconds, queueC wants to preempt min share
scheduler.update();
clock.tickSec(5);
assertEquals(
- 1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemoryLong());
assertEquals(
- 1024, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemoryLong());
assertEquals(
- 1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemoryLong());
// After 20 seconds, queueB2 should want to preempt fair share
scheduler.update();
clock.tickSec(5);
assertEquals(
- 1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemoryLong());
assertEquals(
- 1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+ 1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemoryLong());
assertEquals(
- 1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemoryLong());
// After 25 seconds, queueB1 should want to preempt fair share
scheduler.update();
clock.tickSec(5);
assertEquals(
- 1536, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+ 1536, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemoryLong());
assertEquals(
- 1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+ 1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemoryLong());
assertEquals(
- 1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+ 1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemoryLong());
// After 30 seconds, queueC should want to preempt fair share
scheduler.update();
clock.tickSec(5);
assertEquals(
- 1536, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+ 1536, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemoryLong());
assertEquals(
- 1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+ 1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemoryLong());
assertEquals(
- 1536, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+ 1536, scheduler.resourceDeficit(queueC, clock.getTime()).getMemoryLong());
stopResourceManager();
}
@@ -1087,7 +1087,7 @@ public void testPreemptionDecisionWithNonPreemptableQueue() throws Exception {
// queueB to queueD
clock.tickSec(6);
assertEquals(2048,
- scheduler.resourceDeficit(schedD, clock.getTime()).getMemory());
+ scheduler.resourceDeficit(schedD, clock.getTime()).getMemoryLong());
scheduler.preemptResources(Resources.createResource(2 * 1024));
// now only app2 is selected to be preempted
@@ -1256,7 +1256,7 @@ public void testPreemptionDecisionWhenPreemptionDisabledOnAllQueues()
// After minSharePreemptionTime has passed, resource deficit is 2G
clock.tickSec(6);
assertEquals(2048,
- scheduler.resourceDeficit(schedA, clock.getTime()).getMemory());
+ scheduler.resourceDeficit(schedA, clock.getTime()).getMemoryLong());
scheduler.preemptResources(Resources.createResource(2 * 1024));
// now none app is selected to be preempted
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 44877fb..9db14c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -337,9 +337,9 @@ public void testUpdateResourceOnNode() throws Exception {
// SchedulerNode's total resource and available resource are changed.
assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID())
- .getTotalResource().getMemory());
+ .getTotalResource().getMemoryLong());
assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID()).
- getUnallocatedResource().getMemory(), 1024);
+ getUnallocatedResource().getMemoryLong());
QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false);
Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
@@ -695,7 +695,7 @@ public void testFifoScheduling() throws Exception {
am1.registerAppAttempt();
SchedulerNodeReport report_nm1 =
rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
+ Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemoryLong());
RMApp app2 = rm.submitApp(2048);
// kick the scheduling, 2GB given to AM, remaining 2 GB on nm2
@@ -705,7 +705,7 @@ public void testFifoScheduling() throws Exception {
am2.registerAppAttempt();
SchedulerNodeReport report_nm2 =
rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
- Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());
+ Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemoryLong());
// add request for containers
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1);
@@ -731,24 +731,24 @@ public void testFifoScheduling() throws Exception {
List allocated1 = alloc1Response.getAllocatedContainers();
Assert.assertEquals(1, allocated1.size());
- Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
+ Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemoryLong());
Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
List allocated2 = alloc2Response.getAllocatedContainers();
Assert.assertEquals(1, allocated2.size());
- Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
+ Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemoryLong());
Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
- Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
- Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemory());
+ Assert.assertEquals(0, report_nm1.getAvailableResource().getMemoryLong());
+ Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemoryLong());
- Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());
+ Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemoryLong());
Container c1 = allocated1.get(0);
- Assert.assertEquals(GB, c1.getResource().getMemory());
+ Assert.assertEquals(GB, c1.getResource().getMemoryLong());
ContainerStatus containerStatus =
BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE,
"", 0, c1.getResource());
@@ -763,7 +763,7 @@ public void testFifoScheduling() throws Exception {
Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses()
.size());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemory());
+ Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemoryLong());
rm.stop();
}
@@ -820,7 +820,7 @@ private void testMinimumAllocation(YarnConfiguration conf, int testAlloc)
int checkAlloc =
conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
- Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemory());
+ Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemoryLong());
rm.stop();
}
@@ -1074,12 +1074,12 @@ public void testHeadroom() throws Exception {
Allocation allocation1 =
fs.allocate(appAttemptId1, emptyAsk, emptyId, null, null, null, null);
Assert.assertEquals("Allocation headroom", 1 * GB, allocation1
- .getResourceLimit().getMemory());
+ .getResourceLimit().getMemoryLong());
Allocation allocation2 =
fs.allocate(appAttemptId2, emptyAsk, emptyId, null, null, null, null);
Assert.assertEquals("Allocation headroom", 1 * GB, allocation2
- .getResourceLimit().getMemory());
+ .getResourceLimit().getMemoryLong());
rm.stop();
}
@@ -1100,8 +1100,8 @@ public void testResourceOverCommit() throws Exception {
SchedulerNodeReport report_nm1 =
rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
// check node report, 2 GB used and 2 GB available
- Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemoryLong());
// add request for containers
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
@@ -1117,17 +1117,17 @@ public void testResourceOverCommit() throws Exception {
List allocated1 = alloc1Response.getAllocatedContainers();
Assert.assertEquals(1, allocated1.size());
- Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
+ Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemoryLong());
Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
// check node report, 4 GB used and 0 GB available
- Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
- Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
+ Assert.assertEquals(0, report_nm1.getAvailableResource().getMemoryLong());
+ Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemoryLong());
// check container is assigned with 2 GB.
Container c1 = allocated1.get(0);
- Assert.assertEquals(2 * GB, c1.getResource().getMemory());
+ Assert.assertEquals(2 * GB, c1.getResource().getMemoryLong());
// update node resource to 2 GB, so resource is over-consumed.
Map nodeResourceMap =
@@ -1141,8 +1141,8 @@ public void testResourceOverCommit() throws Exception {
// Now, the used resource is still 4 GB, and available resource is minus
// value.
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
- Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemoryLong());
+ Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemoryLong());
// Check container can complete successfully in case of resource
// over-commitment.
@@ -1160,9 +1160,9 @@ public void testResourceOverCommit() throws Exception {
Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses()
.size());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
- Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
+ Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemoryLong());
// As container return 2 GB back, the available resource becomes 0 again.
- Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+ Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemoryLong());
rm.stop();
}
@@ -1235,34 +1235,34 @@ public void handle(Event event) {
application_0.schedule();
// Check the used resource is 1 GB 1 core
- // Assert.assertEquals(1 * GB, nm_0.getUsed().getMemory());
+ // Assert.assertEquals(1 * GB, nm_0.getUsed().getMemoryLong());
Resource usedResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getAllocatedResource();
- Assert.assertEquals(usedResource.getMemory(), 1 * GB);
+ Assert.assertEquals(usedResource.getMemoryLong(), 1 * GB);
Assert.assertEquals(usedResource.getVirtualCores(), 1);
// Check total resource of scheduler node is also changed to 1 GB 1 core
Resource totalResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getTotalResource();
- Assert.assertEquals(totalResource.getMemory(), 1 * GB);
+ Assert.assertEquals(totalResource.getMemoryLong(), 1 * GB);
Assert.assertEquals(totalResource.getVirtualCores(), 1);
// Check the available resource is 0/0
Resource availableResource =
resourceManager.getResourceScheduler()
.getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource();
- Assert.assertEquals(availableResource.getMemory(), 0);
+ Assert.assertEquals(availableResource.getMemoryLong(), 0);
Assert.assertEquals(availableResource.getVirtualCores(), 0);
}
private void checkApplicationResourceUsage(int expected,
Application application) {
- Assert.assertEquals(expected, application.getUsedResources().getMemory());
+ Assert.assertEquals(expected, application.getUsedResources().getMemoryLong());
}
private void checkNodeResourceUsage(int expected,
org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) {
- Assert.assertEquals(expected, node.getUsed().getMemory());
+ Assert.assertEquals(expected, node.getUsed().getMemoryLong());
node.checkResourceUsage();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 72cccf6..c2cb103 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -1410,7 +1410,7 @@ public void verifyAppInfoGeneric(RMApp app, String id, String user,
assertEquals("clusterUsagePerc doesn't match", 50.0f, clusterUsagePerc, 0.01f);
assertEquals("numContainers doesn't match", 1, numContainers);
assertEquals("preemptedResourceMB doesn't match", app
- .getRMAppMetrics().getResourcePreempted().getMemory(),
+ .getRMAppMetrics().getResourcePreempted().getMemoryLong(),
preemptedResourceMB);
assertEquals("preemptedResourceVCores doesn't match", app
.getRMAppMetrics().getResourcePreempted().getVirtualCores(),
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
index 3fd1fd5..49313f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
@@ -780,9 +780,9 @@ public void verifyNodeInfoGeneric(MockNM nm, String state, String rack,
assertEquals("numContainers doesn't match: " + numContainers,
report.getNumContainers(), numContainers);
assertEquals("usedMemoryMB doesn't match: " + usedMemoryMB, report
- .getUsedResource().getMemory(), usedMemoryMB);
+ .getUsedResource().getMemoryLong(), usedMemoryMB);
assertEquals("availMemoryMB doesn't match: " + availMemoryMB, report
- .getAvailableResource().getMemory(), availMemoryMB);
+ .getAvailableResource().getMemoryLong(), availMemoryMB);
assertEquals("usedVirtualCores doesn't match: " + usedVirtualCores, report
.getUsedResource().getVirtualCores(), usedVirtualCores);
assertEquals("availVirtualCores doesn't match: " + availVirtualCores, report