diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9ec25ae..3682bad 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1034,18 +1034,30 @@ private static void addDeprecatedKeys() {
public static final float DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION = 0.25F;
/**
- * The maximum percentage of disk space that can be used after which a disk is
- * marked as offline. Values can range from 0.0 to 100.0. If the value is
- * greater than or equal to 100, NM will check for full disk. This applies to
- * nm-local-dirs and nm-log-dirs.
+ * The high threshold percentage of disk space that can be used after which
+ * an online disk is marked as offline. Values can range from 0.0 to 100.0.
+ * If the value is greater than or equal to 100, NM will check for full disk.
+ * This applies to nm-local-dirs and nm-log-dirs.
*/
- public static final String NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE =
- NM_DISK_HEALTH_CHECK_PREFIX + "max-disk-utilization-per-disk-percentage";
+ public static final String NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE =
+ NM_DISK_HEALTH_CHECK_PREFIX +
+ "disk-utilization-watermark-high-per-disk-percentage";
/**
* By default, 90% of the disk can be used before it is marked as offline.
*/
- public static final float DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE =
- 90.0F;
+ public static final float
+ DEFAULT_NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE = 90.0F;
+
+ /**
+ * The low threshold percentage of disk space utilization below which an
+ * offline disk is marked as online. Values can range from 0.0 to 100.0.
+ * This applies to nm-local-dirs and nm-log-dirs.
+ */
+ public static final String NM_WM_LOW_PER_DISK_UTILIZATION_PERCENTAGE =
+ NM_DISK_HEALTH_CHECK_PREFIX +
+ "disk-utilization-watermark-low-per-disk-percentage";
+ public static final float
+ DEFAULT_NM_WM_LOW_PER_DISK_UTILIZATION_PERCENTAGE = 85.0F;
/**
* The minimum space that must be available on a local dir for it to be used.
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index b76defb..b9d0760 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1308,16 +1308,24 @@
- The maximum percentage of disk space utilization allowed after
- which a disk is marked as bad. Values can range from 0.0 to 100.0.
- If the value is greater than or equal to 100, the nodemanager will check
- for full disk. This applies to yarn-nodemanager.local-dirs and
+ The high threshold percentage of disk space utilization allowed after
+ which a good disk is marked as bad. Values can range from 0.0 to 100.0.
+ If the value is greater than or equal to 100, the nodemanager will check
+ for full disk. This applies to yarn.nodemanager.local-dirs and
yarn.nodemanager.log-dirs.
- yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage
+ yarn.nodemanager.disk-health-checker.disk-utilization-watermark-high-per-disk-percentage
90.0
+ The low threshold percentage of disk space utilization below which a bad
+ disk is marked as good. Values can range from 0.0 to 100.0. This applies to
+ yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs.
+ yarn.nodemanager.disk-health-checker.disk-utilization-watermark-low-per-disk-percentage
+ 85.0
+
+
+
The minimum space that must be available on a disk for
it to be used. This applies to yarn-nodemanager.local-dirs and
yarn.nodemanager.log-dirs.
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
index 32046c5..9f1ae06 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
@@ -39,6 +39,8 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DiskChecker;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* Manages a list of local storage directories.
*/
@@ -88,8 +90,9 @@
private List fullDirs;
private int numFailures;
-
- private float diskUtilizationPercentageCutoff;
+
+ private float diskUtilizationPercentageCutoffHigh;
+ private float diskUtilizationPercentageCutoffLow;
private long diskUtilizationSpaceCutoff;
private int goodDirsDiskUtilizationPercentage;
@@ -103,7 +106,7 @@
* directories to be monitored
*/
public DirectoryCollection(String[] dirs) {
- this(dirs, 100.0F, 0);
+ this(dirs, 100.0F, 100.0F, 0);
}
/**
@@ -119,7 +122,7 @@ public DirectoryCollection(String[] dirs) {
*
*/
public DirectoryCollection(String[] dirs, float utilizationPercentageCutOff) {
- this(dirs, utilizationPercentageCutOff, 0);
+ this(dirs, utilizationPercentageCutOff, utilizationPercentageCutOff, 0);
}
/**
@@ -134,7 +137,7 @@ public DirectoryCollection(String[] dirs, float utilizationPercentageCutOff) {
*
*/
public DirectoryCollection(String[] dirs, long utilizationSpaceCutOff) {
- this(dirs, 100.0F, utilizationSpaceCutOff);
+ this(dirs, 100.0F, 100.0F, utilizationSpaceCutOff);
}
/**
@@ -145,25 +148,33 @@ public DirectoryCollection(String[] dirs, long utilizationSpaceCutOff) {
*
* @param dirs
* directories to be monitored
- * @param utilizationPercentageCutOff
+ * @param utilizationPercentageCutOffHigh
* percentage of disk that can be used before the dir is taken out of
* the good dirs list
+ * @param utilizationPercentageCutOffLow
+ *          percentage of disk utilization below which the dir is moved from
+ *          the bad dirs list back to the good dirs list
* @param utilizationSpaceCutOff
* minimum space, in MB, that must be available on the disk for the
* dir to be marked as good
*
*/
- public DirectoryCollection(String[] dirs,
- float utilizationPercentageCutOff,
+ public DirectoryCollection(String[] dirs,
+ float utilizationPercentageCutOffHigh,
+ float utilizationPercentageCutOffLow,
long utilizationSpaceCutOff) {
localDirs = new CopyOnWriteArrayList(dirs);
errorDirs = new CopyOnWriteArrayList();
fullDirs = new CopyOnWriteArrayList();
- diskUtilizationPercentageCutoff =
- utilizationPercentageCutOff < 0.0F ? 0.0F
- : (utilizationPercentageCutOff > 100.0F ? 100.0F
- : utilizationPercentageCutOff);
+ diskUtilizationPercentageCutoffHigh =
+ utilizationPercentageCutOffHigh < 0.0F ? 0.0F
+ : (utilizationPercentageCutOffHigh > 100.0F ? 100.0F
+ : utilizationPercentageCutOffHigh);
+ diskUtilizationPercentageCutoffLow =
+ utilizationPercentageCutOffLow < 0.0F ? 0.0F
+ : (utilizationPercentageCutOffLow > 100.0F ? 100.0F
+ : utilizationPercentageCutOffLow);
diskUtilizationSpaceCutoff =
utilizationSpaceCutOff < 0 ? 0 : utilizationSpaceCutOff;
@@ -254,7 +265,8 @@ synchronized boolean checkDirs() {
List allLocalDirs =
DirectoryCollection.concat(localDirs, failedDirs);
- Map dirsFailedCheck = testDirs(allLocalDirs);
+ Map dirsFailedCheck = testDirs(allLocalDirs,
+ preCheckGoodDirs);
localDirs.clear();
errorDirs.clear();
@@ -314,7 +326,8 @@ synchronized boolean checkDirs() {
return setChanged;
}
- Map testDirs(List dirs) {
+ Map testDirs(List dirs,
+ Set goodDirs) {
HashMap ret =
new HashMap();
for (final String dir : dirs) {
@@ -322,7 +335,10 @@ synchronized boolean checkDirs() {
try {
File testDir = new File(dir);
DiskChecker.checkDir(testDir);
- if (isDiskUsageOverPercentageLimit(testDir)) {
+ float diskUtilizationPercentageCutoff = goodDirs.contains(dir) ?
+ diskUtilizationPercentageCutoffHigh : diskUtilizationPercentageCutoffLow;
+ if (isDiskUsageOverPercentageLimit(testDir,
+ diskUtilizationPercentageCutoff)) {
msg =
"used space above threshold of "
+ diskUtilizationPercentageCutoff
@@ -374,7 +390,8 @@ private void verifyDirUsingMkdir(File dir) throws IOException {
}
}
- private boolean isDiskUsageOverPercentageLimit(File dir) {
+ private boolean isDiskUsageOverPercentageLimit(File dir,
+ float diskUtilizationPercentageCutoff) {
float freePercentage =
100 * (dir.getUsableSpace() / (float) dir.getTotalSpace());
float usedPercentage = 100.0F - freePercentage;
@@ -402,17 +419,28 @@ private void createDir(FileContext localFs, Path dir, FsPermission perm)
}
}
}
-
- public float getDiskUtilizationPercentageCutoff() {
- return diskUtilizationPercentageCutoff;
+
+ @VisibleForTesting
+ float getDiskUtilizationPercentageCutoffHigh() {
+ return diskUtilizationPercentageCutoffHigh;
+ }
+
+ @VisibleForTesting
+ float getDiskUtilizationPercentageCutoffLow() {
+ return diskUtilizationPercentageCutoffLow;
}
public void setDiskUtilizationPercentageCutoff(
- float diskUtilizationPercentageCutoff) {
- this.diskUtilizationPercentageCutoff =
- diskUtilizationPercentageCutoff < 0.0F ? 0.0F
- : (diskUtilizationPercentageCutoff > 100.0F ? 100.0F
- : diskUtilizationPercentageCutoff);
+ float utilizationPercentageCutOffHigh,
+ float utilizationPercentageCutOffLow) {
+ diskUtilizationPercentageCutoffHigh =
+ utilizationPercentageCutOffHigh < 0.0F ? 0.0F
+ : (utilizationPercentageCutOffHigh > 100.0F ? 100.0F
+ : utilizationPercentageCutOffHigh);
+ diskUtilizationPercentageCutoffLow =
+ utilizationPercentageCutOffLow < 0.0F ? 0.0F
+ : (utilizationPercentageCutOffLow > 100.0F ? 100.0F
+ : utilizationPercentageCutOffLow);
}
public long getDiskUtilizationSpaceCutoff() {
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
index 6709c90..c3f67ef 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LocalDirsHandlerService.java
@@ -97,22 +97,31 @@
private final class MonitoringTimerTask extends TimerTask {
public MonitoringTimerTask(Configuration conf) throws YarnRuntimeException {
- float maxUsableSpacePercentagePerDisk =
+ float highUsableSpacePercentagePerDisk =
conf.getFloat(
- YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
- YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE);
+ YarnConfiguration.NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE,
+ YarnConfiguration.
+ DEFAULT_NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE);
+ float lowUsableSpacePercentagePerDisk =
+ conf.getFloat(
+ YarnConfiguration.NM_WM_LOW_PER_DISK_UTILIZATION_PERCENTAGE,
+ YarnConfiguration.
+ DEFAULT_NM_WM_LOW_PER_DISK_UTILIZATION_PERCENTAGE);
long minFreeSpacePerDiskMB =
conf.getLong(YarnConfiguration.NM_MIN_PER_DISK_FREE_SPACE_MB,
YarnConfiguration.DEFAULT_NM_MIN_PER_DISK_FREE_SPACE_MB);
localDirs =
new DirectoryCollection(
validatePaths(conf
- .getTrimmedStrings(YarnConfiguration.NM_LOCAL_DIRS)),
- maxUsableSpacePercentagePerDisk, minFreeSpacePerDiskMB);
+ .getTrimmedStrings(YarnConfiguration.NM_LOCAL_DIRS)),
+ highUsableSpacePercentagePerDisk, lowUsableSpacePercentagePerDisk,
+ minFreeSpacePerDiskMB);
logDirs =
new DirectoryCollection(
- validatePaths(conf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)),
- maxUsableSpacePercentagePerDisk, minFreeSpacePerDiskMB);
+ validatePaths(conf
+ .getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)),
+ highUsableSpacePercentagePerDisk, lowUsableSpacePercentagePerDisk,
+ minFreeSpacePerDiskMB);
localDirsAllocator = new LocalDirAllocator(
YarnConfiguration.NM_LOCAL_DIRS);
logDirsAllocator = new LocalDirAllocator(YarnConfiguration.NM_LOG_DIRS);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
index 2fd89c6..2190f11 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDirectoryCollection.java
@@ -68,8 +68,9 @@ public void testConcurrentAccess() throws IOException {
String[] dirs = {testFile.getPath()};
DirectoryCollection dc =
new DirectoryCollection(dirs, conf.getFloat(
- YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
- YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
+ YarnConfiguration.NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE,
+ YarnConfiguration.
+ DEFAULT_NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE));
// Create an iterator before checkDirs is called to reliable test case
List list = dc.getGoodDirs();
@@ -102,8 +103,9 @@ public void testCreateDirectories() throws IOException {
String[] dirs = { dirA, dirB, dirC };
DirectoryCollection dc =
new DirectoryCollection(dirs, conf.getFloat(
- YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
- YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
+ YarnConfiguration.NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE,
+ YarnConfiguration.
+ DEFAULT_NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE));
FsPermission defaultPerm = FsPermission.getDefault()
.applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK));
boolean createResult = dc.createNonExistentDirs(localFs, defaultPerm);
@@ -152,7 +154,7 @@ public void testDiskSpaceUtilizationLimit() throws IOException {
// no good dirs
Assert.assertEquals(0, dc.getGoodDirsDiskUtilizationPercentage());
- dc = new DirectoryCollection(dirs, 100.0F, 0);
+ dc = new DirectoryCollection(dirs, 100.0F, 100.0F, 0);
utilizedSpacePerc =
(int)((testDir.getTotalSpace() - testDir.getUsableSpace()) * 100 /
testDir.getTotalSpace());
@@ -168,18 +170,28 @@ public void testDiskSpaceUtilizationLimit() throws IOException {
public void testDiskLimitsCutoffSetters() throws IOException {
String[] dirs = { "dir" };
- DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F, 100);
+ DirectoryCollection dc = new DirectoryCollection(dirs, 0.0F, 0.0F, 100);
float testValue = 57.5F;
float delta = 0.1F;
- dc.setDiskUtilizationPercentageCutoff(testValue);
- Assert.assertEquals(testValue, dc.getDiskUtilizationPercentageCutoff(),
- delta);
+ dc.setDiskUtilizationPercentageCutoff(testValue, 50.0F);
+ Assert.assertEquals(testValue, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(50.0F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
+
testValue = -57.5F;
- dc.setDiskUtilizationPercentageCutoff(testValue);
- Assert.assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
+ dc.setDiskUtilizationPercentageCutoff(testValue, testValue);
+ Assert.assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
+
testValue = 157.5F;
- dc.setDiskUtilizationPercentageCutoff(testValue);
- Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
+ dc.setDiskUtilizationPercentageCutoff(testValue, testValue);
+ Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
long spaceValue = 57;
dc.setDiskUtilizationSpaceCutoff(spaceValue);
@@ -200,7 +212,7 @@ public void testFailedDisksBecomingGoodAgain() throws Exception {
Assert.assertEquals(1, dc.getFailedDirs().size());
Assert.assertEquals(1, dc.getFullDirs().size());
- dc.setDiskUtilizationPercentageCutoff(100.0F);
+ dc.setDiskUtilizationPercentageCutoff(100.0F, 100.0F);
dc.checkDirs();
Assert.assertEquals(1, dc.getGoodDirs().size());
Assert.assertEquals(0, dc.getFailedDirs().size());
@@ -236,27 +248,45 @@ public void testConstructors() {
String[] dirs = { "dir" };
float delta = 0.1F;
DirectoryCollection dc = new DirectoryCollection(dirs);
- Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
+ Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
dc = new DirectoryCollection(dirs, 57.5F);
- Assert.assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoff(), delta);
+ Assert.assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
dc = new DirectoryCollection(dirs, 57);
- Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
+ Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
Assert.assertEquals(57, dc.getDiskUtilizationSpaceCutoff());
- dc = new DirectoryCollection(dirs, 57.5F, 67);
- Assert.assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoff(), delta);
+ dc = new DirectoryCollection(dirs, 57.5F, 50.5F, 67);
+ Assert.assertEquals(57.5F, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(50.5F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
Assert.assertEquals(67, dc.getDiskUtilizationSpaceCutoff());
- dc = new DirectoryCollection(dirs, -57.5F, -67);
- Assert.assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
+ dc = new DirectoryCollection(dirs, -57.5F, -57.5F, -67);
+ Assert.assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(0.0F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
- dc = new DirectoryCollection(dirs, 157.5F, -67);
- Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoff(), delta);
+ dc = new DirectoryCollection(dirs, 157.5F, 157.5F, -67);
+ Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffHigh(),
+ delta);
+ Assert.assertEquals(100.0F, dc.getDiskUtilizationPercentageCutoffLow(),
+ delta);
Assert.assertEquals(0, dc.getDiskUtilizationSpaceCutoff());
}
@@ -288,7 +318,7 @@ public void testDirsChangeListener() {
Assert.assertEquals(listener3.num, 1);
dc.deregisterDirsChangeListener(listener2);
- dc.setDiskUtilizationPercentageCutoff(100.0F);
+ dc.setDiskUtilizationPercentageCutoff(100.0F, 100.0F);
dc.checkDirs();
Assert.assertEquals(1, dc.getGoodDirs().size());
Assert.assertEquals(listener1.num, 3);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
index c61d1f0..f7e5adc 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
@@ -105,8 +105,10 @@ public void testGetFullDirs() throws Exception {
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
- conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
- 0.0f);
+ conf.setFloat(YarnConfiguration.NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE,
+ 0.0f);
+ conf.setFloat(YarnConfiguration.NM_WM_LOW_PER_DISK_UTILIZATION_PERCENTAGE,
+ 0.0f);
NodeManagerMetrics nm = NodeManagerMetrics.create();
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService(nm);
dirSvc.init(conf);
@@ -120,8 +122,10 @@ public void testGetFullDirs() throws Exception {
Assert.assertEquals(0, nm.getGoodLocalDirsDiskUtilizationPerc());
Assert.assertEquals(0, nm.getGoodLogDirsDiskUtilizationPerc());
- conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
- 100.0f);
+ conf.setFloat(YarnConfiguration.NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE,
+ 100.0f);
+ conf.setFloat(YarnConfiguration.NM_WM_LOW_PER_DISK_UTILIZATION_PERCENTAGE,
+ 100.0f);
nm = NodeManagerMetrics.create();
dirSvc = new LocalDirsHandlerService(nm);
dirSvc.init(conf);
diff --git hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java
index 84e42fc..9a9cf21 100644
--- hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java
+++ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java
@@ -153,7 +153,9 @@ public void testContainerLogFile() throws IOException, YarnException {
String logdirwithFile = absLogDir.toURI().toString();
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_LOG_DIRS, logdirwithFile);
- conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
+ conf.setFloat(YarnConfiguration.NM_WM_HIGH_PER_DISK_UTILIZATION_PERCENTAGE,
+ 0.0f);
+ conf.setFloat(YarnConfiguration.NM_WM_LOW_PER_DISK_UTILIZATION_PERCENTAGE,
0.0f);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
dirsHandler.init(conf);