Index: src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java (working copy) @@ -254,7 +254,7 @@ private static FileStatus getTableInfoPath(final FileSystem fs, final Path tabledir) throws IOException { - FileStatus [] status = fs.listStatus(tabledir, new PathFilter() { + FileStatus [] status = FSUtils.listStatus(fs, tabledir, new PathFilter() { @Override public boolean accept(Path p) { // Accept any file that starts with TABLEINFO_NAME Index: src/main/java/org/apache/hadoop/hbase/util/FSUtils.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (working copy) @@ -21,6 +21,7 @@ import java.io.DataInputStream; import java.io.EOFException; +import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -905,5 +906,26 @@ } return map; } - + + /** + * Calls fs.listStatus() and treats FileNotFoundException as non-fatal + * This accommodates differences in behavior across various Hadoop versions + * + * @param fs file system + * @param dir directory + * @param filter path filter + * @return null if dir doesn't exist or contains no entries, otherwise FileStatus array + */ + public static FileStatus [] listStatus(final FileSystem fs, + final Path dir, final PathFilter filter) throws IOException { + FileStatus [] status = null; + try { + status = filter == null ? 
fs.listStatus(dir) : fs.listStatus(dir, filter); + } catch (FileNotFoundException fnfe) { + // if directory doesn't exist, return null + LOG.info(dir + " doesn't exist"); + } + if (status == null || status.length < 1) return null; + return status; + } } Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (working copy) @@ -1534,7 +1534,7 @@ NavigableSet filesSorted = new TreeSet(); Path editsdir = getRegionDirRecoveredEditsDir(regiondir); if (!fs.exists(editsdir)) return filesSorted; - FileStatus[] files = fs.listStatus(editsdir, new PathFilter() { + FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() { @Override public boolean accept(Path p) { boolean result = false; Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy) @@ -3457,7 +3457,7 @@ */ private static void listPaths(FileSystem fs, Path dir) throws IOException { if (LOG.isDebugEnabled()) { - FileStatus[] stats = fs.listStatus(dir); + FileStatus[] stats = FSUtils.listStatus(fs, dir, null); if (stats == null || stats.length == 0) { return; } Index: src/main/java/org/apache/hadoop/hbase/regionserver/Store.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (working copy) @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.CollectionBackedScanner; import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.util.StringUtils; import com.google.common.base.Preconditions; @@ -253,7 +254,7 @@ private List loadStoreFiles() throws IOException { ArrayList results = new ArrayList(); - FileStatus files[] = this.fs.listStatus(this.homedir); + FileStatus files[] = FSUtils.listStatus(this.fs, this.homedir, null); for (int i = 0; files != null && i < files.length; i++) { // Skip directories. if (files[i].isDir()) { Index: src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (working copy) @@ -215,7 +215,7 @@ } // cache the modification time stamp of this store file - FileStatus[] stats = fs.listStatus(p); + FileStatus[] stats = FSUtils.listStatus(fs, p, null); if (stats != null && stats.length == 1) { this.modificationTimeStamp = stats[0].getModificationTime(); } else { Index: src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (working copy) @@ -191,7 +191,7 @@ List serverNames = new ArrayList(); try { if (!this.fs.exists(logsDirPath)) return; - FileStatus[] logFolders = this.fs.listStatus(logsDirPath); + FileStatus[] logFolders = FSUtils.listStatus(this.fs, logsDirPath, null); if (logFolders == null || logFolders.length == 0) { LOG.debug("No log files to split, proceeding..."); Index: src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
(revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (working copy) @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Writables; @@ -324,7 +325,7 @@ family.getName()); if (!fs.exists(p)) continue; // Look for reference files. Call listStatus with anonymous instance of PathFilter. - FileStatus [] ps = fs.listStatus(p, + FileStatus [] ps = FSUtils.listStatus(fs, p, new PathFilter () { public boolean accept(Path path) { return StoreFile.isReference(path); Index: src/main/java/org/apache/hadoop/hbase/master/LogCleaner.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/LogCleaner.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/master/LogCleaner.java (working copy) @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.regionserver.wal.HLog; +import org.apache.hadoop.hbase.util.FSUtils; import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; @@ -118,7 +119,7 @@ @Override protected void chore() { try { - FileStatus [] files = this.fs.listStatus(this.oldLogDir); + FileStatus [] files = FSUtils.listStatus(this.fs, this.oldLogDir, null); if (files == null) return; FILE: for (FileStatus file : files) { Path filePath = file.getPath(); Index: src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (revision 1198848) +++ src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (working copy) @@ -45,6 +45,7 @@ import 
org.apache.hadoop.hbase.regionserver.wal.HLogSplitter; import org.apache.hadoop.hbase.regionserver.wal.OrphanHLogAfterSplitException; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -183,7 +184,8 @@ LOG.warn(hLogDir + " doesn't exist. Nothing to do!"); continue; } - FileStatus[] logfiles = fs.listStatus(hLogDir); // TODO filter filenames? + // TODO filter filenames? + FileStatus[] logfiles = FSUtils.listStatus(fs, hLogDir, null); if (logfiles == null || logfiles.length == 0) { LOG.info(hLogDir + " is empty dir, no logs to split"); } else { @@ -989,7 +991,7 @@ } LOG.debug("re-listing " + logdir); tot_mgr_relist_logdir.incrementAndGet(); - FileStatus[] newfiles = fs.listStatus(logdir); + FileStatus[] newfiles = FSUtils.listStatus(fs, logdir, null); if (newfiles == null) { return false; }