diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 082c5ce..ed19492 100644 --- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -881,18 +881,6 @@ public final class HConstants { */ public static final String REGIONSERVER_METRICS_PERIOD = "hbase.regionserver.metrics.period"; public static final long DEFAULT_REGIONSERVER_METRICS_PERIOD = 5000; - /** Directories that are not HBase table directories */ - public static final List HBASE_NON_TABLE_DIRS = - Collections.unmodifiableList(Arrays.asList(new String[] { HREGION_LOGDIR_NAME, - HREGION_OLDLOGDIR_NAME, CORRUPT_DIR_NAME, SPLIT_LOGDIR_NAME, - HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY, SNAPSHOT_DIR_NAME, HBASE_TEMP_DIRECTORY, - OLD_SNAPSHOT_DIR_NAME, BASE_NAMESPACE_DIR, MIGRATION_NAME, LIB_DIR})); - - /** Directories that are not HBase user table directories */ - public static final List HBASE_NON_USER_TABLE_DIRS = - Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll( - new String[] { TableName.META_TABLE_NAME.getNameAsString() }, - HBASE_NON_TABLE_DIRS.toArray()))); /** Health script related settings. 
*/ public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location"; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/NamespaceJanitor.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/NamespaceJanitor.java index 3a4ee99..39d78e0 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/NamespaceJanitor.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/NamespaceJanitor.java @@ -108,26 +108,17 @@ public class NamespaceJanitor extends Chore { new Path(FSUtils.getRootDir(services.getConfiguration()), HConstants.BASE_NAMESPACE_DIR))) { if (!descs.contains(nsStatus.getPath().getName()) && !NamespaceDescriptor.RESERVED_NAMESPACES.contains(nsStatus.getPath().getName())) { - boolean isEmpty = true; - for(FileStatus status : fs.listStatus(nsStatus.getPath())) { - if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) { - isEmpty = false; - break; - } - } - if(isEmpty) { - try { - if (!fs.delete(nsStatus.getPath(), true)) { - LOG.error("Failed to remove namespace directory: " + nsStatus.getPath()); - } - } catch (IOException ex) { - LOG.error("Failed to remove namespace directory: " + nsStatus.getPath(), - ex); + try { + if (!fs.delete(nsStatus.getPath(), true)) { + LOG.error("Failed to remove namespace directory: " + nsStatus.getPath()); } - LOG.debug("Removed namespace directory: "+nsStatus.getPath()); - } else { - LOG.debug("Skipping non-empty namespace directory: " + nsStatus.getPath()); + } catch (IOException ex) { + LOG.error("Failed to remove namespace directory: " + nsStatus.getPath(), + ex); } + LOG.debug("Removed namespace directory: "+nsStatus.getPath()); + } else { + LOG.debug("Skipping non-empty namespace directory: " + nsStatus.getPath()); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index e8e5a28..8e7dd9e 100644 --- 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -192,13 +192,6 @@ public class TableNamespaceManager { //it will be replaced on new namespace creation zkNamespaceManager.remove(name); FileSystem fs = masterServices.getMasterFileSystem().getFileSystem(); - for(FileStatus status : - fs.listStatus(FSUtils.getNamespaceDir( - masterServices.getMasterFileSystem().getRootDir(), name))) { - if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) { - throw new IOException("Namespace directory contains table dir: "+status.getPath()); - } - } if (!fs.delete(FSUtils.getNamespaceDir( masterServices.getMasterFileSystem().getRootDir(), name), true)) { throw new IOException("Failed to remove namespace: "+name); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java index b054813..bb1fcda 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java @@ -423,8 +423,8 @@ public class NamespaceUpgrade implements Tool { HRegionFileSystem.openRegionFromFileSystem(conf, fs, newTablePath, newRegionInfo, false); //migrate region contents - for(FileStatus file : fs.listStatus(regionDir, new FSUtils.UserTableDirFilter(fs))) { - if(file.getPath().getName().equals(HRegionFileSystem.REGION_INFO_FILE)) + for(FileStatus file : fs.listStatus(regionDir)) { + if(file.getPath().getName().startsWith(".")) continue; if(!fs.rename(file.getPath(), newRegionDir)) { throw new IllegalStateException("Failed to move file "+file.getPath()+" to " + diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java index 
497a12e..6d938e0 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java @@ -90,8 +90,7 @@ class FSRegionScanner implements Runnable { // skip because this is not a CF directory continue; } - if (cfStatus.getPath().getName().startsWith(".") || - HConstants.HBASE_NON_USER_TABLE_DIRS.contains(cfStatus.getPath().getName())) { + if (cfStatus.getPath().getName().startsWith(".")) { continue; } FileStatus[] storeFileLists = fs.listStatus(cfStatus.getPath()); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index f3f2968..eab2450 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -148,11 +148,6 @@ public class FSTableDescriptors implements TableDescriptors { cachehits++; return HTableDescriptor.META_TABLEDESC; } - // hbase:meta is already handled. If some one tries to get the descriptor for - // .logs, .oldlogs or .corrupt throw an exception. - if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) { - throw new IOException("No descriptor found for non table = " + tablename); - } // Look in cache of descriptors. 
TableDescriptorAndModtime cachedtdm = this.cache.get(tablename); @@ -239,10 +234,6 @@ public class FSTableDescriptors implements TableDescriptors { if (TableName.META_TABLE_NAME.equals(htd.getTableName())) { throw new NotImplementedException(); } - if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) { - throw new NotImplementedException( - "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString()); - } updateTableDescriptor(htd); long modtime = getTableInfoModtime(htd.getTableName()); this.cache.put(htd.getTableName(), new TableDescriptorAndModtime(modtime, htd)); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 6c53f2a..b441e89 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -1240,17 +1240,6 @@ public abstract class FSUtils { } /** - * A {@link PathFilter} that returns usertable directories. 
To get all directories use the - * {@link BlackListDirFilter} with a null blacklist - */ - public static class UserTableDirFilter extends BlackListDirFilter { - - public UserTableDirFilter(FileSystem fs) { - super(fs, HConstants.HBASE_NON_TABLE_DIRS); - } - } - - /** * Heuristic to determine whether is safe or not to open a file for append * Looks both for dfs.support.append and use reflection to search * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush() @@ -1327,7 +1316,7 @@ public abstract class FSUtils { public static List getLocalTableDirs(final FileSystem fs, final Path rootdir) throws IOException { // presumes any directory under hbase.rootdir is a table - FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs)); + FileStatus[] dirs = fs.listStatus(rootdir); List tabledirs = new ArrayList(dirs.length); for (FileStatus dir: dirs) { tabledirs.add(dir.getPath()); @@ -1508,17 +1497,19 @@ public abstract class FSUtils { Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName); // Inside a table, there are compaction.dir directories to skip. Otherwise, all else // should be regions. 
- PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS); FileStatus[] regionDirs = fs.listStatus(tableDir); for (FileStatus regionDir : regionDirs) { Path dd = regionDir.getPath(); - if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) { + if (dd.getName().startsWith(".")) { continue; } // else its a region name, now look in region for families - FileStatus[] familyDirs = fs.listStatus(dd, df); + FileStatus[] familyDirs = fs.listStatus(dd); for (FileStatus familyDir : familyDirs) { Path family = familyDir.getPath(); + if (family.getName().startsWith(".")) { + continue; + } // now in family, iterate over the StoreFiles and // put in map FileStatus[] familyStatus = fs.listStatus(family); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index aa16177..1534ae7 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -262,13 +262,17 @@ public class TestFSTableDescriptors { @Test public void testReadingArchiveDirectoryFromFS() throws IOException { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); + Path testDir = UTIL.getDataTestDir("testReadingArchiveDirectoryFromFS"); try { - new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration())) - .get(TableName.valueOf(HConstants.HFILE_ARCHIVE_DIRECTORY)); - fail("Shouldn't be able to read a table descriptor for the archive directory."); + FSTableDescriptors fsTableDesc = new FSTableDescriptors(fs, testDir); + HTableDescriptor htd = + new HTableDescriptor(TableName.valueOf(HConstants.HFILE_ARCHIVE_DIRECTORY)); + assertTrue(fsTableDesc.createTableDescriptor(htd)); + HTableDescriptor htdResult = + fsTableDesc.get(TableName.valueOf(HConstants.HFILE_ARCHIVE_DIRECTORY)); + assertEquals(htd.compareTo(htdResult), 0); // 
descriptors should be equal. } catch (Exception e) { - LOG.debug("Correctly got error when reading a table descriptor from the archive directory: " - + e.getMessage()); + LOG.debug("Failed to read archive directory.", e); } }